From f99ef92c9024b1dfc604646c8c0912b963c11523 Mon Sep 17 00:00:00 2001 From: Ben Swartzlander Date: Thu, 8 Aug 2013 10:34:06 -0400 Subject: [PATCH] Initialize from cinder --- CONTRIBUTING.md | 12 + HACKING.rst | 275 + LICENSE | 176 + MANIFEST.in | 6 + README.rst | 21 + babel.cfg | 2 + bin/cinder-all | 70 + bin/cinder-api | 52 + bin/cinder-backup | 50 + bin/cinder-clear-rabbit-queues | 76 + bin/cinder-manage | 820 +++ bin/cinder-rootwrap | 128 + bin/cinder-rpc-zmq-receiver | 53 + bin/cinder-scheduler | 50 + bin/cinder-share | 60 + bin/cinder-volume | 61 + bin/cinder-volume-usage-audit | 101 + cinder/__init__.py | 32 + cinder/api/__init__.py | 32 + cinder/api/auth.py | 36 + cinder/api/common.py | 314 + cinder/api/contrib/__init__.py | 39 + cinder/api/contrib/admin_actions.py | 174 + cinder/api/contrib/backups.py | 278 + .../contrib/extended_snapshot_attributes.py | 125 + cinder/api/contrib/hosts.py | 265 + cinder/api/contrib/image_create.py | 31 + cinder/api/contrib/quota_classes.py | 103 + cinder/api/contrib/quotas.py | 125 + cinder/api/contrib/services.py | 139 + cinder/api/contrib/share_actions.py | 80 + cinder/api/contrib/share_snapshots.py | 181 + cinder/api/contrib/shares.py | 215 + cinder/api/contrib/types_extra_specs.py | 162 + cinder/api/contrib/types_manage.py | 122 + cinder/api/contrib/volume_actions.py | 204 + cinder/api/contrib/volume_host_attribute.py | 93 + cinder/api/contrib/volume_image_metadata.py | 106 + cinder/api/contrib/volume_tenant_attribute.py | 91 + cinder/api/extensions.py | 407 ++ cinder/api/middleware/__init__.py | 16 + cinder/api/middleware/auth.py | 140 + cinder/api/middleware/fault.py | 75 + cinder/api/middleware/sizelimit.py | 83 + cinder/api/openstack/__init__.py | 130 + cinder/api/openstack/urlmap.py | 27 + cinder/api/openstack/volume/__init__.py | 27 + cinder/api/openstack/volume/versions.py | 29 + cinder/api/openstack/wsgi.py | 1144 ++++ cinder/api/schemas/atom-link.rng | 141 + cinder/api/schemas/v1.1/extension.rng | 11 + cinder/api/schemas/v1.1/extensions.rng | 6 + cinder/api/schemas/v1.1/limits.rng | 28 + cinder/api/schemas/v1.1/metadata.rng | 9 + cinder/api/sizelimit.py | 28 + cinder/api/urlmap.py | 297 + cinder/api/v1/__init__.py | 0 cinder/api/v1/limits.py | 482 ++ cinder/api/v1/router.py | 95 + cinder/api/v1/snapshot_metadata.py | 164 + cinder/api/v1/snapshots.py | 234 + cinder/api/v1/types.py | 80 + cinder/api/v1/volume_metadata.py | 164 + cinder/api/v1/volumes.py | 421 ++ cinder/api/v2/__init__.py | 0 cinder/api/v2/limits.py | 482 ++ cinder/api/v2/router.py | 70 + cinder/api/v2/snapshot_metadata.py | 164 + cinder/api/v2/snapshots.py | 257 + cinder/api/v2/types.py | 80 + cinder/api/v2/views/__init__.py | 16 + cinder/api/v2/views/volumes.py | 122 + cinder/api/v2/volumes.py | 362 + cinder/api/versions.py | 282 + cinder/api/views/__init__.py | 16 + cinder/api/views/backups.py | 90 + cinder/api/views/limits.py | 100 + cinder/api/views/share_snapshots.py | 74 + cinder/api/views/shares.py | 74 + cinder/api/views/types.py | 34 + cinder/api/views/versions.py | 82 + cinder/api/xmlutil.py | 911 +++ cinder/backup/__init__.py | 23 + cinder/backup/api.py | 171 + cinder/backup/manager.py | 264 + cinder/backup/rpcapi.py | 73 + cinder/backup/services/__init__.py | 14 + cinder/backup/services/swift.py | 384 ++ cinder/brick/__init__.py | 16 + cinder/brick/iscsi/__init__.py | 16 + cinder/brick/iscsi/iscsi.py | 468 ++ cinder/brick/local_dev/__init__.py | 16 + cinder/brick/local_dev/lvm.py | 368 + cinder/common/__init__.py | 15 + cinder/common/sqlalchemyutils.py | 128 
+ cinder/compute/__init__.py | 0 cinder/compute/aggregate_states.py | 44 + cinder/context.py | 155 + cinder/db/__init__.py | 23 + cinder/db/api.py | 883 +++ cinder/db/base.py | 40 + cinder/db/migration.py | 38 + cinder/db/sqlalchemy/__init__.py | 17 + cinder/db/sqlalchemy/api.py | 2243 +++++++ cinder/db/sqlalchemy/migrate_repo/README | 4 + cinder/db/sqlalchemy/migrate_repo/__init__.py | 0 cinder/db/sqlalchemy/migrate_repo/manage.py | 4 + cinder/db/sqlalchemy/migrate_repo/migrate.cfg | 20 + .../migrate_repo/versions/001_cinder_init.py | 272 + .../migrate_repo/versions/002_quota_class.py | 150 + .../versions/003_glance_metadata.py | 78 + .../versions/004_volume_type_to_uuid.py | 155 + .../versions/005_add_source_volume_column.py | 41 + .../versions/005_sqlite_downgrade.sql | 124 + .../006_snapshots_add_provider_location.py | 36 + .../versions/007_add_volume_snapshot_fk.py | 41 + .../versions/007_sqlite_downgrade.sql | 32 + .../migrate_repo/versions/008_add_backup.py | 95 + .../009_add_snapshot_metadata_table.py | 60 + .../versions/010_add_share_tables.py | 79 + .../versions/011_add_share_snapshot_table.py | 69 + .../migrate_repo/versions/__init__.py | 0 cinder/db/sqlalchemy/migration.py | 118 + cinder/db/sqlalchemy/models.py | 529 ++ cinder/db/sqlalchemy/session.py | 151 + cinder/exception.py | 614 ++ cinder/flags.py | 261 + cinder/image/__init__.py | 16 + cinder/image/glance.py | 460 ++ cinder/image/image_utils.py | 283 + cinder/locale/bg_BG/LC_MESSAGES/cinder.po | 5574 +++++++++++++++ cinder/locale/bs/LC_MESSAGES/cinder.po | 5581 +++++++++++++++ cinder/locale/cinder.pot | 5573 +++++++++++++++ cinder/locale/cs/LC_MESSAGES/cinder.po | 5631 ++++++++++++++++ cinder/locale/da/LC_MESSAGES/cinder.po | 5573 +++++++++++++++ cinder/locale/de/LC_MESSAGES/cinder.po | 5584 +++++++++++++++ cinder/locale/en_AU/LC_MESSAGES/cinder.po | 5605 ++++++++++++++++ cinder/locale/en_GB/LC_MESSAGES/cinder.po | 5605 ++++++++++++++++ cinder/locale/en_US/LC_MESSAGES/cinder.po | 5977 +++++++++++++++++ cinder/locale/es/LC_MESSAGES/cinder.po | 5614 ++++++++++++++++ cinder/locale/fi_FI/LC_MESSAGES/cinder.po | 5574 +++++++++++++++ cinder/locale/fr/LC_MESSAGES/cinder.po | 5620 ++++++++++++++++ cinder/locale/it/LC_MESSAGES/cinder.po | 5605 ++++++++++++++++ cinder/locale/ja/LC_MESSAGES/cinder.po | 5606 ++++++++++++++++ cinder/locale/ko/LC_MESSAGES/cinder.po | 5593 +++++++++++++++ cinder/locale/ko_KR/LC_MESSAGES/cinder.po | 5574 +++++++++++++++ cinder/locale/pt_BR/LC_MESSAGES/cinder.po | 5604 ++++++++++++++++ cinder/locale/ru/LC_MESSAGES/cinder.po | 5660 ++++++++++++++++ cinder/locale/tl/LC_MESSAGES/cinder.po | 5575 +++++++++++++++ cinder/locale/tr/LC_MESSAGES/cinder.po | 5573 +++++++++++++++ cinder/locale/uk/LC_MESSAGES/cinder.po | 5576 +++++++++++++++ cinder/locale/vi_VN/LC_MESSAGES/cinder.po | 5574 +++++++++++++++ cinder/locale/zh_CN/LC_MESSAGES/cinder.po | 5657 ++++++++++++++++ cinder/locale/zh_TW/LC_MESSAGES/cinder.po | 5598 +++++++++++++++ cinder/manager.py | 221 + cinder/openstack/__init__.py | 15 + cinder/openstack/common/README | 13 + cinder/openstack/common/__init__.py | 15 + cinder/openstack/common/context.py | 81 + cinder/openstack/common/eventlet_backdoor.py | 87 + cinder/openstack/common/exception.py | 142 + cinder/openstack/common/excutils.py | 51 + cinder/openstack/common/fileutils.py | 35 + cinder/openstack/common/gettextutils.py | 50 + cinder/openstack/common/importutils.py | 67 + cinder/openstack/common/jsonutils.py | 167 + cinder/openstack/common/local.py | 48 + cinder/openstack/common/lockutils.py | 278 + 
cinder/openstack/common/log.py | 540 ++ cinder/openstack/common/loopingcall.py | 147 + cinder/openstack/common/network_utils.py | 68 + cinder/openstack/common/notifier/__init__.py | 14 + cinder/openstack/common/notifier/api.py | 182 + .../openstack/common/notifier/log_notifier.py | 35 + .../common/notifier/no_op_notifier.py | 19 + .../common/notifier/rabbit_notifier.py | 29 + .../openstack/common/notifier/rpc_notifier.py | 46 + .../common/notifier/rpc_notifier2.py | 52 + .../common/notifier/test_notifier.py | 22 + cinder/openstack/common/policy.py | 301 + cinder/openstack/common/processutils.py | 181 + cinder/openstack/common/rootwrap/__init__.py | 16 + cinder/openstack/common/rootwrap/cmd.py | 128 + cinder/openstack/common/rootwrap/filters.py | 226 + cinder/openstack/common/rootwrap/wrapper.py | 149 + cinder/openstack/common/rpc/__init__.py | 307 + cinder/openstack/common/rpc/amqp.py | 677 ++ cinder/openstack/common/rpc/common.py | 508 ++ cinder/openstack/common/rpc/dispatcher.py | 153 + cinder/openstack/common/rpc/impl_fake.py | 195 + cinder/openstack/common/rpc/impl_kombu.py | 838 +++ cinder/openstack/common/rpc/impl_qpid.py | 649 ++ cinder/openstack/common/rpc/impl_zmq.py | 851 +++ cinder/openstack/common/rpc/matchmaker.py | 425 ++ .../openstack/common/rpc/matchmaker_redis.py | 149 + cinder/openstack/common/rpc/proxy.py | 179 + cinder/openstack/common/rpc/service.py | 75 + cinder/openstack/common/rpc/zmq_receiver.py | 41 + cinder/openstack/common/scheduler/__init__.py | 0 cinder/openstack/common/scheduler/filter.py | 71 + .../common/scheduler/filters/__init__.py | 41 + .../filters/availability_zone_filter.py | 30 + .../scheduler/filters/capabilities_filter.py | 63 + .../scheduler/filters/extra_specs_ops.py | 72 + .../common/scheduler/filters/json_filter.py | 150 + cinder/openstack/common/scheduler/weight.py | 91 + .../common/scheduler/weights/__init__.py | 45 + cinder/openstack/common/service.py | 332 + cinder/openstack/common/strutils.py | 150 + cinder/openstack/common/threadgroup.py | 114 + cinder/openstack/common/timeutils.py | 186 + cinder/openstack/common/uuidutils.py | 39 + cinder/policy.py | 105 + cinder/quota.py | 813 +++ cinder/scheduler/__init__.py | 27 + cinder/scheduler/chance.py | 86 + cinder/scheduler/driver.py | 109 + cinder/scheduler/filter_scheduler.py | 354 + cinder/scheduler/filters/__init__.py | 14 + cinder/scheduler/filters/capacity_filter.py | 57 + cinder/scheduler/filters/retry_filter.py | 45 + cinder/scheduler/host_manager.py | 337 + cinder/scheduler/manager.py | 178 + cinder/scheduler/rpcapi.py | 79 + cinder/scheduler/scheduler_options.py | 105 + cinder/scheduler/simple.py | 137 + cinder/scheduler/weights/__init__.py | 14 + cinder/scheduler/weights/capacity.py | 56 + cinder/service.py | 622 ++ cinder/share/__init__.py | 25 + cinder/share/api.py | 325 + cinder/share/configuration.py | 84 + cinder/share/driver.py | 178 + cinder/share/drivers/__init__.py | 22 + cinder/share/drivers/lvm.py | 609 ++ cinder/share/drivers/netapp.py | 745 ++ cinder/share/manager.py | 221 + cinder/share/rpcapi.py | 93 + cinder/test.py | 285 + cinder/testing/README.rst | 66 + cinder/tests/__init__.py | 85 + cinder/tests/api/__init__.py | 19 + cinder/tests/api/common.py | 38 + cinder/tests/api/contrib/__init__.py | 19 + cinder/tests/api/contrib/stubs.py | 125 + .../tests/api/contrib/test_admin_actions.py | 348 + cinder/tests/api/contrib/test_backups.py | 860 +++ .../test_extended_snapshot_attributes.py | 124 + cinder/tests/api/contrib/test_hosts.py | 202 + 
cinder/tests/api/contrib/test_services.py | 216 + .../tests/api/contrib/test_share_actions.py | 116 + .../tests/api/contrib/test_share_snapshots.py | 190 + cinder/tests/api/contrib/test_shares.py | 245 + .../api/contrib/test_types_extra_specs.py | 232 + cinder/tests/api/contrib/test_types_manage.py | 129 + .../tests/api/contrib/test_volume_actions.py | 248 + .../api/contrib/test_volume_host_attribute.py | 134 + .../api/contrib/test_volume_image_metadata.py | 130 + .../contrib/test_volume_tenant_attribute.py | 137 + cinder/tests/api/extensions/__init__.py | 15 + cinder/tests/api/extensions/foxinsocks.py | 93 + cinder/tests/api/fakes.py | 190 + cinder/tests/api/middleware/__init__.py | 0 cinder/tests/api/middleware/test_auth.py | 59 + cinder/tests/api/middleware/test_faults.py | 208 + cinder/tests/api/middleware/test_sizelimit.py | 100 + cinder/tests/api/openstack/__init__.py | 19 + cinder/tests/api/openstack/test_wsgi.py | 858 +++ cinder/tests/api/test_common.py | 243 + cinder/tests/api/test_extensions.py | 154 + cinder/tests/api/test_router.py | 158 + cinder/tests/api/test_wsgi.py | 67 + cinder/tests/api/test_xmlutil.py | 697 ++ cinder/tests/api/v1/__init__.py | 0 cinder/tests/api/v1/stubs.py | 131 + cinder/tests/api/v1/test_limits.py | 895 +++ cinder/tests/api/v1/test_snapshot_metadata.py | 458 ++ cinder/tests/api/v1/test_snapshots.py | 417 ++ cinder/tests/api/v1/test_types.py | 194 + cinder/tests/api/v1/test_volume_metadata.py | 441 ++ cinder/tests/api/v1/test_volumes.py | 764 +++ cinder/tests/api/v2/__init__.py | 0 cinder/tests/api/v2/stubs.py | 133 + cinder/tests/api/v2/test_limits.py | 890 +++ cinder/tests/api/v2/test_snapshot_metadata.py | 458 ++ cinder/tests/api/v2/test_snapshots.py | 425 ++ cinder/tests/api/v2/test_types.py | 211 + cinder/tests/api/v2/test_volumes.py | 955 +++ cinder/tests/backup/__init__.py | 14 + cinder/tests/backup/fake_service.py | 41 + cinder/tests/backup/fake_swift_client.py | 111 + cinder/tests/brick/__init__.py | 16 + cinder/tests/brick/test_brick_lvm.py | 144 + cinder/tests/db/__init__.py | 20 + cinder/tests/db/fakes.py | 46 + cinder/tests/declare_flags.py | 24 + cinder/tests/fake_driver.py | 118 + cinder/tests/fake_flags.py | 46 + cinder/tests/fake_utils.py | 112 + cinder/tests/glance/__init__.py | 20 + cinder/tests/glance/stubs.py | 112 + cinder/tests/image/__init__.py | 20 + cinder/tests/image/fake.py | 243 + cinder/tests/image/test_glance.py | 590 ++ cinder/tests/integrated/__init__.py | 22 + cinder/tests/integrated/api/__init__.py | 20 + cinder/tests/integrated/api/client.py | 219 + cinder/tests/integrated/integrated_helpers.py | 130 + cinder/tests/integrated/test_extensions.py | 40 + cinder/tests/integrated/test_login.py | 31 + cinder/tests/integrated/test_volumes.py | 198 + cinder/tests/integrated/test_xml.py | 51 + cinder/tests/monkey_patch_example/__init__.py | 33 + .../tests/monkey_patch_example/example_a.py | 29 + .../tests/monkey_patch_example/example_b.py | 30 + cinder/tests/policy.json | 42 + cinder/tests/runtime_flags.py | 24 + cinder/tests/scheduler/__init__.py | 19 + cinder/tests/scheduler/fakes.py | 104 + .../tests/scheduler/test_capacity_weigher.py | 94 + .../tests/scheduler/test_filter_scheduler.py | 302 + cinder/tests/scheduler/test_host_filters.py | 159 + cinder/tests/scheduler/test_host_manager.py | 299 + cinder/tests/scheduler/test_rpcapi.py | 91 + cinder/tests/scheduler/test_scheduler.py | 394 ++ .../tests/scheduler/test_scheduler_options.py | 138 + cinder/tests/test_HpSanISCSIDriver.py | 245 + cinder/tests/test_api.py | 75 + 
cinder/tests/test_backup.py | 395 ++ cinder/tests/test_backup_swift.py | 208 + cinder/tests/test_context.py | 72 + cinder/tests/test_coraid.py | 268 + cinder/tests/test_drivers_compatibility.py | 184 + cinder/tests/test_emc.py | 766 +++ cinder/tests/test_exception.py | 94 + cinder/tests/test_flags.py | 83 + cinder/tests/test_glusterfs.py | 579 ++ cinder/tests/test_hp3par.py | 1044 +++ cinder/tests/test_huawei.py | 859 +++ cinder/tests/test_iscsi.py | 190 + cinder/tests/test_migrations.conf | 9 + cinder/tests/test_migrations.py | 629 ++ cinder/tests/test_misc.py | 61 + cinder/tests/test_netapp.py | 2361 +++++++ cinder/tests/test_netapp_nfs.py | 695 ++ cinder/tests/test_nexenta.py | 302 + cinder/tests/test_nfs.py | 654 ++ cinder/tests/test_policy.py | 232 + cinder/tests/test_quota.py | 1369 ++++ cinder/tests/test_rbd.py | 266 + cinder/tests/test_scality.py | 185 + cinder/tests/test_service.py | 224 + cinder/tests/test_share.py | 368 + cinder/tests/test_share_api.py | 513 ++ cinder/tests/test_share_driver.py | 49 + cinder/tests/test_share_lvm.py | 754 +++ cinder/tests/test_share_netapp.py | 690 ++ cinder/tests/test_share_rpcapi.py | 150 + cinder/tests/test_sheepdog.py | 64 + cinder/tests/test_skip_examples.py | 47 + cinder/tests/test_solidfire.py | 283 + cinder/tests/test_storwize_svc.py | 1978 ++++++ cinder/tests/test_test.py | 46 + cinder/tests/test_test_utils.py | 28 + cinder/tests/test_utils.py | 728 ++ cinder/tests/test_volume.py | 1331 ++++ cinder/tests/test_volume_configuration.py | 72 + cinder/tests/test_volume_glance_metadata.py | 131 + cinder/tests/test_volume_rpcapi.py | 172 + cinder/tests/test_volume_types.py | 187 + cinder/tests/test_volume_types_extra_specs.py | 130 + cinder/tests/test_volume_utils.py | 117 + cinder/tests/test_windows.py | 220 + cinder/tests/test_wsgi.py | 264 + cinder/tests/test_xenapi_sm.py | 509 ++ cinder/tests/test_xiv.py | 245 + cinder/tests/test_zadara.py | 581 ++ cinder/tests/utils.py | 32 + cinder/tests/var/ca.crt | 35 + cinder/tests/var/certificate.crt | 30 + cinder/tests/var/privatekey.key | 51 + cinder/tests/windows/__init__.py | 0 cinder/tests/windows/basetestcase.py | 96 + cinder/tests/windows/db_fakes.py | 36 + cinder/tests/windows/mockproxy.py | 238 + cinder/tests/windows/stubs/README.rst | 2 + ...river.test_check_for_setup_errors_wmi.p.gz | Bin 0 -> 473 bytes ...stWindowsDriver.test_create_export_os.p.gz | Bin 0 -> 439 bytes ...tWindowsDriver.test_create_export_wmi.p.gz | Bin 0 -> 1455 bytes ...WindowsDriver.test_create_snapshot_os.p.gz | Bin 0 -> 441 bytes ...indowsDriver.test_create_snapshot_wmi.p.gz | Bin 0 -> 1476 bytes ...r.test_create_volume_from_snapshot_os.p.gz | Bin 0 -> 500 bytes ....test_create_volume_from_snapshot_wmi.p.gz | Bin 0 -> 1840 bytes ...stWindowsDriver.test_create_volume_os.p.gz | Bin 0 -> 439 bytes ...tWindowsDriver.test_create_volume_wmi.p.gz | Bin 0 -> 1057 bytes ...WindowsDriver.test_delete_snapshot_os.p.gz | Bin 0 -> 441 bytes ...indowsDriver.test_delete_snapshot_wmi.p.gz | Bin 0 -> 1504 bytes ...stWindowsDriver.test_delete_volume_os.p.gz | Bin 0 -> 472 bytes ...tWindowsDriver.test_delete_volume_wmi.p.gz | Bin 0 -> 1040 bytes ...stWindowsDriver.test_ensure_export_os.p.gz | Bin 0 -> 439 bytes ...tWindowsDriver.test_ensure_export_wmi.p.gz | Bin 0 -> 1453 bytes ...sDriver.test_initialize_connection_os.p.gz | Bin 0 -> 447 bytes ...Driver.test_initialize_connection_wmi.p.gz | Bin 0 -> 1982 bytes ...stWindowsDriver.test_remove_export_os.p.gz | Bin 0 -> 439 bytes ...tWindowsDriver.test_remove_export_wmi.p.gz | Bin 0 -> 1462 
bytes cinder/tests/windows/windowsutils.py | 145 + cinder/tests/xenapi/__init__.py | 0 cinder/utils.py | 1216 ++++ cinder/version.py | 25 + cinder/volume/__init__.py | 25 + cinder/volume/api.py | 765 +++ cinder/volume/configuration.py | 83 + cinder/volume/driver.py | 551 ++ cinder/volume/drivers/__init__.py | 22 + cinder/volume/drivers/coraid.py | 424 ++ cinder/volume/drivers/emc/__init__.py | 0 .../drivers/emc/cinder_emc_config.xml.sample | 12 + cinder/volume/drivers/emc/emc_smis_common.py | 1564 +++++ cinder/volume/drivers/emc/emc_smis_iscsi.py | 246 + cinder/volume/drivers/glusterfs.py | 283 + cinder/volume/drivers/huawei/__init__.py | 15 + .../huawei/cinder_huawei_conf.xml.sample | 34 + cinder/volume/drivers/huawei/huawei_iscsi.py | 1547 +++++ cinder/volume/drivers/lvm.py | 688 ++ cinder/volume/drivers/netapp/__init__.py | 0 cinder/volume/drivers/netapp/api.py | 410 ++ cinder/volume/drivers/netapp/iscsi.py | 2528 +++++++ cinder/volume/drivers/netapp/nfs.py | 624 ++ cinder/volume/drivers/nexenta/__init__.py | 33 + cinder/volume/drivers/nexenta/jsonrpc.py | 84 + cinder/volume/drivers/nexenta/volume.py | 353 + cinder/volume/drivers/nfs.py | 357 + cinder/volume/drivers/rbd.py | 306 + cinder/volume/drivers/san/__init__.py | 27 + cinder/volume/drivers/san/hp/__init__.py | 0 .../volume/drivers/san/hp/hp_3par_common.py | 742 ++ cinder/volume/drivers/san/hp/hp_3par_fc.py | 259 + cinder/volume/drivers/san/hp/hp_3par_iscsi.py | 279 + cinder/volume/drivers/san/hp_lefthand.py | 314 + cinder/volume/drivers/san/san.py | 177 + cinder/volume/drivers/san/solaris.py | 285 + cinder/volume/drivers/scality.py | 261 + cinder/volume/drivers/sheepdog.py | 141 + cinder/volume/drivers/solidfire.py | 590 ++ cinder/volume/drivers/storwize_svc.py | 1627 +++++ cinder/volume/drivers/windows.py | 246 + cinder/volume/drivers/xenapi/__init__.py | 13 + cinder/volume/drivers/xenapi/lib.py | 542 ++ cinder/volume/drivers/xenapi/sm.py | 272 + cinder/volume/drivers/xenapi/tools.py | 7 + cinder/volume/drivers/xiv.py | 122 + cinder/volume/drivers/zadara.py | 491 ++ cinder/volume/manager.py | 725 ++ cinder/volume/rpcapi.py | 130 + cinder/volume/utils.py | 131 + cinder/volume/volume_types.py | 158 + cinder/wsgi.py | 493 ++ contrib/redhat-eventlet.patch | 16 + doc/.gitignore | 3 + doc/Makefile | 97 + doc/README.rst | 55 + doc/ext/__init__.py | 0 doc/ext/cinder_autodoc.py | 12 + doc/ext/cinder_todo.py | 104 + doc/find_autodoc_modules.sh | 20 + doc/generate_autodoc_index.sh | 46 + doc/source/_ga/layout.html | 17 + doc/source/_static/.gitignore | 0 doc/source/_static/.placeholder | 0 doc/source/_static/basic.css | 416 ++ doc/source/_static/default.css | 230 + doc/source/_static/jquery.tweet.js | 154 + doc/source/_static/tweaks.css | 218 + doc/source/_templates/.gitignore | 0 doc/source/_templates/.placeholder | 0 doc/source/_theme/layout.html | 95 + doc/source/_theme/theme.conf | 5 + doc/source/conf.py | 244 + doc/source/devref/addmethod.openstackapi.rst | 56 + doc/source/devref/api.rst | 167 + doc/source/devref/architecture.rst | 53 + doc/source/devref/auth.rst | 257 + doc/source/devref/cinder.rst | 215 + doc/source/devref/database.rst | 63 + doc/source/devref/development.environment.rst | 152 + doc/source/devref/fakes.rst | 85 + doc/source/devref/gerrit.rst | 16 + doc/source/devref/il8n.rst | 33 + doc/source/devref/index.rst | 81 + doc/source/devref/jenkins.rst | 37 + doc/source/devref/launchpad.rst | 54 + doc/source/devref/rpc.rst | 151 + doc/source/devref/scheduler.rst | 61 + doc/source/devref/services.rst | 55 + 
doc/source/devref/threading.rst | 51 + doc/source/devref/unit_tests.rst | 159 + doc/source/devref/volume.rst | 64 + doc/source/images/rpc/arch.png | Bin 0 -> 26690 bytes doc/source/images/rpc/arch.svg | 292 + doc/source/images/rpc/flow1.png | Bin 0 -> 40982 bytes doc/source/images/rpc/flow1.svg | 617 ++ doc/source/images/rpc/flow2.png | Bin 0 -> 30650 bytes doc/source/images/rpc/flow2.svg | 423 ++ doc/source/images/rpc/rabt.png | Bin 0 -> 44964 bytes doc/source/images/rpc/rabt.svg | 581 ++ doc/source/images/rpc/state.png | Bin 0 -> 38543 bytes doc/source/index.rst | 62 + doc/source/man/cinder-manage.rst | 281 + etc/cinder/api-paste.ini | 62 + etc/cinder/cinder.conf.sample | 1341 ++++ etc/cinder/logging_sample.conf | 76 + etc/cinder/policy.json | 34 + etc/cinder/rootwrap.conf | 27 + etc/cinder/rootwrap.d/share.filters | 36 + etc/cinder/rootwrap.d/volume.filters | 59 + openstack-common.conf | 32 + pylintrc | 38 + run_tests.sh | 182 + setup.cfg | 81 + setup.py | 21 + tools/conf/extract_opts.py | 195 + tools/conf/generate_sample.sh | 24 + tools/enable-pre-commit-hook.sh | 42 + tools/install_venv.py | 72 + tools/install_venv_common.py | 220 + tools/lintstack.py | 199 + tools/lintstack.sh | 59 + tools/patch_tox_venv.py | 38 + tools/pip-requires | 26 + tools/test-requires | 18 + tools/with_venv.sh | 4 + tox.ini | 42 + 532 files changed, 240158 insertions(+) create mode 100644 CONTRIBUTING.md create mode 100644 HACKING.rst create mode 100644 LICENSE create mode 100644 MANIFEST.in create mode 100644 README.rst create mode 100644 babel.cfg create mode 100755 bin/cinder-all create mode 100755 bin/cinder-api create mode 100755 bin/cinder-backup create mode 100755 bin/cinder-clear-rabbit-queues create mode 100755 bin/cinder-manage create mode 100755 bin/cinder-rootwrap create mode 100755 bin/cinder-rpc-zmq-receiver create mode 100755 bin/cinder-scheduler create mode 100755 bin/cinder-share create mode 100755 bin/cinder-volume create mode 100755 bin/cinder-volume-usage-audit create mode 100644 cinder/__init__.py create mode 100644 cinder/api/__init__.py create mode 100644 cinder/api/auth.py create mode 100644 cinder/api/common.py create mode 100644 cinder/api/contrib/__init__.py create mode 100644 cinder/api/contrib/admin_actions.py create mode 100644 cinder/api/contrib/backups.py create mode 100644 cinder/api/contrib/extended_snapshot_attributes.py create mode 100644 cinder/api/contrib/hosts.py create mode 100644 cinder/api/contrib/image_create.py create mode 100644 cinder/api/contrib/quota_classes.py create mode 100644 cinder/api/contrib/quotas.py create mode 100644 cinder/api/contrib/services.py create mode 100644 cinder/api/contrib/share_actions.py create mode 100644 cinder/api/contrib/share_snapshots.py create mode 100644 cinder/api/contrib/shares.py create mode 100644 cinder/api/contrib/types_extra_specs.py create mode 100644 cinder/api/contrib/types_manage.py create mode 100644 cinder/api/contrib/volume_actions.py create mode 100644 cinder/api/contrib/volume_host_attribute.py create mode 100644 cinder/api/contrib/volume_image_metadata.py create mode 100644 cinder/api/contrib/volume_tenant_attribute.py create mode 100644 cinder/api/extensions.py create mode 100644 cinder/api/middleware/__init__.py create mode 100644 cinder/api/middleware/auth.py create mode 100644 cinder/api/middleware/fault.py create mode 100644 cinder/api/middleware/sizelimit.py create mode 100644 cinder/api/openstack/__init__.py create mode 100644 cinder/api/openstack/urlmap.py create mode 100644 
cinder/api/openstack/volume/__init__.py create mode 100644 cinder/api/openstack/volume/versions.py create mode 100644 cinder/api/openstack/wsgi.py create mode 100644 cinder/api/schemas/atom-link.rng create mode 100644 cinder/api/schemas/v1.1/extension.rng create mode 100644 cinder/api/schemas/v1.1/extensions.rng create mode 100644 cinder/api/schemas/v1.1/limits.rng create mode 100644 cinder/api/schemas/v1.1/metadata.rng create mode 100644 cinder/api/sizelimit.py create mode 100644 cinder/api/urlmap.py create mode 100644 cinder/api/v1/__init__.py create mode 100644 cinder/api/v1/limits.py create mode 100644 cinder/api/v1/router.py create mode 100644 cinder/api/v1/snapshot_metadata.py create mode 100644 cinder/api/v1/snapshots.py create mode 100644 cinder/api/v1/types.py create mode 100644 cinder/api/v1/volume_metadata.py create mode 100644 cinder/api/v1/volumes.py create mode 100644 cinder/api/v2/__init__.py create mode 100644 cinder/api/v2/limits.py create mode 100644 cinder/api/v2/router.py create mode 100644 cinder/api/v2/snapshot_metadata.py create mode 100644 cinder/api/v2/snapshots.py create mode 100644 cinder/api/v2/types.py create mode 100644 cinder/api/v2/views/__init__.py create mode 100644 cinder/api/v2/views/volumes.py create mode 100644 cinder/api/v2/volumes.py create mode 100644 cinder/api/versions.py create mode 100644 cinder/api/views/__init__.py create mode 100644 cinder/api/views/backups.py create mode 100644 cinder/api/views/limits.py create mode 100644 cinder/api/views/share_snapshots.py create mode 100644 cinder/api/views/shares.py create mode 100644 cinder/api/views/types.py create mode 100644 cinder/api/views/versions.py create mode 100644 cinder/api/xmlutil.py create mode 100644 cinder/backup/__init__.py create mode 100644 cinder/backup/api.py create mode 100755 cinder/backup/manager.py create mode 100644 cinder/backup/rpcapi.py create mode 100644 cinder/backup/services/__init__.py create mode 100644 cinder/backup/services/swift.py create mode 100644 cinder/brick/__init__.py create mode 100644 cinder/brick/iscsi/__init__.py create mode 100644 cinder/brick/iscsi/iscsi.py create mode 100644 cinder/brick/local_dev/__init__.py create mode 100644 cinder/brick/local_dev/lvm.py create mode 100644 cinder/common/__init__.py create mode 100755 cinder/common/sqlalchemyutils.py create mode 100644 cinder/compute/__init__.py create mode 100644 cinder/compute/aggregate_states.py create mode 100644 cinder/context.py create mode 100644 cinder/db/__init__.py create mode 100644 cinder/db/api.py create mode 100644 cinder/db/base.py create mode 100644 cinder/db/migration.py create mode 100644 cinder/db/sqlalchemy/__init__.py create mode 100644 cinder/db/sqlalchemy/api.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/README create mode 100644 cinder/db/sqlalchemy/migrate_repo/__init__.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/manage.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/migrate.cfg create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/004_volume_type_to_uuid.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/005_add_source_volume_column.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/005_sqlite_downgrade.sql create mode 100644 
cinder/db/sqlalchemy/migrate_repo/versions/006_snapshots_add_provider_location.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/007_add_volume_snapshot_fk.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/007_sqlite_downgrade.sql create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/010_add_share_tables.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/011_add_share_snapshot_table.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/__init__.py create mode 100644 cinder/db/sqlalchemy/migration.py create mode 100644 cinder/db/sqlalchemy/models.py create mode 100644 cinder/db/sqlalchemy/session.py create mode 100644 cinder/exception.py create mode 100644 cinder/flags.py create mode 100644 cinder/image/__init__.py create mode 100644 cinder/image/glance.py create mode 100644 cinder/image/image_utils.py create mode 100644 cinder/locale/bg_BG/LC_MESSAGES/cinder.po create mode 100644 cinder/locale/bs/LC_MESSAGES/cinder.po create mode 100644 cinder/locale/cinder.pot create mode 100644 cinder/locale/cs/LC_MESSAGES/cinder.po create mode 100644 cinder/locale/da/LC_MESSAGES/cinder.po create mode 100644 cinder/locale/de/LC_MESSAGES/cinder.po create mode 100644 cinder/locale/en_AU/LC_MESSAGES/cinder.po create mode 100644 cinder/locale/en_GB/LC_MESSAGES/cinder.po create mode 100644 cinder/locale/en_US/LC_MESSAGES/cinder.po create mode 100644 cinder/locale/es/LC_MESSAGES/cinder.po create mode 100644 cinder/locale/fi_FI/LC_MESSAGES/cinder.po create mode 100644 cinder/locale/fr/LC_MESSAGES/cinder.po create mode 100644 cinder/locale/it/LC_MESSAGES/cinder.po create mode 100644 cinder/locale/ja/LC_MESSAGES/cinder.po create mode 100644 cinder/locale/ko/LC_MESSAGES/cinder.po create mode 100644 cinder/locale/ko_KR/LC_MESSAGES/cinder.po create mode 100644 cinder/locale/pt_BR/LC_MESSAGES/cinder.po create mode 100644 cinder/locale/ru/LC_MESSAGES/cinder.po create mode 100644 cinder/locale/tl/LC_MESSAGES/cinder.po create mode 100644 cinder/locale/tr/LC_MESSAGES/cinder.po create mode 100644 cinder/locale/uk/LC_MESSAGES/cinder.po create mode 100644 cinder/locale/vi_VN/LC_MESSAGES/cinder.po create mode 100644 cinder/locale/zh_CN/LC_MESSAGES/cinder.po create mode 100644 cinder/locale/zh_TW/LC_MESSAGES/cinder.po create mode 100644 cinder/manager.py create mode 100644 cinder/openstack/__init__.py create mode 100644 cinder/openstack/common/README create mode 100644 cinder/openstack/common/__init__.py create mode 100644 cinder/openstack/common/context.py create mode 100644 cinder/openstack/common/eventlet_backdoor.py create mode 100644 cinder/openstack/common/exception.py create mode 100644 cinder/openstack/common/excutils.py create mode 100644 cinder/openstack/common/fileutils.py create mode 100644 cinder/openstack/common/gettextutils.py create mode 100644 cinder/openstack/common/importutils.py create mode 100644 cinder/openstack/common/jsonutils.py create mode 100644 cinder/openstack/common/local.py create mode 100644 cinder/openstack/common/lockutils.py create mode 100644 cinder/openstack/common/log.py create mode 100644 cinder/openstack/common/loopingcall.py create mode 100644 cinder/openstack/common/network_utils.py create mode 100644 cinder/openstack/common/notifier/__init__.py create mode 100644 cinder/openstack/common/notifier/api.py create mode 100644 
cinder/openstack/common/notifier/log_notifier.py create mode 100644 cinder/openstack/common/notifier/no_op_notifier.py create mode 100644 cinder/openstack/common/notifier/rabbit_notifier.py create mode 100644 cinder/openstack/common/notifier/rpc_notifier.py create mode 100644 cinder/openstack/common/notifier/rpc_notifier2.py create mode 100644 cinder/openstack/common/notifier/test_notifier.py create mode 100644 cinder/openstack/common/policy.py create mode 100644 cinder/openstack/common/processutils.py create mode 100644 cinder/openstack/common/rootwrap/__init__.py create mode 100755 cinder/openstack/common/rootwrap/cmd.py create mode 100644 cinder/openstack/common/rootwrap/filters.py create mode 100644 cinder/openstack/common/rootwrap/wrapper.py create mode 100644 cinder/openstack/common/rpc/__init__.py create mode 100644 cinder/openstack/common/rpc/amqp.py create mode 100644 cinder/openstack/common/rpc/common.py create mode 100644 cinder/openstack/common/rpc/dispatcher.py create mode 100644 cinder/openstack/common/rpc/impl_fake.py create mode 100644 cinder/openstack/common/rpc/impl_kombu.py create mode 100644 cinder/openstack/common/rpc/impl_qpid.py create mode 100644 cinder/openstack/common/rpc/impl_zmq.py create mode 100644 cinder/openstack/common/rpc/matchmaker.py create mode 100644 cinder/openstack/common/rpc/matchmaker_redis.py create mode 100644 cinder/openstack/common/rpc/proxy.py create mode 100644 cinder/openstack/common/rpc/service.py create mode 100755 cinder/openstack/common/rpc/zmq_receiver.py create mode 100644 cinder/openstack/common/scheduler/__init__.py create mode 100644 cinder/openstack/common/scheduler/filter.py create mode 100644 cinder/openstack/common/scheduler/filters/__init__.py create mode 100644 cinder/openstack/common/scheduler/filters/availability_zone_filter.py create mode 100644 cinder/openstack/common/scheduler/filters/capabilities_filter.py create mode 100644 cinder/openstack/common/scheduler/filters/extra_specs_ops.py create mode 100644 cinder/openstack/common/scheduler/filters/json_filter.py create mode 100644 cinder/openstack/common/scheduler/weight.py create mode 100644 cinder/openstack/common/scheduler/weights/__init__.py create mode 100644 cinder/openstack/common/service.py create mode 100644 cinder/openstack/common/strutils.py create mode 100644 cinder/openstack/common/threadgroup.py create mode 100644 cinder/openstack/common/timeutils.py create mode 100644 cinder/openstack/common/uuidutils.py create mode 100644 cinder/policy.py create mode 100644 cinder/quota.py create mode 100644 cinder/scheduler/__init__.py create mode 100644 cinder/scheduler/chance.py create mode 100644 cinder/scheduler/driver.py create mode 100644 cinder/scheduler/filter_scheduler.py create mode 100644 cinder/scheduler/filters/__init__.py create mode 100644 cinder/scheduler/filters/capacity_filter.py create mode 100644 cinder/scheduler/filters/retry_filter.py create mode 100644 cinder/scheduler/host_manager.py create mode 100644 cinder/scheduler/manager.py create mode 100644 cinder/scheduler/rpcapi.py create mode 100644 cinder/scheduler/scheduler_options.py create mode 100644 cinder/scheduler/simple.py create mode 100644 cinder/scheduler/weights/__init__.py create mode 100644 cinder/scheduler/weights/capacity.py create mode 100644 cinder/service.py create mode 100644 cinder/share/__init__.py create mode 100644 cinder/share/api.py create mode 100644 cinder/share/configuration.py create mode 100644 cinder/share/driver.py create mode 100644 cinder/share/drivers/__init__.py create 
mode 100644 cinder/share/drivers/lvm.py create mode 100644 cinder/share/drivers/netapp.py create mode 100644 cinder/share/manager.py create mode 100644 cinder/share/rpcapi.py create mode 100644 cinder/test.py create mode 100644 cinder/testing/README.rst create mode 100644 cinder/tests/__init__.py create mode 100644 cinder/tests/api/__init__.py create mode 100644 cinder/tests/api/common.py create mode 100644 cinder/tests/api/contrib/__init__.py create mode 100644 cinder/tests/api/contrib/stubs.py create mode 100644 cinder/tests/api/contrib/test_admin_actions.py create mode 100644 cinder/tests/api/contrib/test_backups.py create mode 100644 cinder/tests/api/contrib/test_extended_snapshot_attributes.py create mode 100644 cinder/tests/api/contrib/test_hosts.py create mode 100644 cinder/tests/api/contrib/test_services.py create mode 100644 cinder/tests/api/contrib/test_share_actions.py create mode 100644 cinder/tests/api/contrib/test_share_snapshots.py create mode 100644 cinder/tests/api/contrib/test_shares.py create mode 100644 cinder/tests/api/contrib/test_types_extra_specs.py create mode 100644 cinder/tests/api/contrib/test_types_manage.py create mode 100644 cinder/tests/api/contrib/test_volume_actions.py create mode 100644 cinder/tests/api/contrib/test_volume_host_attribute.py create mode 100644 cinder/tests/api/contrib/test_volume_image_metadata.py create mode 100644 cinder/tests/api/contrib/test_volume_tenant_attribute.py create mode 100644 cinder/tests/api/extensions/__init__.py create mode 100644 cinder/tests/api/extensions/foxinsocks.py create mode 100644 cinder/tests/api/fakes.py create mode 100644 cinder/tests/api/middleware/__init__.py create mode 100644 cinder/tests/api/middleware/test_auth.py create mode 100644 cinder/tests/api/middleware/test_faults.py create mode 100644 cinder/tests/api/middleware/test_sizelimit.py create mode 100644 cinder/tests/api/openstack/__init__.py create mode 100644 cinder/tests/api/openstack/test_wsgi.py create mode 100644 cinder/tests/api/test_common.py create mode 100644 cinder/tests/api/test_extensions.py create mode 100644 cinder/tests/api/test_router.py create mode 100644 cinder/tests/api/test_wsgi.py create mode 100644 cinder/tests/api/test_xmlutil.py create mode 100644 cinder/tests/api/v1/__init__.py create mode 100644 cinder/tests/api/v1/stubs.py create mode 100644 cinder/tests/api/v1/test_limits.py create mode 100644 cinder/tests/api/v1/test_snapshot_metadata.py create mode 100644 cinder/tests/api/v1/test_snapshots.py create mode 100644 cinder/tests/api/v1/test_types.py create mode 100644 cinder/tests/api/v1/test_volume_metadata.py create mode 100644 cinder/tests/api/v1/test_volumes.py create mode 100644 cinder/tests/api/v2/__init__.py create mode 100644 cinder/tests/api/v2/stubs.py create mode 100644 cinder/tests/api/v2/test_limits.py create mode 100644 cinder/tests/api/v2/test_snapshot_metadata.py create mode 100644 cinder/tests/api/v2/test_snapshots.py create mode 100644 cinder/tests/api/v2/test_types.py create mode 100644 cinder/tests/api/v2/test_volumes.py create mode 100644 cinder/tests/backup/__init__.py create mode 100644 cinder/tests/backup/fake_service.py create mode 100644 cinder/tests/backup/fake_swift_client.py create mode 100644 cinder/tests/brick/__init__.py create mode 100644 cinder/tests/brick/test_brick_lvm.py create mode 100644 cinder/tests/db/__init__.py create mode 100644 cinder/tests/db/fakes.py create mode 100644 cinder/tests/declare_flags.py create mode 100644 cinder/tests/fake_driver.py create mode 100644 
cinder/tests/fake_flags.py create mode 100644 cinder/tests/fake_utils.py create mode 100644 cinder/tests/glance/__init__.py create mode 100644 cinder/tests/glance/stubs.py create mode 100644 cinder/tests/image/__init__.py create mode 100644 cinder/tests/image/fake.py create mode 100644 cinder/tests/image/test_glance.py create mode 100644 cinder/tests/integrated/__init__.py create mode 100644 cinder/tests/integrated/api/__init__.py create mode 100644 cinder/tests/integrated/api/client.py create mode 100644 cinder/tests/integrated/integrated_helpers.py create mode 100644 cinder/tests/integrated/test_extensions.py create mode 100644 cinder/tests/integrated/test_login.py create mode 100755 cinder/tests/integrated/test_volumes.py create mode 100644 cinder/tests/integrated/test_xml.py create mode 100644 cinder/tests/monkey_patch_example/__init__.py create mode 100644 cinder/tests/monkey_patch_example/example_a.py create mode 100644 cinder/tests/monkey_patch_example/example_b.py create mode 100644 cinder/tests/policy.json create mode 100644 cinder/tests/runtime_flags.py create mode 100644 cinder/tests/scheduler/__init__.py create mode 100644 cinder/tests/scheduler/fakes.py create mode 100644 cinder/tests/scheduler/test_capacity_weigher.py create mode 100644 cinder/tests/scheduler/test_filter_scheduler.py create mode 100644 cinder/tests/scheduler/test_host_filters.py create mode 100644 cinder/tests/scheduler/test_host_manager.py create mode 100644 cinder/tests/scheduler/test_rpcapi.py create mode 100644 cinder/tests/scheduler/test_scheduler.py create mode 100644 cinder/tests/scheduler/test_scheduler_options.py create mode 100644 cinder/tests/test_HpSanISCSIDriver.py create mode 100644 cinder/tests/test_api.py create mode 100644 cinder/tests/test_backup.py create mode 100644 cinder/tests/test_backup_swift.py create mode 100644 cinder/tests/test_context.py create mode 100644 cinder/tests/test_coraid.py create mode 100644 cinder/tests/test_drivers_compatibility.py create mode 100644 cinder/tests/test_emc.py create mode 100644 cinder/tests/test_exception.py create mode 100644 cinder/tests/test_flags.py create mode 100644 cinder/tests/test_glusterfs.py create mode 100644 cinder/tests/test_hp3par.py create mode 100644 cinder/tests/test_huawei.py create mode 100644 cinder/tests/test_iscsi.py create mode 100644 cinder/tests/test_migrations.conf create mode 100644 cinder/tests/test_migrations.py create mode 100644 cinder/tests/test_misc.py create mode 100644 cinder/tests/test_netapp.py create mode 100644 cinder/tests/test_netapp_nfs.py create mode 100644 cinder/tests/test_nexenta.py create mode 100644 cinder/tests/test_nfs.py create mode 100644 cinder/tests/test_policy.py create mode 100644 cinder/tests/test_quota.py create mode 100644 cinder/tests/test_rbd.py create mode 100644 cinder/tests/test_scality.py create mode 100644 cinder/tests/test_service.py create mode 100644 cinder/tests/test_share.py create mode 100644 cinder/tests/test_share_api.py create mode 100644 cinder/tests/test_share_driver.py create mode 100644 cinder/tests/test_share_lvm.py create mode 100644 cinder/tests/test_share_netapp.py create mode 100644 cinder/tests/test_share_rpcapi.py create mode 100644 cinder/tests/test_sheepdog.py create mode 100644 cinder/tests/test_skip_examples.py create mode 100644 cinder/tests/test_solidfire.py create mode 100755 cinder/tests/test_storwize_svc.py create mode 100644 cinder/tests/test_test.py create mode 100644 cinder/tests/test_test_utils.py create mode 100644 cinder/tests/test_utils.py create mode 
100644 cinder/tests/test_volume.py create mode 100644 cinder/tests/test_volume_configuration.py create mode 100644 cinder/tests/test_volume_glance_metadata.py create mode 100644 cinder/tests/test_volume_rpcapi.py create mode 100644 cinder/tests/test_volume_types.py create mode 100644 cinder/tests/test_volume_types_extra_specs.py create mode 100644 cinder/tests/test_volume_utils.py create mode 100644 cinder/tests/test_windows.py create mode 100644 cinder/tests/test_wsgi.py create mode 100644 cinder/tests/test_xenapi_sm.py create mode 100644 cinder/tests/test_xiv.py create mode 100644 cinder/tests/test_zadara.py create mode 100644 cinder/tests/utils.py create mode 100644 cinder/tests/var/ca.crt create mode 100644 cinder/tests/var/certificate.crt create mode 100644 cinder/tests/var/privatekey.key create mode 100644 cinder/tests/windows/__init__.py create mode 100644 cinder/tests/windows/basetestcase.py create mode 100644 cinder/tests/windows/db_fakes.py create mode 100644 cinder/tests/windows/mockproxy.py create mode 100644 cinder/tests/windows/stubs/README.rst create mode 100644 cinder/tests/windows/stubs/test_windows.TestWindowsDriver.test_check_for_setup_errors_wmi.p.gz create mode 100644 cinder/tests/windows/stubs/test_windows.TestWindowsDriver.test_create_export_os.p.gz create mode 100644 cinder/tests/windows/stubs/test_windows.TestWindowsDriver.test_create_export_wmi.p.gz create mode 100644 cinder/tests/windows/stubs/test_windows.TestWindowsDriver.test_create_snapshot_os.p.gz create mode 100644 cinder/tests/windows/stubs/test_windows.TestWindowsDriver.test_create_snapshot_wmi.p.gz create mode 100644 cinder/tests/windows/stubs/test_windows.TestWindowsDriver.test_create_volume_from_snapshot_os.p.gz create mode 100644 cinder/tests/windows/stubs/test_windows.TestWindowsDriver.test_create_volume_from_snapshot_wmi.p.gz create mode 100644 cinder/tests/windows/stubs/test_windows.TestWindowsDriver.test_create_volume_os.p.gz create mode 100644 cinder/tests/windows/stubs/test_windows.TestWindowsDriver.test_create_volume_wmi.p.gz create mode 100644 cinder/tests/windows/stubs/test_windows.TestWindowsDriver.test_delete_snapshot_os.p.gz create mode 100644 cinder/tests/windows/stubs/test_windows.TestWindowsDriver.test_delete_snapshot_wmi.p.gz create mode 100644 cinder/tests/windows/stubs/test_windows.TestWindowsDriver.test_delete_volume_os.p.gz create mode 100644 cinder/tests/windows/stubs/test_windows.TestWindowsDriver.test_delete_volume_wmi.p.gz create mode 100644 cinder/tests/windows/stubs/test_windows.TestWindowsDriver.test_ensure_export_os.p.gz create mode 100644 cinder/tests/windows/stubs/test_windows.TestWindowsDriver.test_ensure_export_wmi.p.gz create mode 100644 cinder/tests/windows/stubs/test_windows.TestWindowsDriver.test_initialize_connection_os.p.gz create mode 100644 cinder/tests/windows/stubs/test_windows.TestWindowsDriver.test_initialize_connection_wmi.p.gz create mode 100644 cinder/tests/windows/stubs/test_windows.TestWindowsDriver.test_remove_export_os.p.gz create mode 100644 cinder/tests/windows/stubs/test_windows.TestWindowsDriver.test_remove_export_wmi.p.gz create mode 100644 cinder/tests/windows/windowsutils.py create mode 100644 cinder/tests/xenapi/__init__.py create mode 100644 cinder/utils.py create mode 100644 cinder/version.py create mode 100644 cinder/volume/__init__.py create mode 100644 cinder/volume/api.py create mode 100644 cinder/volume/configuration.py create mode 100644 cinder/volume/driver.py create mode 100644 cinder/volume/drivers/__init__.py create mode 100644 
cinder/volume/drivers/coraid.py create mode 100644 cinder/volume/drivers/emc/__init__.py create mode 100644 cinder/volume/drivers/emc/cinder_emc_config.xml.sample create mode 100644 cinder/volume/drivers/emc/emc_smis_common.py create mode 100644 cinder/volume/drivers/emc/emc_smis_iscsi.py create mode 100644 cinder/volume/drivers/glusterfs.py create mode 100644 cinder/volume/drivers/huawei/__init__.py create mode 100644 cinder/volume/drivers/huawei/cinder_huawei_conf.xml.sample create mode 100644 cinder/volume/drivers/huawei/huawei_iscsi.py create mode 100644 cinder/volume/drivers/lvm.py create mode 100644 cinder/volume/drivers/netapp/__init__.py create mode 100644 cinder/volume/drivers/netapp/api.py create mode 100644 cinder/volume/drivers/netapp/iscsi.py create mode 100644 cinder/volume/drivers/netapp/nfs.py create mode 100644 cinder/volume/drivers/nexenta/__init__.py create mode 100644 cinder/volume/drivers/nexenta/jsonrpc.py create mode 100644 cinder/volume/drivers/nexenta/volume.py create mode 100755 cinder/volume/drivers/nfs.py create mode 100644 cinder/volume/drivers/rbd.py create mode 100644 cinder/volume/drivers/san/__init__.py create mode 100644 cinder/volume/drivers/san/hp/__init__.py create mode 100644 cinder/volume/drivers/san/hp/hp_3par_common.py create mode 100644 cinder/volume/drivers/san/hp/hp_3par_fc.py create mode 100644 cinder/volume/drivers/san/hp/hp_3par_iscsi.py create mode 100644 cinder/volume/drivers/san/hp_lefthand.py create mode 100644 cinder/volume/drivers/san/san.py create mode 100644 cinder/volume/drivers/san/solaris.py create mode 100644 cinder/volume/drivers/scality.py create mode 100644 cinder/volume/drivers/sheepdog.py create mode 100644 cinder/volume/drivers/solidfire.py create mode 100755 cinder/volume/drivers/storwize_svc.py create mode 100644 cinder/volume/drivers/windows.py create mode 100644 cinder/volume/drivers/xenapi/__init__.py create mode 100644 cinder/volume/drivers/xenapi/lib.py create mode 100644 cinder/volume/drivers/xenapi/sm.py create mode 100644 cinder/volume/drivers/xenapi/tools.py create mode 100644 cinder/volume/drivers/xiv.py create mode 100644 cinder/volume/drivers/zadara.py create mode 100644 cinder/volume/manager.py create mode 100644 cinder/volume/rpcapi.py create mode 100644 cinder/volume/utils.py create mode 100644 cinder/volume/volume_types.py create mode 100644 cinder/wsgi.py create mode 100644 contrib/redhat-eventlet.patch create mode 100644 doc/.gitignore create mode 100644 doc/Makefile create mode 100644 doc/README.rst create mode 100644 doc/ext/__init__.py create mode 100644 doc/ext/cinder_autodoc.py create mode 100644 doc/ext/cinder_todo.py create mode 100755 doc/find_autodoc_modules.sh create mode 100755 doc/generate_autodoc_index.sh create mode 100644 doc/source/_ga/layout.html create mode 100644 doc/source/_static/.gitignore create mode 100644 doc/source/_static/.placeholder create mode 100644 doc/source/_static/basic.css create mode 100644 doc/source/_static/default.css create mode 100644 doc/source/_static/jquery.tweet.js create mode 100644 doc/source/_static/tweaks.css create mode 100644 doc/source/_templates/.gitignore create mode 100644 doc/source/_templates/.placeholder create mode 100644 doc/source/_theme/layout.html create mode 100644 doc/source/_theme/theme.conf create mode 100644 doc/source/conf.py create mode 100644 doc/source/devref/addmethod.openstackapi.rst create mode 100644 doc/source/devref/api.rst create mode 100644 doc/source/devref/architecture.rst create mode 100644 doc/source/devref/auth.rst create 
mode 100644 doc/source/devref/cinder.rst create mode 100644 doc/source/devref/database.rst create mode 100644 doc/source/devref/development.environment.rst create mode 100644 doc/source/devref/fakes.rst create mode 100644 doc/source/devref/gerrit.rst create mode 100644 doc/source/devref/il8n.rst create mode 100644 doc/source/devref/index.rst create mode 100644 doc/source/devref/jenkins.rst create mode 100644 doc/source/devref/launchpad.rst create mode 100644 doc/source/devref/rpc.rst create mode 100644 doc/source/devref/scheduler.rst create mode 100644 doc/source/devref/services.rst create mode 100644 doc/source/devref/threading.rst create mode 100644 doc/source/devref/unit_tests.rst create mode 100644 doc/source/devref/volume.rst create mode 100644 doc/source/images/rpc/arch.png create mode 100644 doc/source/images/rpc/arch.svg create mode 100644 doc/source/images/rpc/flow1.png create mode 100644 doc/source/images/rpc/flow1.svg create mode 100644 doc/source/images/rpc/flow2.png create mode 100644 doc/source/images/rpc/flow2.svg create mode 100644 doc/source/images/rpc/rabt.png create mode 100644 doc/source/images/rpc/rabt.svg create mode 100644 doc/source/images/rpc/state.png create mode 100644 doc/source/index.rst create mode 100644 doc/source/man/cinder-manage.rst create mode 100644 etc/cinder/api-paste.ini create mode 100644 etc/cinder/cinder.conf.sample create mode 100644 etc/cinder/logging_sample.conf create mode 100644 etc/cinder/policy.json create mode 100644 etc/cinder/rootwrap.conf create mode 100644 etc/cinder/rootwrap.d/share.filters create mode 100644 etc/cinder/rootwrap.d/volume.filters create mode 100644 openstack-common.conf create mode 100644 pylintrc create mode 100755 run_tests.sh create mode 100644 setup.cfg create mode 100644 setup.py create mode 100644 tools/conf/extract_opts.py create mode 100755 tools/conf/generate_sample.sh create mode 100755 tools/enable-pre-commit-hook.sh create mode 100644 tools/install_venv.py create mode 100644 tools/install_venv_common.py create mode 100755 tools/lintstack.py create mode 100755 tools/lintstack.sh create mode 100644 tools/patch_tox_venv.py create mode 100644 tools/pip-requires create mode 100644 tools/test-requires create mode 100755 tools/with_venv.sh create mode 100644 tox.ini

diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
new file mode 100644
index 0000000000..836c0a0234
--- /dev/null
+++ b/CONTRIBUTING.md
@@ -0,0 +1,12 @@
+If you would like to contribute to the development of OpenStack,
+you must follow the steps in the "If you're a developer, start here"
+section of this page: [http://wiki.openstack.org/HowToContribute](http://wiki.openstack.org/HowToContribute#If_you.27re_a_developer.2C_start_here:)
+
+Once those steps have been completed, changes to OpenStack
+should be submitted for review via the Gerrit tool, following
+the workflow documented at [http://wiki.openstack.org/GerritWorkflow](http://wiki.openstack.org/GerritWorkflow).
+
+Pull requests submitted through GitHub will be ignored.
+
+Bugs should be filed [on Launchpad](https://bugs.launchpad.net/cinder),
+not in GitHub's issue tracker.
diff --git a/HACKING.rst b/HACKING.rst
new file mode 100644
index 0000000000..eecb286b51
--- /dev/null
+++ b/HACKING.rst
@@ -0,0 +1,275 @@
+Cinder Style Commandments
+=========================
+
+- Step 1: Read http://www.python.org/dev/peps/pep-0008/
+- Step 2: Read http://www.python.org/dev/peps/pep-0008/ again
+- Step 3: Read on
+
+
+General
+-------
+- Put two newlines between top-level code (funcs, classes, etc.)
+- Put one newline between methods in classes and anywhere else
+- Long lines should be wrapped in parentheses
+  in preference to using a backslash for line continuation.
+- Do not write "except:"; use "except Exception:" at the very least
+- Include your name with TODOs as in "#TODO(termie)"
+- Do not shadow a built-in or reserved word. Example::
+
+    def list():
+        return [1, 2, 3]
+
+    mylist = list()  # BAD, shadows `list` built-in
+
+    class Foo(object):
+        def list(self):
+            return [1, 2, 3]
+
+    mylist = Foo().list()  # OKAY, does not shadow built-in
+
+- Use the "is not" operator when testing for unequal identities. Example::
+
+    if not X is Y:  # BAD, intended behavior is ambiguous
+        pass
+
+    if X is not Y:  # OKAY, intuitive
+        pass
+
+- Use the "not in" operator for evaluating membership in a collection.
+  Example::
+
+    if not X in Y:  # BAD, intended behavior is ambiguous
+        pass
+
+    if X not in Y:  # OKAY, intuitive
+        pass
+
+    if not (X in Y or X in Z):  # OKAY, still better than all those 'not's
+        pass
+
+
+Imports
+-------
+- Do not import objects, only modules (*)
+- Do not import more than one module per line (*)
+- Do not make relative imports
+- Order your imports by the full module path
+- Organize your imports according to the following template
+
+(*) exceptions are:
+
+- imports from ``migrate`` package
+- imports from ``sqlalchemy`` package
+- imports from ``cinder.db.sqlalchemy.session`` module
+
+Example::
+
+  # vim: tabstop=4 shiftwidth=4 softtabstop=4
+  {{stdlib imports in human alphabetical order}}
+  \n
+  {{third-party lib imports in human alphabetical order}}
+  \n
+  {{cinder imports in human alphabetical order}}
+  \n
+  \n
+  {{begin your code}}
+
+
+Human Alphabetical Order Examples
+---------------------------------
+Example::
+
+  import httplib
+  import logging
+  import random
+  import StringIO
+  import time
+  import unittest
+
+  import eventlet
+  import webob.exc
+
+  import cinder.api.ec2
+  from cinder.api import openstack
+  from cinder.auth import users
+  from cinder.endpoint import cloud
+  import cinder.flags
+  from cinder import test
+
+
+Docstrings
+----------
+Example::
+
+  """A one line docstring looks like this and ends in a period."""
+
+
+  """A multi line docstring has a one-line summary, less than 80 characters.
+
+  Then a new paragraph after a newline that explains in more detail any
+  general information about the function, class or method. Example usages
+  are also great to have here if it is a complex class or function.
+
+  When writing the docstring for a class, an extra line should be placed
+  after the closing quotations. For more in-depth explanations for these
+  decisions see http://www.python.org/dev/peps/pep-0257/
+
+  If you are going to describe parameters and return values, use Sphinx; the
+  appropriate syntax is as follows.
+
+  :param foo: the foo parameter
+  :param bar: the bar parameter
+  :returns: return_type -- description of the return value
+  :returns: description of the return value
+  :raises: AttributeError, KeyError
+  """
+
+
+Dictionaries/Lists
+------------------
+If a dictionary (dict) or list object is longer than 80 characters, its items
+should be split with newlines. Embedded iterables should have their items
+indented. Additionally, the last item in the dictionary should have a trailing
+comma. This increases readability and simplifies future diffs.
+
+Example::
+
+  my_dictionary = {
+      "image": {
+          "name": "Just a Snapshot",
+          "size": 2749573,
+          "properties": {
+              "user_id": 12,
+              "arch": "x86_64",
+          },
+          "things": [
+              "thing_one",
+              "thing_two",
+          ],
+          "status": "ACTIVE",
+      },
+  }
+
+
+Calling Methods
+---------------
+Calls to methods that are 80 characters or longer should format each argument
+on its own line. This is not a requirement, but a guideline::
+
+    unnecessarily_long_function_name('string one',
+                                     'string two',
+                                     kwarg1=constants.ACTIVE,
+                                     kwarg2=['a', 'b', 'c'])
+
+
+Rather than constructing parameters inline, it is better to break things up::
+
+    list_of_strings = [
+        'what_a_long_string',
+        'not as long',
+    ]
+
+    dict_of_numbers = {
+        'one': 1,
+        'two': 2,
+        'twenty four': 24,
+    }
+
+    object_one.call_a_method('string three',
+                             'string four',
+                             kwarg1=list_of_strings,
+                             kwarg2=dict_of_numbers)
+
+
+Internationalization (i18n) Strings
+-----------------------------------
+In order to support multiple languages, we have a mechanism to support
+automatic translations of exception and log strings.
+
+Example::
+
+    msg = _("An error occurred")
+    raise HTTPBadRequest(explanation=msg)
+
+If you have a variable to place within the string, first internationalize
+the template string, then do the replacement.
+
+Example::
+
+    msg = _("Missing parameter: %s") % ("flavor",)
+    LOG.error(msg)
+
+If you have multiple variables to place in the string, use keyword parameters.
+This helps our translators reorder parameters when needed.
+
+Example::
+
+    msg = _("The server with id %(s_id)s has no key %(m_key)s")
+    LOG.error(msg % {"s_id": "1234", "m_key": "imageId"})
+
+
+Creating Unit Tests
+-------------------
+For every new feature, unit tests should be created that both test and
+(implicitly) document the usage of said feature. If submitting a patch for a
+bug that had no unit test, a new passing unit test should be added. If a
+submitted bug fix does have a unit test, be sure it fails without the patch
+and passes with the patch.
+
+For more information on creating unit tests and utilizing the testing
+infrastructure in OpenStack Cinder, please read cinder/testing/README.rst.
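+
+As an illustration only (not part of the official guidelines), a regression
+test for a hypothetical bug fix might look like the following minimal sketch;
+the helper and test names here are invented for the example::
+
+    import unittest
+
+    def parse_size(value):
+        """Hypothetical helper whose bug fix is under test."""
+        # The fix: reject negative sizes instead of silently accepting them.
+        size = int(value)
+        if size < 0:
+            raise ValueError("size must be non-negative")
+        return size
+
+    class ParseSizeTestCase(unittest.TestCase):
+        """Fails without the fix above, passes with it."""
+
+        def test_positive_size(self):
+            self.assertEqual(10, parse_size("10"))
+
+        def test_negative_size_rejected(self):
+            self.assertRaises(ValueError, parse_size, "-1")
+
+    if __name__ == '__main__':
+        unittest.main()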
Follow these guidelines: + + First, provide a brief summary (it is recommended to keep the commit title + under 50 chars). + + The first line of the commit message should provide an accurate + description of the change, not just a reference to a bug or + blueprint. It must be followed by a single blank line. + + If the change relates to a specific driver (libvirt, xenapi, qpid, etc...), + begin the first line of the commit message with the driver name, lowercased, + followed by a colon. + + Following your brief summary, provide a more detailed description of + the patch, manually wrapping the text at 72 characters. This + description should provide enough detail that one does not have to + refer to external resources to determine its high-level functionality. + + Once you use 'git review', two lines will be appended to the commit + message: a blank line followed by a 'Change-Id'. This is important + to correlate this commit with a specific review in Gerrit, and it + should not be modified. + +For further information on constructing high quality commit messages, +and how to split up commits into a series of changes, consult the +project wiki: + + http://wiki.openstack.org/GitCommitMessages diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000000..68c771a099 --- /dev/null +++ b/LICENSE @@ -0,0 +1,176 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + diff --git a/MANIFEST.in b/MANIFEST.in new file mode 100644 index 0000000000..c978a52dae --- /dev/null +++ b/MANIFEST.in @@ -0,0 +1,6 @@ +include AUTHORS +include ChangeLog +exclude .gitignore +exclude .gitreview + +global-exclude *.pyc diff --git a/README.rst b/README.rst new file mode 100644 index 0000000000..822401a80a --- /dev/null +++ b/README.rst @@ -0,0 +1,21 @@ +The Choose Your Own Adventure README for Cinder +=============================================== + +You have come across a storage service for an open cloud computing service. +It has identified itself as "Cinder." It was abstracted from the Nova project. + +To monitor it from a distance: follow `@openstack `_ on twitter. + +To tame it for use in your own cloud: read http://docs.openstack.org + +To study its anatomy: read http://cinder.openstack.org + +To dissect it in detail: visit http://github.com/openstack/cinder + +To taunt it with its weaknesses: use http://bugs.launchpad.net/cinder + +To watch it: http://jenkins.openstack.org + +To hack at it: read HACKING + +To cry over its pylint problems: http://jenkins.openstack.org/job/cinder-pylint/violations diff --git a/babel.cfg b/babel.cfg new file mode 100644 index 0000000000..15cd6cb76b --- /dev/null +++ b/babel.cfg @@ -0,0 +1,2 @@ +[python: **.py] + diff --git a/bin/cinder-all b/bin/cinder-all new file mode 100755 index 0000000000..9591d1574f --- /dev/null +++ b/bin/cinder-all @@ -0,0 +1,70 @@ +#!/usr/bin/env python +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack, LLC +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +"""Starter script for All cinder services. + +This script attempts to start all the cinder services in one process. Each +service is started in its own greenthread. Please note that exceptions and +sys.exit() on the starting of a service are logged and the script will +continue attempting to launch the rest of the services. + +""" + +import eventlet +eventlet.monkey_patch() + +import os +import sys + + +possible_topdir = os.path.normpath(os.path.join(os.path.abspath( + sys.argv[0]), os.pardir, os.pardir)) +if os.path.exists(os.path.join(possible_topdir, "cinder", "__init__.py")): + sys.path.insert(0, possible_topdir) + +from cinder.openstack.common import gettextutils +gettextutils.install('cinder') + +from cinder import flags +from cinder.openstack.common import log as logging +from cinder import service +from cinder import utils + + +if __name__ == '__main__': + flags.parse_args(sys.argv) + logging.setup("cinder") + LOG = logging.getLogger('cinder.all') + + utils.monkey_patch() + servers = [] + # cinder-api + try: + servers.append(service.WSGIService('osapi_volume')) + except (Exception, SystemExit): + LOG.exception(_('Failed to load osapi_volume')) + + for binary in ['cinder-volume', 'cinder-scheduler']: + try: + servers.append(service.Service.create(binary=binary)) + except (Exception, SystemExit): + LOG.exception(_('Failed to load %s'), binary) + service.serve(*servers) + service.wait() diff --git a/bin/cinder-api b/bin/cinder-api new file mode 100755 index 0000000000..0f05b519c8 --- /dev/null +++ b/bin/cinder-api @@ -0,0 +1,52 @@ +#!/usr/bin/env python +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Starter script for Cinder OS API.""" + +# NOTE(jdg): If we port over multi worker code from Nova +# we'll need to set monkey_patch(os=False), unless +# eventlet is updated/released to fix the root issue + +import eventlet +eventlet.monkey_patch() + +import os +import sys + + +possible_topdir = os.path.normpath(os.path.join(os.path.abspath( + sys.argv[0]), os.pardir, os.pardir)) +if os.path.exists(os.path.join(possible_topdir, "cinder", "__init__.py")): + sys.path.insert(0, possible_topdir) + +from cinder.openstack.common import gettextutils +gettextutils.install('cinder') + +from cinder import flags +from cinder.openstack.common import log as logging +from cinder import service +from cinder import utils + +if __name__ == '__main__': + flags.parse_args(sys.argv) + logging.setup("cinder") + utils.monkey_patch() + server = service.WSGIService('osapi_volume') + service.serve(server) + service.wait() diff --git a/bin/cinder-backup b/bin/cinder-backup new file mode 100755 index 0000000000..71f83006a2 --- /dev/null +++ b/bin/cinder-backup @@ -0,0 +1,50 @@ +#!/usr/bin/env python + +# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Starter script for Cinder Volume Backup.""" + +import os +import sys + +import eventlet + +eventlet.monkey_patch() + +# If ../cinder/__init__.py exists, add ../ to Python search path, so that +# it will override what happens to be installed in /usr/(local/)lib/python... +possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), + os.pardir, + os.pardir)) +if os.path.exists(os.path.join(possible_topdir, 'cinder', '__init__.py')): + sys.path.insert(0, possible_topdir) + +from cinder.openstack.common import gettextutils +gettextutils.install('cinder') + +from cinder import flags +from cinder.openstack.common import log as logging +from cinder import service +from cinder import utils + +if __name__ == '__main__': + flags.parse_args(sys.argv) + logging.setup("cinder") + utils.monkey_patch() + launcher = service.ProcessLauncher() + server = service.Service.create(binary='cinder-backup') + launcher.launch_server(server) + launcher.wait() diff --git a/bin/cinder-clear-rabbit-queues b/bin/cinder-clear-rabbit-queues new file mode 100755 index 0000000000..684a56578a --- /dev/null +++ b/bin/cinder-clear-rabbit-queues @@ -0,0 +1,76 @@ +#!/usr/bin/env python +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 OpenStack, LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Admin/debug script to wipe rabbitMQ (AMQP) queues cinder uses.
+   This can be used if you need to change durable options on queues,
+   or to wipe all messages in the queue system if things are in a
+   seriously bad way.
+
+"""
+
+import datetime
+import os
+import sys
+import time
+
+# If ../cinder/__init__.py exists, add ../ to Python search path, so that
+# it will override what happens to be installed in /usr/(local/)lib/python...
+POSSIBLE_TOPDIR = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
+                                                os.pardir,
+                                                os.pardir))
+if os.path.exists(os.path.join(POSSIBLE_TOPDIR, 'cinder', '__init__.py')):
+    sys.path.insert(0, POSSIBLE_TOPDIR)
+
+from cinder.openstack.common import gettextutils
+gettextutils.install('cinder')
+
+from oslo.config import cfg
+
+from cinder import context
+from cinder import exception
+from cinder import flags
+from cinder.openstack.common import log as logging
+from cinder.openstack.common import rpc
+
+delete_exchange_opt = cfg.BoolOpt(
+    'delete_exchange',
+    default=False,
+    help='delete cinder exchange too.')
+
+FLAGS = flags.FLAGS
+FLAGS.register_cli_opt(delete_exchange_opt)
+
+
+def delete_exchange(exch):
+    conn = rpc.create_connection()
+    x = conn.get_channel()
+    x.exchange_delete(exch)
+
+
+def delete_queues(queues):
+    conn = rpc.create_connection()
+    x = conn.get_channel()
+    for q in queues:
+        x.queue_delete(q)
+
+if __name__ == '__main__':
+    args = flags.parse_args(sys.argv)
+    logging.setup("cinder")
+    delete_queues(args[1:])
+    if FLAGS.delete_exchange:
+        delete_exchange(FLAGS.control_exchange)
diff --git a/bin/cinder-manage b/bin/cinder-manage
new file mode 100755
index 0000000000..63f638fc43
--- /dev/null
+++ b/bin/cinder-manage
@@ -0,0 +1,820 @@
+#!/usr/bin/env python
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# Interactive shell based on Django:
+#
+# Copyright (c) 2005, the Lawrence Journal-World
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+#     1. Redistributions of source code must retain the above copyright notice,
+#        this list of conditions and the following disclaimer.
+#
+#     2. Redistributions in binary form must reproduce the above copyright
+#        notice, this list of conditions and the following disclaimer in the
+#        documentation and/or other materials provided with the distribution.
+#
+#     3. Neither the name of Django nor the names of its contributors may be
+#        used to endorse or promote products derived from this software without
+#        specific prior written permission.
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +""" + CLI interface for cinder management. +""" + +import os +import sys +import uuid + +from sqlalchemy import create_engine, MetaData, Table +from sqlalchemy.ext.declarative import declarative_base +from sqlalchemy.orm import sessionmaker + + +# If ../cinder/__init__.py exists, add ../ to Python search path, so that +# it will override what happens to be installed in /usr/(local/)lib/python... +POSSIBLE_TOPDIR = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), + os.pardir, + os.pardir)) +if os.path.exists(os.path.join(POSSIBLE_TOPDIR, 'cinder', '__init__.py')): + sys.path.insert(0, POSSIBLE_TOPDIR) + +from cinder.openstack.common import gettextutils +gettextutils.install('cinder') + +from oslo.config import cfg + +from cinder import context +from cinder import db +from cinder.db import migration +from cinder import exception +from cinder import flags +from cinder.openstack.common import log as logging +from cinder.openstack.common import rpc +from cinder.openstack.common import uuidutils +from cinder import utils +from cinder import version + +FLAGS = flags.FLAGS + + +# Decorators for actions +def args(*args, **kwargs): + def _decorator(func): + func.__dict__.setdefault('args', []).insert(0, (args, kwargs)) + return func + return _decorator + + +def param2id(object_id): + """Helper function to convert various id types to internal id. + args: [object_id], e.g. 'vol-0000000a' or 'volume-0000000a' or '10' + """ + if uuidutils.is_uuid_like(object_id): + return object_id + elif '-' in object_id: + # FIXME(ja): mapping occurs in nova? + pass + else: + return int(object_id) + + +class ShellCommands(object): + def bpython(self): + """Runs a bpython shell. + + Falls back to Ipython/python shell if unavailable""" + self.run('bpython') + + def ipython(self): + """Runs an Ipython shell. + + Falls back to Python shell if unavailable""" + self.run('ipython') + + def python(self): + """Runs a python shell. + + Falls back to Python shell if unavailable""" + self.run('python') + + @args('--shell', dest="shell", + metavar='', + help='Python shell') + def run(self, shell=None): + """Runs a Python interactive interpreter.""" + if not shell: + shell = 'bpython' + + if shell == 'bpython': + try: + import bpython + bpython.embed() + except ImportError: + shell = 'ipython' + if shell == 'ipython': + try: + import IPython + # Explicitly pass an empty list as arguments, because + # otherwise IPython would use sys.argv from this script. + shell = IPython.Shell.IPShell(argv=[]) + shell.mainloop() + except ImportError: + shell = 'python' + + if shell == 'python': + import code + try: + # Try activating rlcompleter, because it's handy. 
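+                # readline only ships with POSIX builds of Python, which is
+                # why the import below is guarded by an ImportError handler.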
+                import readline
+            except ImportError:
+                pass
+            else:
+                # We don't have to wrap the following import in a 'try',
+                # because we already know 'readline' was imported successfully.
+                import rlcompleter
+                readline.parse_and_bind("tab:complete")
+            code.interact()
+
+    @args('--path', required=True, help='Script path')
+    def script(self, path):
+        """Runs the script from the specified path with flags set properly.
+        arguments: path"""
+        exec(compile(open(path).read(), path, 'exec'), locals(), globals())
+
+
+def _db_error(caught_exception):
+    print caught_exception
+    print _("The above error may show that the database has not "
+            "been created.\nPlease create a database using "
+            "'cinder-manage db sync' before running this command.")
+    exit(1)
+
+
+class HostCommands(object):
+    """List hosts."""
+
+    @args('zone', nargs='?', default=None,
+          help='Availability Zone (default: %(default)s)')
+    def list(self, zone=None):
+        """Show a list of all physical hosts. Filter by zone.
+        args: [zone]"""
+        print "%-25s\t%-15s" % (_('host'),
+                                _('zone'))
+        ctxt = context.get_admin_context()
+        services = db.service_get_all(ctxt)
+        if zone:
+            services = [s for s in services if s['availability_zone'] == zone]
+        hosts = []
+        for srv in services:
+            if not [h for h in hosts if h['host'] == srv['host']]:
+                hosts.append(srv)
+
+        for h in hosts:
+            print "%-25s\t%-15s" % (h['host'], h['availability_zone'])
+
+
+class DbCommands(object):
+    """Class for managing the database."""
+
+    def __init__(self):
+        pass
+
+    @args('version', nargs='?', default=None,
+          help='Database version')
+    def sync(self, version=None):
+        """Sync the database up to the most recent version."""
+        return migration.db_sync(version)
+
+    def version(self):
+        """Print the current database version."""
+        print migration.db_version()
+
+
+class VersionCommands(object):
+    """Class for exposing the codebase version."""
+
+    def __init__(self):
+        pass
+
+    def list(self):
+        print(version.version_string())
+
+    def __call__(self):
+        self.list()
+
+
+class ImportCommands(object):
+    """Methods for importing Nova volumes to Cinder.
+
+    EXPECTATIONS:
+    These methods will do two things:
+    1. Import relevant Nova DB info into Cinder
+    2. Import persistent tgt files from Nova to Cinder (see copy_tgt_files)
+
+    If you're using VG's (local storage) for your backend YOU MUST install
+    Cinder on the same node that you're migrating from.
+    """
+    def __init__(self):
+        pass
+
+    def _map_table(self, table):
+        class Mapper(declarative_base()):
+            __table__ = table
+        return Mapper
+
+    def _open_session(self, con_info):
+        # Note(jdg): The echo option below sets whether to display db command
+        # debug info.
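+        # create_engine() just records the connection URL; SQLAlchemy opens
+        # the actual DB connection lazily on first use. sessionmaker() builds
+        # a session factory bound to that engine, and both are returned so
+        # callers can run ORM queries and reflect tables with MetaData.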
+        engine = create_engine(con_info,
+                               convert_unicode=True,
+                               echo=False)
+        session = sessionmaker(bind=engine)
+        return (session(), engine)
+
+    def _backup_cinder_db(self):
+        # First, dump the dest_db as a backup in case this goes wrong
+        cinder_dump = utils.execute('mysqldump', 'cinder')
+        if 'Dump completed on' in cinder_dump[0]:
+            with open('./cinder_db_bkup.sql', 'w+') as fo:
+                for line in cinder_dump:
+                    fo.write(line)
+        else:
+            raise exception.InvalidResults()
+
+    def _import_db(self, src_db, dest_db, backup_db):
+        # Remember order matters due to FK's
+        table_list = ['sm_flavors',
+                      'sm_backend_config',
+                      'snapshots',
+                      'volume_types',
+                      'volumes',
+                      'iscsi_targets',
+                      'sm_volume',
+                      'volume_metadata',
+                      'volume_type_extra_specs']
+
+        quota_table_list = ['quota_classes',
+                            'quota_usages',
+                            'quotas',
+                            'reservations']
+
+        if backup_db > 0:
+            if 'mysql:' not in dest_db:
+                print (_('Sorry, only mysql backups are supported!'))
+                raise exception.InvalidRequest()
+            else:
+                self._backup_cinder_db()
+
+        (src, src_engine) = self._open_session(src_db)
+        src_meta = MetaData(bind=src_engine)
+        (dest, dest_engine) = self._open_session(dest_db)
+
+        # First make sure nova is at Folsom
+        table = Table('migrate_version', src_meta, autoload=True)
+        if src.query(table).first().version < 132:
+            print (_('ERROR: Specified Nova DB is not at a compatible '
+                     'migration version!\nNova must be at Folsom or newer '
+                     'to import into Cinder database.'))
+            sys.exit(2)
+
+        for table_name in table_list:
+            print (_('Importing table %s...') % table_name)
+            table = Table(table_name, src_meta, autoload=True)
+            new_row = self._map_table(table)
+            columns = table.columns.keys()
+            for row in src.query(table).all():
+                data = dict([(str(column), getattr(row, column))
+                             for column in columns])
+                dest.add(new_row(**data))
+            dest.commit()
+
+        for table_name in quota_table_list:
+            print (_('Importing table %s...') % table_name)
+            table = Table(table_name, src_meta, autoload=True)
+            new_row = self._map_table(table)
+            columns = table.columns.keys()
+            for row in src.query(table).all():
+                if row.resource == 'gigabytes' or row.resource == 'volumes':
+                    data = dict([(str(column), getattr(row, column))
+                                 for column in columns])
+                    dest.add(new_row(**data))
+            dest.commit()
+
+    @args('src', metavar='',
+          help='db-engine://db_user[:passwd]@db_host[:port]\t\t'
+               'example: mysql://root:secrete@192.168.137.1')
+    @args('dest', metavar='',
+          help='db-engine://db_user[:passwd]@db_host[:port]\t\t'
+               'example: mysql://root:secrete@192.168.137.1')
+    @args('--backup', metavar='<0|1>', choices=[0, 1], default=1,
+          help='Perform mysqldump of cinder db before writing to it'
+               ' (default: %(default)d)')
+    def import_db(self, src_db, dest_db, backup_db=1):
+        """Import relevant volume DB entries from Nova into Cinder.
+
+        NOTE:
+        Your Cinder DB should be clean WRT volume entries.
+
+        NOTE:
+        We take an sqldump of the cinder DB before mods.
+        If you're not using mysql, set backup_db=0
+        and create your own backup.
+        """
+        src_db = '%s/nova' % src_db
+        dest_db = '%s/cinder' % dest_db
+        self._import_db(src_db, dest_db, backup_db)
+
+    @args('src',
+          help='e.g. [login@src_host:]/opt/stack/nova/volumes/')
+    @args('dest', nargs='?', default=None,
+          help='e.g. (login@src_host:/opt/stack/cinder/volumes/) '
+               'optional, if omitted, \'volume_dir\' in config will be used')
+    def copy_ptgt_files(self, src_tgts, dest_tgts=None):
+        """Copy persistent scsi tgt files from nova to cinder.
+
+        Default destination is FLAGS.volumes_dir or state_path/volumes/
+
+        PREREQUISITES:
+        Persistent tgts were introduced in Folsom. If you're running
+        Essex or an earlier release, this script is unnecessary.
+
+        NOTE:
+        If you're using local VG's and LVM for your nova volume backend
+        there's no point in copying these files over.  Leave them on
+        your Nova system as they won't do any good here.
+        """
+        if dest_tgts is None:
+            try:
+                dest_tgts = FLAGS.volumes_dir
+            except Exception:
+                dest_tgts = '%s/volumes' % FLAGS.state_path
+
+        utils.execute('rsync', '-avz', src_tgts, dest_tgts)
+
+
+class VolumeCommands(object):
+    """Methods for dealing with a cloud in an odd state."""
+
+    @args('volume_id',
+          help='Volume ID to be deleted')
+    def delete(self, volume_id):
+        """Delete a volume, bypassing the check that it
+        must be available."""
+        ctxt = context.get_admin_context()
+        volume = db.volume_get(ctxt, param2id(volume_id))
+        host = volume['host']
+
+        if not host:
+            print "Volume not yet assigned to host."
+            print "Deleting volume from database and skipping rpc."
+            db.volume_destroy(ctxt, param2id(volume_id))
+            return
+
+        if volume['status'] == 'in-use':
+            print "Volume is in-use."
+            print "Detach volume from instance and then try again."
+            return
+
+        rpc.cast(ctxt,
+                 rpc.queue_get_for(ctxt, FLAGS.volume_topic, host),
+                 {"method": "delete_volume",
+                  "args": {"volume_id": volume['id']}})
+
+    @args('volume_id',
+          help='Volume ID to be reattached')
+    def reattach(self, volume_id):
+        """Re-attach a volume that has previously been attached
+        to an instance.  Typically called after a compute host
+        has been rebooted."""
+        ctxt = context.get_admin_context()
+        volume = db.volume_get(ctxt, param2id(volume_id))
+        if not volume['instance_id']:
+            print "volume is not attached to an instance"
+            return
+        instance = db.instance_get(ctxt, volume['instance_id'])
+        host = instance['host']
+        rpc.cast(ctxt,
+                 rpc.queue_get_for(ctxt, FLAGS.compute_topic, host),
+                 {"method": "attach_volume",
+                  "args": {"instance_id": instance['id'],
+                           "volume_id": volume['id'],
+                           "mountpoint": volume['mountpoint']}})
+
+
+class StorageManagerCommands(object):
+    """Class for managing Storage Backends and Flavors."""
+
+    @args('flavor', nargs='?',
+          help='flavor to be listed')
+    def flavor_list(self, flavor=None):
+        ctxt = context.get_admin_context()
+
+        try:
+            if flavor is None:
+                flavors = db.sm_flavor_get_all(ctxt)
+            else:
+                flavors = db.sm_flavor_get(ctxt, flavor)
+        except exception.NotFound as ex:
+            print "error: %s" % ex
+            sys.exit(2)
+
+        print "%-18s\t%-20s\t%s" % (_('id'),
+                                    _('Label'),
+                                    _('Description'))
+
+        for flav in flavors:
+            print "%-18s\t%-20s\t%s" % (
+                flav['id'],
+                flav['label'],
+                flav['description'])
+
+    @args('label', help='flavor label')
+    @args('desc', help='flavor description')
+    def flavor_create(self, label, desc):
+        # TODO(renukaapte) flavor name must be unique
+        try:
+            db.sm_flavor_create(context.get_admin_context(),
+                                dict(label=label,
+                                     description=desc))
+        except exception.DBError as e:
+            _db_error(e)
+
+    @args('label', help='label of flavor to be deleted')
+    def flavor_delete(self, label):
+        try:
+            db.sm_flavor_delete(context.get_admin_context(), label)
+
+        except exception.DBError as e:
+            _db_error(e)
+
+    def _splitfun(self, item):
+        i = item.split("=")
+        return i[0:2]
+
+    @args('backend_conf_id', nargs='?', default=None)
+    def backend_list(self, backend_conf_id=None):
+        ctxt = context.get_admin_context()
+
+        try:
+            if backend_conf_id is None:
+                backends = db.sm_backend_conf_get_all(ctxt)
+            else:
+                backends = db.sm_backend_conf_get(ctxt,
+                                                  backend_conf_id)
+
+        except exception.NotFound as ex:
+            print "error: %s" % ex
+            sys.exit(2)
+
+        print "%-5s\t%-10s\t%-40s\t%-10s\t%s" % (_('id'),
+                                                 _('Flavor id'),
+                                                 _('SR UUID'),
+                                                 _('SR Type'),
+                                                 _('Config Parameters'),)
+
+        for b in backends:
+            print "%-5s\t%-10s\t%-40s\t%-10s\t%s" % (b['id'],
+                                                     b['flavor_id'],
+                                                     b['sr_uuid'],
+                                                     b['sr_type'],
+                                                     b['config_params'],)
+
+    @args('flavor_label')
+    @args('sr_type')
+    @args('args', nargs='*')
+    def backend_add(self, flavor_label, sr_type, *args):
+        # TODO(renukaapte) Add backend_introduce.
+        ctxt = context.get_admin_context()
+        params = dict(map(self._splitfun, args))
+        sr_uuid = uuid.uuid4()
+
+        if flavor_label is None:
+            print "error: backend needs to be associated with flavor"
+            sys.exit(2)
+
+        try:
+            flavors = db.sm_flavor_get(ctxt, flavor_label)
+
+        except exception.NotFound as ex:
+            print "error: %s" % ex
+            sys.exit(2)
+
+        config_params = " ".join(
+            ['%s=%s' % (key, params[key]) for key in params])
+
+        if 'sr_uuid' in params:
+            sr_uuid = params['sr_uuid']
+            try:
+                backend = db.sm_backend_conf_get_by_sr(ctxt, sr_uuid)
+            except exception.DBError as e:
+                _db_error(e)
+
+            if backend:
+                print 'Backend config found. Would you like to recreate this?'
+                print '(WARNING:Recreating will destroy all VDIs on backend!!)'
+                c = raw_input('Proceed? (y/n) ')
+                if c == 'y' or c == 'Y':
+                    try:
+                        db.sm_backend_conf_update(
+                            ctxt, backend['id'],
+                            dict(created=False,
+                                 flavor_id=flavors['id'],
+                                 sr_type=sr_type,
+                                 config_params=config_params))
+                    except exception.DBError as e:
+                        _db_error(e)
+                return
+
+        else:
+            print 'Backend config not found. Would you like to create it?'
+
+            print '(WARNING: Creating will destroy all data on backend!!!)'
+            c = raw_input('Proceed? (y/n) ')
+            if c == 'y' or c == 'Y':
+                try:
+                    db.sm_backend_conf_create(ctxt,
+                                              dict(flavor_id=flavors['id'],
+                                                   sr_uuid=sr_uuid,
+                                                   sr_type=sr_type,
+                                                   config_params=config_params))
+                except exception.DBError as e:
+                    _db_error(e)
+
+    @args('backend_conf_id')
+    def backend_remove(self, backend_conf_id):
+        try:
+            db.sm_backend_conf_delete(context.get_admin_context(),
+                                      backend_conf_id)
+
+        except exception.DBError as e:
+            _db_error(e)
+
+
+class ConfigCommands(object):
+    """Class for exposing the flags defined by flag_file(s)."""
+
+    def __init__(self):
+        pass
+
+    def list(self):
+        for key, value in FLAGS.iteritems():
+            if value is not None:
+                print '%s = %s' % (key, value)
+
+
+class GetLogCommands(object):
+    """Get logging information."""
+
+    def errors(self):
+        """Get all of the errors from the log files."""
+        error_found = 0
+        if FLAGS.log_dir:
+            logs = [x for x in os.listdir(FLAGS.log_dir) if x.endswith('.log')]
+            for file in logs:
+                log_file = os.path.join(FLAGS.log_dir, file)
+                lines = [line.strip() for line in open(log_file, "r")]
+                lines.reverse()
+                print_name = 0
+                for index, line in enumerate(lines):
+                    if line.find(" ERROR ") > 0:
+                        error_found += 1
+                        if print_name == 0:
+                            print log_file + ":-"
+                            print_name = 1
+                        print "Line %d : %s" % (len(lines) - index, line)
+        if error_found == 0:
+            print "No errors in logfiles!"
+
+    @args('num_entries', nargs='?', type=int, default=10,
+          help='Number of entries to list (default: %(default)d)')
+    def syslog(self, num_entries=10):
+        """Get the most recent cinder syslog events."""
+        entries = int(num_entries)
+        count = 0
+        log_file = ''
+        if os.path.exists('/var/log/syslog'):
+            log_file = '/var/log/syslog'
+        elif os.path.exists('/var/log/messages'):
+            log_file = '/var/log/messages'
+        else:
+            print "Unable to find system log file!"
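+            # Neither the Debian-style /var/log/syslog nor the Red Hat-style
+            # /var/log/messages exists, so there is nothing to scan.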
+ sys.exit(1) + lines = [line.strip() for line in open(log_file, "r")] + lines.reverse() + print "Last %s cinder syslog entries:-" % (entries) + for line in lines: + if line.find("cinder") > 0: + count += 1 + print "%s" % (line) + if count == entries: + break + + if count == 0: + print "No cinder entries in syslog!" + + +class BackupCommands(object): + """Methods for managing backups.""" + + def list(self): + """List all backups (including ones in progress) and the host + on which the backup operation is running.""" + ctxt = context.get_admin_context() + backups = db.backup_get_all(ctxt) + + hdr = "%-32s\t%-32s\t%-32s\t%-24s\t%-24s\t%-12s\t%-12s\t%-12s\t%-12s" + print hdr % (_('ID'), + _('User ID'), + _('Project ID'), + _('Host'), + _('Name'), + _('Container'), + _('Status'), + _('Size'), + _('Object Count')) + + res = "%-32s\t%-32s\t%-32s\t%-24s\t%-24s\t%-12s\t%-12s\t%-12d\t%-12d" + for backup in backups: + object_count = 0 + if backup['object_count'] is not None: + object_count = backup['object_count'] + print res % (backup['id'], + backup['user_id'], + backup['project_id'], + backup['host'], + backup['display_name'], + backup['container'], + backup['status'], + backup['size'], + object_count) + + +class ServiceCommands(object): + """Methods for managing services.""" + def list(self): + """Show a list of all cinder services.""" + ctxt = context.get_admin_context() + services = db.service_get_all(ctxt) + print_format = "%-16s %-36s %-16s %-10s %-5s %-10s" + print print_format % ( + _('Binary'), + _('Host'), + _('Zone'), + _('Status'), + _('State'), + _('Updated At')) + for svc in services: + alive = utils.service_is_up(svc) + art = ":-)" if alive else "XXX" + status = 'enabled' + if svc['disabled']: + status = 'disabled' + print print_format % (svc['binary'], svc['host'].partition('.')[0], + svc['availability_zone'], status, art, + svc['updated_at']) + + +CATEGORIES = { + 'backup': BackupCommands, + 'config': ConfigCommands, + 'db': DbCommands, + 'host': HostCommands, + 'logs': GetLogCommands, + 'service': ServiceCommands, + 'shell': ShellCommands, + 'sm': StorageManagerCommands, + 'version': VersionCommands, + 'volume': VolumeCommands, + 'migrate': ImportCommands, +} + + +def methods_of(obj): + """Get all callable methods of an object that don't start with underscore + returns a list of tuples of the form (method_name, method)""" + result = [] + for i in dir(obj): + if callable(getattr(obj, i)) and not i.startswith('_'): + result.append((i, getattr(obj, i))) + return result + + +def add_command_parsers(subparsers): + for category in CATEGORIES: + command_object = CATEGORIES[category]() + + parser = subparsers.add_parser(category) + parser.set_defaults(command_object=command_object) + + category_subparsers = parser.add_subparsers(dest='action') + + for (action, action_fn) in methods_of(command_object): + parser = category_subparsers.add_parser(action) + + action_kwargs = [] + for args, kwargs in getattr(action_fn, 'args', []): + parser.add_argument(*args, **kwargs) + + parser.set_defaults(action_fn=action_fn) + parser.set_defaults(action_kwargs=action_kwargs) + + +category_opt = cfg.SubCommandOpt('category', + title='Command categories', + handler=add_command_parsers) + + +def get_arg_string(args): + arg = None + if args[0] == '-': + # (Note)zhiteng: args starts with FLAGS.oparser.prefix_chars + # is optional args. Notice that cfg module takes care of + # actual ArgParser so prefix_chars is always '-'. 
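+        # A second '-' marks a long option such as '--backup'; the prefix is
+        # stripped so the remainder matches the attribute name that argparse
+        # stores on FLAGS.category.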
+ if args[1] == '-': + # This is long optional arg + arg = args[2:] + else: + arg = args[3:] + else: + arg = args + + return arg + + +def fetch_func_args(func): + fn_args = [] + for args, kwargs in getattr(func, 'args', []): + arg = get_arg_string(args[0]) + fn_args.append(getattr(FLAGS.category, arg)) + + return fn_args + + +def main(): + """Parse options and call the appropriate class/method.""" + FLAGS.register_cli_opt(category_opt) + script_name = sys.argv[0] + if len(sys.argv) < 2: + print(_("\nOpenStack Cinder version: %(version)s\n") % + {'version': version.version_string()}) + print script_name + " category action []" + print _("Available categories:") + for category in CATEGORIES: + print "\t%s" % category + sys.exit(2) + + try: + flags.parse_args(sys.argv) + logging.setup("cinder") + except cfg.ConfigFilesNotFoundError: + cfgfile = FLAGS.config_file[-1] if FLAGS.config_file else None + if cfgfile and not os.access(cfgfile, os.R_OK): + st = os.stat(cfgfile) + print _("Could not read %s. Re-running with sudo") % cfgfile + try: + os.execvp('sudo', ['sudo', '-u', '#%s' % st.st_uid] + sys.argv) + except Exception: + print _('sudo failed, continuing as if nothing happened') + + print _('Please re-run cinder-manage as root.') + sys.exit(2) + + fn = FLAGS.category.action_fn + + fn_args = fetch_func_args(fn) + fn(*fn_args) + +if __name__ == '__main__': + main() diff --git a/bin/cinder-rootwrap b/bin/cinder-rootwrap new file mode 100755 index 0000000000..cb2a92f5ea --- /dev/null +++ b/bin/cinder-rootwrap @@ -0,0 +1,128 @@ +#!/usr/bin/env python +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Root wrapper for OpenStack services + + Filters which commands a service is allowed to run as another user. + + To use this with cinder, you should set the following in + cinder.conf: + rootwrap_config=/etc/cinder/rootwrap.conf + + You also need to let the cinder user run cinder-rootwrap + as root in sudoers: + cinder ALL = (root) NOPASSWD: /usr/bin/cinder-rootwrap + /etc/cinder/rootwrap.conf * + + Service packaging should deploy .filters files only on nodes where + they are needed, to avoid allowing more than is necessary. +""" + +import ConfigParser +import logging +import os +import pwd +import signal +import subprocess +import sys + + +RC_UNAUTHORIZED = 99 +RC_NOCOMMAND = 98 +RC_BADCONFIG = 97 +RC_NOEXECFOUND = 96 + + +def _subprocess_setup(): + # Python installs a SIGPIPE handler by default. This is usually not what + # non-Python subprocesses expect. 
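+    # A child that inherits SIG_IGN would get EPIPE errors instead of
+    # terminating quietly when its output pipe closes, so restore SIG_DFL.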
+ signal.signal(signal.SIGPIPE, signal.SIG_DFL) + + +def _exit_error(execname, message, errorcode, log=True): + print "%s: %s" % (execname, message) + if log: + logging.error(message) + sys.exit(errorcode) + + +if __name__ == '__main__': + # Split arguments, require at least a command + execname = sys.argv.pop(0) + if len(sys.argv) < 2: + _exit_error(execname, "No command specified", RC_NOCOMMAND, log=False) + + configfile = sys.argv.pop(0) + userargs = sys.argv[:] + + # Add ../ to sys.path to allow running from branch + possible_topdir = os.path.normpath(os.path.join(os.path.abspath(execname), + os.pardir, os.pardir)) + if os.path.exists(os.path.join(possible_topdir, "cinder", "__init__.py")): + sys.path.insert(0, possible_topdir) + + from cinder.openstack.common.rootwrap import wrapper + + # Load configuration + try: + rawconfig = ConfigParser.RawConfigParser() + rawconfig.read(configfile) + config = wrapper.RootwrapConfig(rawconfig) + except ValueError as exc: + msg = "Incorrect value in %s: %s" % (configfile, exc.message) + _exit_error(execname, msg, RC_BADCONFIG, log=False) + except ConfigParser.Error: + _exit_error(execname, "Incorrect configuration file: %s" % configfile, + RC_BADCONFIG, log=False) + + if config.use_syslog: + wrapper.setup_syslog(execname, + config.syslog_log_facility, + config.syslog_log_level) + + # Execute command if it matches any of the loaded filters + filters = wrapper.load_filters(config.filters_path) + try: + filtermatch = wrapper.match_filter(filters, userargs, + exec_dirs=config.exec_dirs) + if filtermatch: + command = filtermatch.get_command(userargs, + exec_dirs=config.exec_dirs) + if config.use_syslog: + logging.info("(%s > %s) Executing %s (filter match = %s)" % ( + os.getlogin(), pwd.getpwuid(os.getuid())[0], + command, filtermatch.name)) + + obj = subprocess.Popen(command, + stdin=sys.stdin, + stdout=sys.stdout, + stderr=sys.stderr, + preexec_fn=_subprocess_setup, + env=filtermatch.get_environment(userargs)) + obj.wait() + sys.exit(obj.returncode) + + except wrapper.FilterMatchNotExecutable as exc: + msg = ("Executable not found: %s (filter match = %s)" + % (exc.match.exec_path, exc.match.name)) + _exit_error(execname, msg, RC_NOEXECFOUND, log=config.use_syslog) + + except wrapper.NoFilterMatched: + msg = ("Unauthorized command: %s (no filter matched)" + % ' '.join(userargs)) + _exit_error(execname, msg, RC_UNAUTHORIZED, log=config.use_syslog) diff --git a/bin/cinder-rpc-zmq-receiver b/bin/cinder-rpc-zmq-receiver new file mode 100755 index 0000000000..e8dbf0effe --- /dev/null +++ b/bin/cinder-rpc-zmq-receiver @@ -0,0 +1,53 @@ +#!/usr/bin/env python +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import eventlet +eventlet.monkey_patch() + +import contextlib +import os +import sys + +# If ../cinder/__init__.py exists, add ../ to Python search path, so that +# it will override what happens to be installed in /usr/(local/)lib/python... 
+POSSIBLE_TOPDIR = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), + os.pardir, + os.pardir)) +if os.path.exists(os.path.join(POSSIBLE_TOPDIR, 'cinder', '__init__.py')): + sys.path.insert(0, POSSIBLE_TOPDIR) + +from oslo.config import cfg + +from cinder.openstack.common import log as logging +from cinder.openstack.common import rpc +from cinder.openstack.common.rpc import impl_zmq + +CONF = cfg.CONF +CONF.register_opts(rpc.rpc_opts) +CONF.register_opts(impl_zmq.zmq_opts) + + +def main(): + CONF(sys.argv[1:], project='cinder') + logging.setup("cinder") + + with contextlib.closing(impl_zmq.ZmqProxy(CONF)) as reactor: + reactor.consume_in_thread() + reactor.wait() + +if __name__ == '__main__': + main() diff --git a/bin/cinder-scheduler b/bin/cinder-scheduler new file mode 100755 index 0000000000..28edd8bbf9 --- /dev/null +++ b/bin/cinder-scheduler @@ -0,0 +1,50 @@ +#!/usr/bin/env python +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Starter script for Cinder Scheduler.""" + +import eventlet +eventlet.monkey_patch() + +import os +import sys + +# If ../cinder/__init__.py exists, add ../ to Python search path, so that +# it will override what happens to be installed in /usr/(local/)lib/python... +possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), + os.pardir, + os.pardir)) +if os.path.exists(os.path.join(possible_topdir, 'cinder', '__init__.py')): + sys.path.insert(0, possible_topdir) + +from cinder.openstack.common import gettextutils +gettextutils.install('cinder') + +from cinder import flags +from cinder.openstack.common import log as logging +from cinder import service +from cinder import utils + +if __name__ == '__main__': + flags.parse_args(sys.argv) + logging.setup("cinder") + utils.monkey_patch() + server = service.Service.create(binary='cinder-scheduler') + service.serve(server) + service.wait() diff --git a/bin/cinder-share b/bin/cinder-share new file mode 100755 index 0000000000..b3ba88d398 --- /dev/null +++ b/bin/cinder-share @@ -0,0 +1,60 @@ +#!/usr/bin/env python +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 NetApp +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +"""Starter script for Cinder Share.""" + +import eventlet +eventlet.monkey_patch() + +import os +import sys + +# If ../cinder/__init__.py exists, add ../ to Python search path, so that +# it will override what happens to be installed in /usr/(local/)lib/python... +possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), + os.pardir, + os.pardir)) +if os.path.exists(os.path.join(possible_topdir, 'cinder', '__init__.py')): + sys.path.insert(0, possible_topdir) + +from cinder.openstack.common import gettextutils +gettextutils.install('cinder') + +from cinder import flags +from cinder.openstack.common import log as logging +from cinder import service +from cinder import utils + +FLAGS = flags.FLAGS + +if __name__ == '__main__': + flags.parse_args(sys.argv) + logging.setup("cinder") + utils.monkey_patch() + launcher = service.ProcessLauncher() + if FLAGS.enabled_share_backends: + for backend in FLAGS.enabled_share_backends: + host = "%s@%s" % (FLAGS.host, backend) + server = service.Service.create( + host=host, + service_name=backend) + launcher.launch_server(server) + else: + server = service.Service.create(binary='cinder-share') + launcher.launch_server(server) + launcher.wait() diff --git a/bin/cinder-volume b/bin/cinder-volume new file mode 100755 index 0000000000..9c36238d66 --- /dev/null +++ b/bin/cinder-volume @@ -0,0 +1,61 @@ +#!/usr/bin/env python +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Starter script for Cinder Volume.""" + +import eventlet +eventlet.monkey_patch() + +import os +import sys + +# If ../cinder/__init__.py exists, add ../ to Python search path, so that +# it will override what happens to be installed in /usr/(local/)lib/python... 
+possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), + os.pardir, + os.pardir)) +if os.path.exists(os.path.join(possible_topdir, 'cinder', '__init__.py')): + sys.path.insert(0, possible_topdir) + +from cinder.openstack.common import gettextutils +gettextutils.install('cinder') + +from cinder import flags +from cinder.openstack.common import log as logging +from cinder import service +from cinder import utils + +FLAGS = flags.FLAGS + +if __name__ == '__main__': + flags.parse_args(sys.argv) + logging.setup("cinder") + utils.monkey_patch() + launcher = service.ProcessLauncher() + if FLAGS.enabled_backends: + for backend in FLAGS.enabled_backends: + host = "%s@%s" % (FLAGS.host, backend) + server = service.Service.create( + host=host, + service_name=backend) + launcher.launch_server(server) + else: + server = service.Service.create(binary='cinder-volume') + launcher.launch_server(server) + launcher.wait() diff --git a/bin/cinder-volume-usage-audit b/bin/cinder-volume-usage-audit new file mode 100755 index 0000000000..13ea447a9f --- /dev/null +++ b/bin/cinder-volume-usage-audit @@ -0,0 +1,101 @@ +#!/usr/bin/env python +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 OpenStack, LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Cron script to generate usage notifications for volumes existing during + the audit period. + + Together with the notifications generated by volumes + create/delete/resize, over that time period, this allows an external + system consuming usage notification feeds to calculate volume usage + for each tenant. + + Time periods are specified as 'hour', 'month', 'day' or 'year' + + hour = previous hour. If run at 9:07am, will generate usage for 8-9am. + month = previous month. If the script is run April 1, it will generate + usages for March 1 through March 31. + day = previous day. if run on July 4th, it generates usages for July 3rd. + year = previous year. If run on Jan 1, it generates usages for + Jan 1 through Dec 31 of the previous year. +""" + +import os +import sys +import traceback + +# If ../cinder/__init__.py exists, add ../ to Python search path, so that +# it will override what happens to be installed in /usr/(local/)lib/python... 
+POSSIBLE_TOPDIR = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), + os.pardir, + os.pardir)) +if os.path.exists(os.path.join(POSSIBLE_TOPDIR, 'cinder', '__init__.py')): + sys.path.insert(0, POSSIBLE_TOPDIR) + +from cinder.openstack.common import gettextutils +gettextutils.install('cinder') + +from cinder import context +from cinder import db +from cinder import flags +from cinder.openstack.common import log as logging +from cinder.openstack.common import rpc +from cinder import utils +import cinder.volume.utils + + +FLAGS = flags.FLAGS + +if __name__ == '__main__': + admin_context = context.get_admin_context() + flags.parse_args(sys.argv) + logging.setup("cinder") + begin, end = utils.last_completed_audit_period() + print _("Starting volume usage audit") + msg = _("Creating usages for %(begin_period)s until %(end_period)s") + print (msg % {"begin_period": str(begin), "end_period": str(end)}) + + extra_info = { + 'audit_period_beginning': str(begin), + 'audit_period_ending': str(end), + } + + volumes = db.volume_get_active_by_window(admin_context, + begin, + end) + print _("Found %d volumes") % len(volumes) + for volume_ref in volumes: + try: + cinder.volume.utils.notify_usage_exists( + admin_context, volume_ref) + except Exception, e: + print traceback.format_exc() + + snapshots = db.snapshot_get_active_by_window(admin_context, + begin, + end) + print _("Found %d snapshots") % len(snapshots) + for snapshot_ref in snapshots: + try: + cinder.volume.utils.notify_about_snapshot_usage(admin_context, + snapshot_ref, + 'exists', + extra_info) + except Exception, e: + print traceback.format_exc() + + print _("Volume usage audit completed") diff --git a/cinder/__init__.py b/cinder/__init__.py new file mode 100644 index 0000000000..d765b088e9 --- /dev/null +++ b/cinder/__init__.py @@ -0,0 +1,32 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +:mod:`cinder` -- Cloud IaaS Platform +=================================== + +.. automodule:: cinder + :platform: Unix + :synopsis: Infrastructure-as-a-Service Cloud platform. +.. moduleauthor:: Jesse Andrews +.. moduleauthor:: Devin Carlen +.. moduleauthor:: Vishvananda Ishaya +.. moduleauthor:: Joshua McKenty +.. moduleauthor:: Manish Singh +.. moduleauthor:: Andy Smith +""" diff --git a/cinder/api/__init__.py b/cinder/api/__init__.py new file mode 100644 index 0000000000..fc348ac565 --- /dev/null +++ b/cinder/api/__init__.py @@ -0,0 +1,32 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License.
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import paste.urlmap + +from cinder import flags + + +FLAGS = flags.FLAGS + + +def root_app_factory(loader, global_conf, **local_conf): + if not FLAGS.enable_v1_api: + del local_conf['/v1'] + if not FLAGS.enable_v2_api: + del local_conf['/v2'] + return paste.urlmap.urlmap_factory(loader, global_conf, **local_conf) diff --git a/cinder/api/auth.py b/cinder/api/auth.py new file mode 100644 index 0000000000..ac673cf71f --- /dev/null +++ b/cinder/api/auth.py @@ -0,0 +1,36 @@ +# Copyright (c) 2013 OpenStack, LLC. +# +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from cinder.api.middleware import auth +from cinder.openstack.common import log as logging + + +LOG = logging.getLogger(__name__) + + +class CinderKeystoneContext(auth.CinderKeystoneContext): + def __init__(self, application): + LOG.warn(_('cinder.api.auth:CinderKeystoneContext is deprecated. ' + 'Please use ' + 'cinder.api.middleware.auth:CinderKeystoneContext ' + 'instead.')) + super(CinderKeystoneContext, self).__init__(application) + + +def pipeline_factory(loader, global_conf, **local_conf): + LOG.warn(_('cinder.api.auth:pipeline_factory is deprecated. Please use ' + 'cinder.api.middleware.auth:pipeline_factory instead.')) + return auth.pipeline_factory(loader, global_conf, **local_conf) diff --git a/cinder/api/common.py b/cinder/api/common.py new file mode 100644 index 0000000000..94bf594c00 --- /dev/null +++ b/cinder/api/common.py @@ -0,0 +1,314 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import os +import re +import urlparse + +import webob + +from cinder.api.openstack import wsgi +from cinder.api import xmlutil +from cinder import flags +from cinder.openstack.common import log as logging +from cinder import utils + + +LOG = logging.getLogger(__name__) +FLAGS = flags.FLAGS + + +XML_NS_V1 = 'http://docs.openstack.org/volume/api/v1' + + +def get_pagination_params(request): + """Return marker, limit tuple from request.
+ + :param request: `wsgi.Request` possibly containing 'marker' and 'limit' + GET variables. 'marker' is the id of the last element + the client has seen, and 'limit' is the maximum number + of items to return. If 'limit' is not specified, 0, or + > max_limit, we default to max_limit. Negative values + for either marker or limit will cause + exc.HTTPBadRequest() exceptions to be raised. + + """ + params = {} + if 'limit' in request.GET: + params['limit'] = _get_limit_param(request) + if 'marker' in request.GET: + params['marker'] = _get_marker_param(request) + return params + + +def _get_limit_param(request): + """Extract integer limit from request or fail""" + try: + limit = int(request.GET['limit']) + except ValueError: + msg = _('limit param must be an integer') + raise webob.exc.HTTPBadRequest(explanation=msg) + if limit < 0: + msg = _('limit param must be positive') + raise webob.exc.HTTPBadRequest(explanation=msg) + return limit + + +def _get_marker_param(request): + """Extract marker id from request or fail""" + return request.GET['marker'] + + +def limited(items, request, max_limit=FLAGS.osapi_max_limit): + """Return a slice of items according to requested offset and limit. + + :param items: A sliceable entity + :param request: ``wsgi.Request`` possibly containing 'offset' and 'limit' + GET variables. 'offset' is where to start in the list, + and 'limit' is the maximum number of items to return. If + 'limit' is not specified, 0, or > max_limit, we default + to max_limit. Negative values for either offset or limit + will cause exc.HTTPBadRequest() exceptions to be raised. + :kwarg max_limit: The maximum number of items to return from 'items' + """ + try: + offset = int(request.GET.get('offset', 0)) + except ValueError: + msg = _('offset param must be an integer') + raise webob.exc.HTTPBadRequest(explanation=msg) + + try: + limit = int(request.GET.get('limit', max_limit)) + except ValueError: + msg = _('limit param must be an integer') + raise webob.exc.HTTPBadRequest(explanation=msg) + + if limit < 0: + msg = _('limit param must be positive') + raise webob.exc.HTTPBadRequest(explanation=msg) + + if offset < 0: + msg = _('offset param must be positive') + raise webob.exc.HTTPBadRequest(explanation=msg) + + limit = min(max_limit, limit or max_limit) + range_end = offset + limit + return items[offset:range_end] + + +def limited_by_marker(items, request, max_limit=FLAGS.osapi_max_limit): + """Return a slice of items according to the requested marker and limit.""" + params = get_pagination_params(request) + + limit = params.get('limit', max_limit) + marker = params.get('marker') + + limit = min(max_limit, limit) + start_index = 0 + if marker: + start_index = -1 + for i, item in enumerate(items): + if 'flavorid' in item: + if item['flavorid'] == marker: + start_index = i + 1 + break + elif item['id'] == marker or item.get('uuid') == marker: + start_index = i + 1 + break + if start_index < 0: + msg = _('marker [%s] not found') % marker + raise webob.exc.HTTPBadRequest(explanation=msg) + range_end = start_index + limit + return items[start_index:range_end] + + +def remove_version_from_href(href): + """Removes the first api version from the href. 
+ + Given: 'http://www.cinder.com/v1.1/123' + Returns: 'http://www.cinder.com/123' + + Given: 'http://www.cinder.com/v1.1' + Returns: 'http://www.cinder.com' + + """ + parsed_url = urlparse.urlsplit(href) + url_parts = parsed_url.path.split('/', 2) + + # NOTE: this should match vX.X or vX + expression = re.compile(r'^v([0-9]+|[0-9]+\.[0-9]+)(/.*|$)') + if expression.match(url_parts[1]): + del url_parts[1] + + new_path = '/'.join(url_parts) + + if new_path == parsed_url.path: + msg = _('href %s does not contain version') % href + LOG.debug(msg) + raise ValueError(msg) + + parsed_url = list(parsed_url) + parsed_url[2] = new_path + return urlparse.urlunsplit(parsed_url) + + +def dict_to_query_str(params): + # TODO(throughnothing): we should just use urllib.urlencode instead of this + # But currently we don't work with urlencoded url's + param_str = "" + for key, val in params.iteritems(): + param_str = param_str + '='.join([str(key), str(val)]) + '&' + + return param_str.rstrip('&') + + +class ViewBuilder(object): + """Model API responses as dictionaries.""" + + _collection_name = None + + def _get_links(self, request, identifier): + return [{"rel": "self", + "href": self._get_href_link(request, identifier), }, + {"rel": "bookmark", + "href": self._get_bookmark_link(request, identifier), }] + + def _get_next_link(self, request, identifier): + """Return href string with proper limit and marker params.""" + params = request.params.copy() + params["marker"] = identifier + prefix = self._update_link_prefix(request.application_url, + FLAGS.osapi_volume_base_URL) + url = os.path.join(prefix, + request.environ["cinder.context"].project_id, + self._collection_name) + return "%s?%s" % (url, dict_to_query_str(params)) + + def _get_href_link(self, request, identifier): + """Return an href string pointing to this object.""" + prefix = self._update_link_prefix(request.application_url, + FLAGS.osapi_volume_base_URL) + return os.path.join(prefix, + request.environ["cinder.context"].project_id, + self._collection_name, + str(identifier)) + + def _get_bookmark_link(self, request, identifier): + """Create a URL that refers to a specific resource.""" + base_url = remove_version_from_href(request.application_url) + base_url = self._update_link_prefix(base_url, + FLAGS.osapi_volume_base_URL) + return os.path.join(base_url, + request.environ["cinder.context"].project_id, + self._collection_name, + str(identifier)) + + def _get_collection_links(self, request, items, id_key="uuid"): + """Retrieve 'next' link, if applicable.""" + links = [] + limit = int(request.params.get("limit", 0)) + if limit and limit == len(items): + last_item = items[-1] + if id_key in last_item: + last_item_id = last_item[id_key] + else: + last_item_id = last_item["id"] + links.append({ + "rel": "next", + "href": self._get_next_link(request, last_item_id), + }) + return links + + def _update_link_prefix(self, orig_url, prefix): + if not prefix: + return orig_url + url_parts = list(urlparse.urlsplit(orig_url)) + prefix_parts = list(urlparse.urlsplit(prefix)) + url_parts[0:2] = prefix_parts[0:2] + return urlparse.urlunsplit(url_parts) + + +class MetadataDeserializer(wsgi.MetadataXMLDeserializer): + def deserialize(self, text): + dom = utils.safe_minidom_parse_string(text) + metadata_node = self.find_first_child_named(dom, "metadata") + metadata = self.extract_metadata(metadata_node) + return {'body': {'metadata': metadata}} + + +class MetaItemDeserializer(wsgi.MetadataXMLDeserializer): + def deserialize(self, text): + dom = 
utils.safe_minidom_parse_string(text) + metadata_item = self.extract_metadata(dom) + return {'body': {'meta': metadata_item}} + + +class MetadataXMLDeserializer(wsgi.XMLDeserializer): + + def extract_metadata(self, metadata_node): + """Marshal the metadata attribute of a parsed request""" + if metadata_node is None: + return {} + metadata = {} + for meta_node in self.find_children_named(metadata_node, "meta"): + key = meta_node.getAttribute("key") + metadata[key] = self.extract_text(meta_node) + return metadata + + def _extract_metadata_container(self, datastring): + dom = utils.safe_minidom_parse_string(datastring) + metadata_node = self.find_first_child_named(dom, "metadata") + metadata = self.extract_metadata(metadata_node) + return {'body': {'metadata': metadata}} + + def create(self, datastring): + return self._extract_metadata_container(datastring) + + def update_all(self, datastring): + return self._extract_metadata_container(datastring) + + def update(self, datastring): + dom = utils.safe_minidom_parse_string(datastring) + metadata_item = self.extract_metadata(dom) + return {'body': {'meta': metadata_item}} + + +metadata_nsmap = {None: xmlutil.XMLNS_V11} + + +class MetaItemTemplate(xmlutil.TemplateBuilder): + def construct(self): + sel = xmlutil.Selector('meta', xmlutil.get_items, 0) + root = xmlutil.TemplateElement('meta', selector=sel) + root.set('key', 0) + root.text = 1 + return xmlutil.MasterTemplate(root, 1, nsmap=metadata_nsmap) + + +class MetadataTemplateElement(xmlutil.TemplateElement): + def will_render(self, datum): + return True + + +class MetadataTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = MetadataTemplateElement('metadata', selector='metadata') + elem = xmlutil.SubTemplateElement(root, 'meta', + selector=xmlutil.get_items) + elem.set('key', 0) + elem.text = 1 + return xmlutil.MasterTemplate(root, 1, nsmap=metadata_nsmap) diff --git a/cinder/api/contrib/__init__.py b/cinder/api/contrib/__init__.py new file mode 100644 index 0000000000..503bc90b8f --- /dev/null +++ b/cinder/api/contrib/__init__.py @@ -0,0 +1,39 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 Justin Santa Barbara +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Contrib contains extensions that are shipped with cinder. + +It can't be called 'extensions' because that causes namespacing problems. 
+ +""" + +from cinder.api import extensions +from cinder import flags +from cinder.openstack.common import log as logging + + +FLAGS = flags.FLAGS +LOG = logging.getLogger(__name__) + + +def standard_extensions(ext_mgr): + extensions.load_standard_extensions(ext_mgr, LOG, __path__, __package__) + + +def select_extensions(ext_mgr): + extensions.load_standard_extensions(ext_mgr, LOG, __path__, __package__, + FLAGS.osapi_volume_ext_list) diff --git a/cinder/api/contrib/admin_actions.py b/cinder/api/contrib/admin_actions.py new file mode 100644 index 0000000000..a45b370776 --- /dev/null +++ b/cinder/api/contrib/admin_actions.py @@ -0,0 +1,174 @@ +# Copyright 2012 OpenStack, LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import webob +from webob import exc + +from cinder.api import extensions +from cinder.api.openstack import wsgi +from cinder import db +from cinder import exception +from cinder.openstack.common import log as logging +from cinder import volume + + +LOG = logging.getLogger(__name__) + + +class AdminController(wsgi.Controller): + """Abstract base class for AdminControllers.""" + + collection = None # api collection to extend + + # FIXME(clayg): this will be hard to keep up-to-date + # Concrete classes can expand or over-ride + valid_status = set([ + 'creating', + 'available', + 'deleting', + 'error', + 'error_deleting', + ]) + + def __init__(self, *args, **kwargs): + super(AdminController, self).__init__(*args, **kwargs) + # singular name of the resource + self.resource_name = self.collection.rstrip('s') + self.volume_api = volume.API() + + def _update(self, *args, **kwargs): + raise NotImplementedError() + + def _get(self, *args, **kwargs): + raise NotImplementedError() + + def _delete(self, *args, **kwargs): + raise NotImplementedError() + + def validate_update(self, body): + update = {} + try: + update['status'] = body['status'] + except (TypeError, KeyError): + raise exc.HTTPBadRequest("Must specify 'status'") + if update['status'] not in self.valid_status: + raise exc.HTTPBadRequest("Must specify a valid status") + return update + + def authorize(self, context, action_name): + # e.g. 
"snapshot_admin_actions:reset_status" + action = '%s_admin_actions:%s' % (self.resource_name, action_name) + extensions.extension_authorizer('volume', action)(context) + + @wsgi.action('os-reset_status') + def _reset_status(self, req, id, body): + """Reset status on the resource.""" + context = req.environ['cinder.context'] + self.authorize(context, 'reset_status') + update = self.validate_update(body['os-reset_status']) + msg = _("Updating %(resource)s '%(id)s' with '%(update)r'") + LOG.debug(msg, {'resource': self.resource_name, 'id': id, + 'update': update}) + try: + self._update(context, id, update) + except exception.NotFound, e: + raise exc.HTTPNotFound(e) + return webob.Response(status_int=202) + + @wsgi.action('os-force_delete') + def _force_delete(self, req, id, body): + """Delete a resource, bypassing the check that it must be available.""" + context = req.environ['cinder.context'] + self.authorize(context, 'force_delete') + try: + resource = self._get(context, id) + except exception.NotFound: + raise exc.HTTPNotFound() + self._delete(context, resource, force=True) + return webob.Response(status_int=202) + + +class VolumeAdminController(AdminController): + """AdminController for Volumes.""" + + collection = 'volumes' + valid_status = AdminController.valid_status.union( + set(['attaching', 'in-use', 'detaching'])) + + def _update(self, *args, **kwargs): + db.volume_update(*args, **kwargs) + + def _get(self, *args, **kwargs): + return self.volume_api.get(*args, **kwargs) + + def _delete(self, *args, **kwargs): + return self.volume_api.delete(*args, **kwargs) + + def validate_update(self, body): + update = super(VolumeAdminController, self).validate_update(body) + if 'attach_status' in body: + if body['attach_status'] not in ('detached', 'attached'): + raise exc.HTTPBadRequest("Must specify a valid attach_status") + update['attach_status'] = body['attach_status'] + return update + + @wsgi.action('os-force_detach') + def _force_detach(self, req, id, body): + """ + Roll back a bad detach after the volume been disconnected from + the hypervisor. 
+ """ + context = req.environ['cinder.context'] + self.authorize(context, 'force_detach') + try: + volume = self._get(context, id) + except exception.NotFound: + raise exc.HTTPNotFound() + self.volume_api.terminate_connection(context, volume, + {}, force=True) + self.volume_api.detach(context, volume) + return webob.Response(status_int=202) + + +class SnapshotAdminController(AdminController): + """AdminController for Snapshots.""" + + collection = 'snapshots' + + def _update(self, *args, **kwargs): + db.snapshot_update(*args, **kwargs) + + def _get(self, *args, **kwargs): + return self.volume_api.get_snapshot(*args, **kwargs) + + def _delete(self, *args, **kwargs): + return self.volume_api.delete_snapshot(*args, **kwargs) + + +class Admin_actions(extensions.ExtensionDescriptor): + """Enable admin actions.""" + + name = "AdminActions" + alias = "os-admin-actions" + namespace = "http://docs.openstack.org/volume/ext/admin-actions/api/v1.1" + updated = "2012-08-25T00:00:00+00:00" + + def get_controller_extensions(self): + exts = [] + for class_ in (VolumeAdminController, SnapshotAdminController): + controller = class_() + extension = extensions.ControllerExtension( + self, class_.collection, controller) + exts.append(extension) + return exts diff --git a/cinder/api/contrib/backups.py b/cinder/api/contrib/backups.py new file mode 100644 index 0000000000..02444ac550 --- /dev/null +++ b/cinder/api/contrib/backups.py @@ -0,0 +1,278 @@ +# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
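# The admin actions above are dispatched by @wsgi.action: the single
# top-level key of the POSTed JSON body selects the handler. A hedged
# sketch of the payloads a client would send (illustrative values only;
# the /volumes/<id>/action path follows the usual OpenStack convention
# and is not spelled out in this hunk):

import json

reset_body = {'os-reset_status': {'status': 'error'}}
force_delete_body = {'os-force_delete': {}}
force_detach_body = {'os-force_detach': {}}

for body in (reset_body, force_delete_body, force_detach_body):
    print json.dumps(body)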
+ +"""The backups api.""" + +import webob +from webob import exc +from xml.dom import minidom + +from cinder.api import common +from cinder.api import extensions +from cinder.api.openstack import wsgi +from cinder.api.views import backups as backup_views +from cinder.api import xmlutil +from cinder import backup as backupAPI +from cinder import exception +from cinder import flags +from cinder.openstack.common import log as logging + +FLAGS = flags.FLAGS +LOG = logging.getLogger(__name__) + + +def make_backup(elem): + elem.set('id') + elem.set('status') + elem.set('size') + elem.set('container') + elem.set('volume_id') + elem.set('object_count') + elem.set('availability_zone') + elem.set('created_at') + elem.set('name') + elem.set('description') + elem.set('fail_reason') + + +def make_backup_restore(elem): + elem.set('backup_id') + elem.set('volume_id') + + +class BackupTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = xmlutil.TemplateElement('backup', selector='backup') + make_backup(root) + alias = Backups.alias + namespace = Backups.namespace + return xmlutil.MasterTemplate(root, 1, nsmap={alias: namespace}) + + +class BackupsTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = xmlutil.TemplateElement('backups') + elem = xmlutil.SubTemplateElement(root, 'backup', selector='backups') + make_backup(elem) + alias = Backups.alias + namespace = Backups.namespace + return xmlutil.MasterTemplate(root, 1, nsmap={alias: namespace}) + + +class BackupRestoreTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = xmlutil.TemplateElement('restore', selector='restore') + make_backup_restore(root) + alias = Backups.alias + namespace = Backups.namespace + return xmlutil.MasterTemplate(root, 1, nsmap={alias: namespace}) + + +class CreateDeserializer(wsgi.MetadataXMLDeserializer): + def default(self, string): + dom = minidom.parseString(string) + backup = self._extract_backup(dom) + return {'body': {'backup': backup}} + + def _extract_backup(self, node): + backup = {} + backup_node = self.find_first_child_named(node, 'backup') + + attributes = ['container', 'display_name', + 'display_description', 'volume_id'] + + for attr in attributes: + if backup_node.getAttribute(attr): + backup[attr] = backup_node.getAttribute(attr) + return backup + + +class RestoreDeserializer(wsgi.MetadataXMLDeserializer): + def default(self, string): + dom = minidom.parseString(string) + restore = self._extract_restore(dom) + return {'body': {'restore': restore}} + + def _extract_restore(self, node): + restore = {} + restore_node = self.find_first_child_named(node, 'restore') + if restore_node.getAttribute('volume_id'): + restore['volume_id'] = restore_node.getAttribute('volume_id') + return restore + + +class BackupsController(wsgi.Controller): + """The Backups API controller for the OpenStack API.""" + + _view_builder_class = backup_views.ViewBuilder + + def __init__(self): + self.backup_api = backupAPI.API() + super(BackupsController, self).__init__() + + @wsgi.serializers(xml=BackupTemplate) + def show(self, req, id): + """Return data about the given backup.""" + LOG.debug(_('show called for member %s'), id) + context = req.environ['cinder.context'] + + try: + backup = self.backup_api.get(context, backup_id=id) + except exception.BackupNotFound as error: + raise exc.HTTPNotFound(explanation=unicode(error)) + + return self._view_builder.detail(req, backup) + + def delete(self, req, id): + """Delete a backup.""" + LOG.debug(_('delete called for member %s'), id) + context = 
req.environ['cinder.context'] + + LOG.audit(_('Delete backup with id: %s'), id, context=context) + + try: + self.backup_api.delete(context, id) + except exception.BackupNotFound as error: + raise exc.HTTPNotFound(explanation=unicode(error)) + except exception.InvalidBackup as error: + raise exc.HTTPBadRequest(explanation=unicode(error)) + + return webob.Response(status_int=202) + + @wsgi.serializers(xml=BackupsTemplate) + def index(self, req): + """Returns a summary list of backups.""" + return self._get_backups(req, is_detail=False) + + @wsgi.serializers(xml=BackupsTemplate) + def detail(self, req): + """Returns a detailed list of backups.""" + return self._get_backups(req, is_detail=True) + + def _get_backups(self, req, is_detail): + """Returns a list of backups, transformed through view builder.""" + context = req.environ['cinder.context'] + backups = self.backup_api.get_all(context) + limited_list = common.limited(backups, req) + + if is_detail: + backups = self._view_builder.detail_list(req, limited_list) + else: + backups = self._view_builder.summary_list(req, limited_list) + return backups + + # TODO(frankm): Add some checks here including + # - whether requested volume_id exists so we can return some errors + # immediately + # - maybe also do validation of swift container name + @wsgi.response(202) + @wsgi.serializers(xml=BackupTemplate) + @wsgi.deserializers(xml=CreateDeserializer) + def create(self, req, body): + """Create a new backup.""" + LOG.debug(_('Creating new backup %s'), body) + if not self.is_valid_body(body, 'backup'): + raise exc.HTTPBadRequest() + + context = req.environ['cinder.context'] + + try: + backup = body['backup'] + volume_id = backup['volume_id'] + except KeyError: + msg = _("Incorrect request body format") + raise exc.HTTPBadRequest(explanation=msg) + container = backup.get('container', None) + name = backup.get('name', None) + description = backup.get('description', None) + + LOG.audit(_("Creating backup of volume %(volume_id)s in container" + " %(container)s"), locals(), context=context) + + try: + new_backup = self.backup_api.create(context, name, description, + volume_id, container) + except exception.InvalidVolume as error: + raise exc.HTTPBadRequest(explanation=unicode(error)) + except exception.VolumeNotFound as error: + raise exc.HTTPNotFound(explanation=unicode(error)) + + retval = self._view_builder.summary(req, dict(new_backup.iteritems())) + return retval + + @wsgi.response(202) + @wsgi.serializers(xml=BackupRestoreTemplate) + @wsgi.deserializers(xml=RestoreDeserializer) + def restore(self, req, id, body): + """Restore an existing backup to a volume.""" + backup_id = id + LOG.debug(_('Restoring backup %(backup_id)s (%(body)s)') % locals()) + if not self.is_valid_body(body, 'restore'): + raise exc.HTTPBadRequest() + + context = req.environ['cinder.context'] + + try: + restore = body['restore'] + except KeyError: + msg = _("Incorrect request body format") + raise exc.HTTPBadRequest(explanation=msg) + volume_id = restore.get('volume_id', None) + + LOG.audit(_("Restoring backup %(backup_id)s to volume %(volume_id)s"), + locals(), context=context) + + try: + new_restore = self.backup_api.restore(context, + backup_id=backup_id, + volume_id=volume_id) + except exception.InvalidInput as error: + raise exc.HTTPBadRequest(explanation=unicode(error)) + except exception.InvalidVolume as error: + raise exc.HTTPBadRequest(explanation=unicode(error)) + except exception.InvalidBackup as error: + raise exc.HTTPBadRequest(explanation=unicode(error)) + except 
exception.BackupNotFound as error: + raise exc.HTTPNotFound(explanation=unicode(error)) + except exception.VolumeNotFound as error: + raise exc.HTTPNotFound(explanation=unicode(error)) + except exception.VolumeSizeExceedsAvailableQuota as error: + raise exc.HTTPRequestEntityTooLarge( + explanation=error.message, headers={'Retry-After': 0}) + except exception.VolumeLimitExceeded as error: + raise exc.HTTPRequestEntityTooLarge( + explanation=error.message, headers={'Retry-After': 0}) + + retval = self._view_builder.restore_summary( + req, dict(new_restore.iteritems())) + return retval + + +class Backups(extensions.ExtensionDescriptor): + """Backups support.""" + + name = 'Backups' + alias = 'backups' + namespace = 'http://docs.openstack.org/volume/ext/backups/api/v1' + updated = '2012-12-12T00:00:00+00:00' + + def get_resources(self): + resources = [] + res = extensions.ResourceExtension( + Backups.alias, BackupsController(), + collection_actions={'detail': 'GET'}, + member_actions={'restore': 'POST'}) + resources.append(res) + return resources diff --git a/cinder/api/contrib/extended_snapshot_attributes.py b/cinder/api/contrib/extended_snapshot_attributes.py new file mode 100644 index 0000000000..f55fa53ac2 --- /dev/null +++ b/cinder/api/contrib/extended_snapshot_attributes.py @@ -0,0 +1,125 @@ +# Copyright 2012 OpenStack, LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
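# The backups controller above accepts JSON bodies keyed on 'backup'
# (create) and 'restore' (restore). A minimal sketch of well-formed
# payloads; the field values are made-up examples, and 'container',
# 'name' and 'description' are optional, as the backup.get() defaults
# above show:

import json

create_body = {'backup': {'volume_id': 'some-volume-uuid',
                          'container': 'my-backups',
                          'name': 'nightly',
                          'description': 'example backup'}}
restore_body = {'restore': {'volume_id': 'some-target-volume-uuid'}}

print json.dumps(create_body)
print json.dumps(restore_body)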
+ +"""The Extended Snapshot Attributes API extension.""" + +from webob import exc + +from cinder.api import extensions +from cinder.api.openstack import wsgi +from cinder.api import xmlutil +from cinder import exception +from cinder import flags +from cinder.openstack.common import log as logging +from cinder import volume + + +FLAGS = flags.FLAGS +LOG = logging.getLogger(__name__) +authorize = extensions.soft_extension_authorizer( + 'volume', + 'extended_snapshot_attributes') + + +class ExtendedSnapshotAttributesController(wsgi.Controller): + def __init__(self, *args, **kwargs): + super(ExtendedSnapshotAttributesController, self).__init__(*args, + **kwargs) + self.volume_api = volume.API() + + def _get_snapshots(self, context): + snapshots = self.volume_api.get_all_snapshots(context) + rval = dict((snapshot['id'], snapshot) for snapshot in snapshots) + return rval + + def _extend_snapshot(self, context, snapshot, data): + for attr in ['project_id', 'progress']: + key = "%s:%s" % (Extended_snapshot_attributes.alias, attr) + snapshot[key] = data[attr] + + @wsgi.extends + def show(self, req, resp_obj, id): + context = req.environ['cinder.context'] + if authorize(context): + # Attach our slave template to the response object + resp_obj.attach(xml=ExtendedSnapshotAttributeTemplate()) + + try: + snapshot = self.volume_api.get_snapshot(context, id) + except exception.NotFound: + explanation = _("Snapshot not found.") + raise exc.HTTPNotFound(explanation=explanation) + + self._extend_snapshot(context, resp_obj.obj['snapshot'], snapshot) + + @wsgi.extends + def detail(self, req, resp_obj): + context = req.environ['cinder.context'] + if authorize(context): + # Attach our slave template to the response object + resp_obj.attach(xml=ExtendedSnapshotAttributesTemplate()) + + snapshots = list(resp_obj.obj.get('snapshots', [])) + db_snapshots = self._get_snapshots(context) + + for snapshot_object in snapshots: + try: + snapshot_data = db_snapshots[snapshot_object['id']] + except KeyError: + continue + + self._extend_snapshot(context, snapshot_object, snapshot_data) + + +class Extended_snapshot_attributes(extensions.ExtensionDescriptor): + """Extended SnapshotAttributes support.""" + + name = "ExtendedSnapshotAttributes" + alias = "os-extended-snapshot-attributes" + namespace = ("http://docs.openstack.org/volume/ext/" + "extended_snapshot_attributes/api/v1") + updated = "2012-06-19T00:00:00+00:00" + + def get_controller_extensions(self): + controller = ExtendedSnapshotAttributesController() + extension = extensions.ControllerExtension(self, 'snapshots', + controller) + return [extension] + + +def make_snapshot(elem): + elem.set('{%s}project_id' % Extended_snapshot_attributes.namespace, + '%s:project_id' % Extended_snapshot_attributes.alias) + elem.set('{%s}progress' % Extended_snapshot_attributes.namespace, + '%s:progress' % Extended_snapshot_attributes.alias) + + +class ExtendedSnapshotAttributeTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = xmlutil.TemplateElement('snapshot', selector='snapshot') + make_snapshot(root) + alias = Extended_snapshot_attributes.alias + namespace = Extended_snapshot_attributes.namespace + return xmlutil.SlaveTemplate(root, 1, nsmap={alias: namespace}) + + +class ExtendedSnapshotAttributesTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = xmlutil.TemplateElement('snapshots') + elem = xmlutil.SubTemplateElement(root, 'snapshot', + selector='snapshots') + make_snapshot(elem) + alias = Extended_snapshot_attributes.alias + namespace = 
Extended_snapshot_attributes.namespace + return xmlutil.SlaveTemplate(root, 1, nsmap={alias: namespace}) diff --git a/cinder/api/contrib/hosts.py b/cinder/api/contrib/hosts.py new file mode 100644 index 0000000000..0fc85481d6 --- /dev/null +++ b/cinder/api/contrib/hosts.py @@ -0,0 +1,265 @@ +# Copyright (c) 2011 OpenStack, LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""The hosts admin extension.""" + +import webob.exc +from xml.parsers import expat + +from cinder.api import extensions +from cinder.api.openstack import wsgi +from cinder.api import xmlutil +from cinder import db +from cinder import exception +from cinder import flags +from cinder.openstack.common import log as logging +from cinder.openstack.common import timeutils +from cinder import utils +from cinder.volume import api as volume_api + +FLAGS = flags.FLAGS +LOG = logging.getLogger(__name__) +authorize = extensions.extension_authorizer('volume', 'hosts') + + +class HostIndexTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = xmlutil.TemplateElement('hosts') + elem = xmlutil.SubTemplateElement(root, 'host', selector='hosts') + elem.set('service-status') + elem.set('service') + elem.set('zone') + elem.set('service-state') + elem.set('host_name') + elem.set('last-update') + + return xmlutil.MasterTemplate(root, 1) + + +class HostUpdateTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = xmlutil.TemplateElement('host') + root.set('host') + root.set('status') + + return xmlutil.MasterTemplate(root, 1) + + +class HostActionTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = xmlutil.TemplateElement('host') + root.set('host') + + return xmlutil.MasterTemplate(root, 1) + + +class HostShowTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = xmlutil.TemplateElement('host') + elem = xmlutil.make_flat_dict('resource', selector='host', + subselector='resource') + root.append(elem) + + return xmlutil.MasterTemplate(root, 1) + + +class HostDeserializer(wsgi.XMLDeserializer): + def default(self, string): + try: + node = utils.safe_minidom_parse_string(string) + except expat.ExpatError: + msg = _("cannot understand XML") + raise exception.MalformedRequestBody(reason=msg) + + updates = {} + for child in node.childNodes[0].childNodes: + updates[child.tagName] = self.extract_text(child) + + return dict(body=updates) + + +def _list_hosts(req, service=None): + """Returns a summary list of hosts.""" + curr_time = timeutils.utcnow() + context = req.environ['cinder.context'] + services = db.service_get_all(context, False) + zone = '' + if 'zone' in req.GET: + zone = req.GET['zone'] + if zone: + services = [s for s in services if s['availability_zone'] == zone] + hosts = [] + for host in services: + delta = curr_time - (host['updated_at'] or host['created_at']) + alive = abs(utils.total_seconds(delta)) <= FLAGS.service_down_time + status = (alive and "available") or "unavailable" + active = 'enabled' + if host['disabled']: + active = 'disabled' + 
LOG.debug('status, active and update: %s, %s, %s' % + (status, active, host['updated_at'])) + hosts.append({'host_name': host['host'], + 'service': host['topic'], + 'zone': host['availability_zone'], + 'service-status': status, + 'service-state': active, + 'last-update': host['updated_at']}) + if service: + hosts = [host for host in hosts + if host["service"] == service] + return hosts + + +def check_host(fn): + """Makes sure that the host exists.""" + def wrapped(self, req, id, service=None, *args, **kwargs): + listed_hosts = _list_hosts(req, service) + hosts = [h["host_name"] for h in listed_hosts] + if id in hosts: + return fn(self, req, id, *args, **kwargs) + else: + message = _("Host '%s' could not be found.") % id + raise webob.exc.HTTPNotFound(explanation=message) + return wrapped + + +class HostController(object): + """The Hosts API controller for the OpenStack API.""" + def __init__(self): + self.api = volume_api.HostAPI() + super(HostController, self).__init__() + + @wsgi.serializers(xml=HostIndexTemplate) + def index(self, req): + authorize(req.environ['cinder.context']) + return {'hosts': _list_hosts(req)} + + @wsgi.serializers(xml=HostUpdateTemplate) + @wsgi.deserializers(xml=HostDeserializer) + @check_host + def update(self, req, id, body): + authorize(req.environ['cinder.context']) + update_values = {} + for raw_key, raw_val in body.iteritems(): + key = raw_key.lower().strip() + val = raw_val.lower().strip() + if key == "status": + if val in ("enable", "disable"): + update_values['status'] = val.startswith("enable") + else: + explanation = _("Invalid status: '%s'") % raw_val + raise webob.exc.HTTPBadRequest(explanation=explanation) + else: + explanation = _("Invalid update setting: '%s'") % raw_key + raise webob.exc.HTTPBadRequest(explanation=explanation) + update_setters = {'status': self._set_enabled_status} + result = {} + for key, value in update_values.iteritems(): + result.update(update_setters[key](req, id, value)) + return result + + def _set_enabled_status(self, req, host, enabled): + """Sets the specified host's ability to accept new volumes.""" + context = req.environ['cinder.context'] + state = "enabled" if enabled else "disabled" + LOG.audit(_("Setting host %(host)s to %(state)s.") % locals()) + result = self.api.set_host_enabled(context, + host=host, + enabled=enabled) + if result not in ("enabled", "disabled"): + # An error message was returned + raise webob.exc.HTTPBadRequest(explanation=result) + return {"host": host, "status": result} + + @wsgi.serializers(xml=HostShowTemplate) + def show(self, req, id): + """Shows the volume usage info given by hosts. + + :param context: security context + :param host: hostname + :returns: expected to use HostShowTemplate. 
+ ex.:: + + {'host': {'resource':D},..} + D: {'host': 'hostname','project': 'admin', + 'volume_count': 1, 'total_volume_gb': 2048} + """ + host = id + context = req.environ['cinder.context'] + if not context.is_admin: + msg = _("Describe-resource is admin only functionality") + raise webob.exc.HTTPForbidden(explanation=msg) + + try: + host_ref = db.service_get_by_host_and_topic(context, + host, + FLAGS.volume_topic) + except exception.ServiceNotFound: + raise webob.exc.HTTPNotFound(explanation=_("Host not found")) + + # Getting total available/used resource + # TODO(jdg): Add summary info for Snapshots + volume_refs = db.volume_get_all_by_host(context, host_ref['host']) + (count, sum) = db.volume_data_get_for_host(context, + host_ref['host']) + + snap_count_total = 0 + snap_sum_total = 0 + resources = [{'resource': {'host': host, 'project': '(total)', + 'volume_count': str(count), + 'total_volume_gb': str(sum), + 'snapshot_count': str(snap_count_total), + 'total_snapshot_gb': str(snap_sum_total)}}] + + project_ids = [v['project_id'] for v in volume_refs] + project_ids = list(set(project_ids)) + for project_id in project_ids: + (count, sum) = db.volume_data_get_for_project(context, project_id) + (snap_count, snap_sum) = db.snapshot_data_get_for_project( + context, + project_id) + resources.append( + {'resource': + {'host': host, + 'project': project_id, + 'volume_count': str(count), + 'total_volume_gb': str(sum), + 'snapshot_count': str(snap_count), + 'total_snapshot_gb': str(snap_sum)}}) + snap_count_total += int(snap_count) + snap_sum_total += int(snap_sum) + resources[0]['resource']['snapshot_count'] = str(snap_count_total) + resources[0]['resource']['total_snapshot_gb'] = str(snap_sum_total) + return {"host": resources} + + +class Hosts(extensions.ExtensionDescriptor): + """Admin-only host administration""" + + name = "Hosts" + alias = "os-hosts" + namespace = "http://docs.openstack.org/volume/ext/hosts/api/v1.1" + updated = "2011-06-29T00:00:00+00:00" + + def get_resources(self): + resources = [extensions.ResourceExtension('os-hosts', + HostController(), + collection_actions={ + 'update': 'PUT'}, + member_actions={ + 'startup': 'GET', + 'shutdown': 'GET', + 'reboot': 'GET'})] + return resources diff --git a/cinder/api/contrib/image_create.py b/cinder/api/contrib/image_create.py new file mode 100644 index 0000000000..a3ee923e72 --- /dev/null +++ b/cinder/api/contrib/image_create.py @@ -0,0 +1,31 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2012 NTT. +# Copyright (c) 2012 OpenStack, LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
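# HostController.update above accepts a body whose only valid key is
# 'status', with value 'enable' or 'disable'; anything else raises
# HTTPBadRequest. A standalone re-implementation of that validation for
# illustration (a hedged sketch, not the controller itself):

def parse_host_update(body):
    """Mirror HostController.update's validation of a one-key body."""
    update = {}
    for raw_key, raw_val in body.items():
        key = raw_key.lower().strip()
        val = raw_val.lower().strip()
        if key != 'status' or val not in ('enable', 'disable'):
            raise ValueError("invalid update: %r=%r" % (raw_key, raw_val))
        update['status'] = val.startswith('enable')  # True means enabled
    return update

assert parse_host_update({'status': 'Enable'}) == {'status': True}
assert parse_host_update({'status': 'disable'}) == {'status': False}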
+ +"""The Create Volume from Image extension.""" + + +from cinder.api import extensions + + +class Image_create(extensions.ExtensionDescriptor): + """Allow creating a volume from an image in the Create Volume v1 API""" + + name = "CreateVolumeExtension" + alias = "os-image-create" + namespace = "http://docs.openstack.org/volume/ext/image-create/api/v1" + updated = "2012-08-13T00:00:00+00:00" diff --git a/cinder/api/contrib/quota_classes.py b/cinder/api/contrib/quota_classes.py new file mode 100644 index 0000000000..597ccd004c --- /dev/null +++ b/cinder/api/contrib/quota_classes.py @@ -0,0 +1,103 @@ +# Copyright 2012 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import webob + +from cinder.api import extensions +from cinder.api.openstack import wsgi +from cinder.api import xmlutil +from cinder import db +from cinder import exception +from cinder import quota + + +QUOTAS = quota.QUOTAS + + +authorize = extensions.extension_authorizer('volume', 'quota_classes') + + +class QuotaClassTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = xmlutil.TemplateElement('quota_class_set', + selector='quota_class_set') + root.set('id') + + for resource in QUOTAS.resources: + elem = xmlutil.SubTemplateElement(root, resource) + elem.text = resource + + return xmlutil.MasterTemplate(root, 1) + + +class QuotaClassSetsController(object): + + def _format_quota_set(self, quota_class, quota_set): + """Convert the quota object to a result dict""" + + result = dict(id=str(quota_class)) + + for resource in QUOTAS.resources: + result[resource] = quota_set[resource] + + return dict(quota_class_set=result) + + @wsgi.serializers(xml=QuotaClassTemplate) + def show(self, req, id): + context = req.environ['cinder.context'] + authorize(context) + try: + db.sqlalchemy.api.authorize_quota_class_context(context, id) + except exception.NotAuthorized: + raise webob.exc.HTTPForbidden() + + return self._format_quota_set(id, + QUOTAS.get_class_quotas(context, id)) + + @wsgi.serializers(xml=QuotaClassTemplate) + def update(self, req, id, body): + context = req.environ['cinder.context'] + authorize(context) + quota_class = id + for key in body['quota_class_set'].keys(): + if key in QUOTAS: + value = int(body['quota_class_set'][key]) + try: + db.quota_class_update(context, quota_class, key, value) + except exception.QuotaClassNotFound: + db.quota_class_create(context, quota_class, key, value) + except exception.AdminRequired: + raise webob.exc.HTTPForbidden() + return {'quota_class_set': QUOTAS.get_class_quotas(context, + quota_class)} + + +class Quota_classes(extensions.ExtensionDescriptor): + """Quota classes management support""" + + name = "QuotaClasses" + alias = "os-quota-class-sets" + namespace = ("http://docs.openstack.org/volume/ext/" + "quota-classes-sets/api/v1.1") + updated = "2012-03-12T00:00:00+00:00" + + def get_resources(self): + resources = [] + + res = extensions.ResourceExtension('os-quota-class-sets', + QuotaClassSetsController()) + resources.append(res) + + 
return resources diff --git a/cinder/api/contrib/quotas.py b/cinder/api/contrib/quotas.py new file mode 100644 index 0000000000..374eed1803 --- /dev/null +++ b/cinder/api/contrib/quotas.py @@ -0,0 +1,125 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import webob + +from cinder.api import extensions +from cinder.api.openstack import wsgi +from cinder.api import xmlutil +from cinder import db +from cinder.db.sqlalchemy import api as sqlalchemy_api +from cinder import exception +from cinder import quota + + +QUOTAS = quota.QUOTAS + + +authorize_update = extensions.extension_authorizer('compute', 'quotas:update') +authorize_show = extensions.extension_authorizer('compute', 'quotas:show') + + +class QuotaTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = xmlutil.TemplateElement('quota_set', selector='quota_set') + root.set('id') + + for resource in QUOTAS.resources: + elem = xmlutil.SubTemplateElement(root, resource) + elem.text = resource + + return xmlutil.MasterTemplate(root, 1) + + +class QuotaSetsController(object): + + def _format_quota_set(self, project_id, quota_set): + """Convert the quota object to a result dict""" + + result = dict(id=str(project_id)) + + for resource in QUOTAS.resources: + result[resource] = quota_set[resource] + + return dict(quota_set=result) + + def _validate_quota_limit(self, limit): + # NOTE: -1 is a flag value for unlimited + if limit < -1: + msg = _("Quota limit must be -1 or greater.") + raise webob.exc.HTTPBadRequest(explanation=msg) + + def _get_quotas(self, context, id, usages=False): + values = QUOTAS.get_project_quotas(context, id, usages=usages) + + if usages: + return values + else: + return dict((k, v['limit']) for k, v in values.items()) + + @wsgi.serializers(xml=QuotaTemplate) + def show(self, req, id): + context = req.environ['cinder.context'] + authorize_show(context) + try: + sqlalchemy_api.authorize_project_context(context, id) + except exception.NotAuthorized: + raise webob.exc.HTTPForbidden() + + return self._format_quota_set(id, self._get_quotas(context, id)) + + @wsgi.serializers(xml=QuotaTemplate) + def update(self, req, id, body): + context = req.environ['cinder.context'] + authorize_update(context) + project_id = id + for key in body['quota_set'].keys(): + if key in QUOTAS: + value = int(body['quota_set'][key]) + self._validate_quota_limit(value) + try: + db.quota_update(context, project_id, key, value) + except exception.ProjectQuotaNotFound: + db.quota_create(context, project_id, key, value) + except exception.AdminRequired: + raise webob.exc.HTTPForbidden() + return {'quota_set': self._get_quotas(context, id)} + + @wsgi.serializers(xml=QuotaTemplate) + def defaults(self, req, id): + context = req.environ['cinder.context'] + authorize_show(context) + return self._format_quota_set(id, QUOTAS.get_defaults(context)) + + +class Quotas(extensions.ExtensionDescriptor): + """Quotas management support""" + + name = 
"Quotas" + alias = "os-quota-sets" + namespace = "http://docs.openstack.org/compute/ext/quotas-sets/api/v1.1" + updated = "2011-08-08T00:00:00+00:00" + + def get_resources(self): + resources = [] + + res = extensions.ResourceExtension('os-quota-sets', + QuotaSetsController(), + member_actions={'defaults': 'GET'}) + resources.append(res) + + return resources diff --git a/cinder/api/contrib/services.py b/cinder/api/contrib/services.py new file mode 100644 index 0000000000..3efac96f63 --- /dev/null +++ b/cinder/api/contrib/services.py @@ -0,0 +1,139 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 IBM Corp. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +import webob.exc + +from cinder.api import extensions +from cinder.api.openstack import wsgi +from cinder.api import xmlutil +from cinder import db +from cinder import exception +from cinder.openstack.common import log as logging +from cinder.openstack.common import timeutils +from cinder import utils + + +LOG = logging.getLogger(__name__) +authorize = extensions.extension_authorizer('volume', 'services') + + +class ServicesIndexTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = xmlutil.TemplateElement('services') + elem = xmlutil.SubTemplateElement(root, 'service', selector='services') + elem.set('binary') + elem.set('host') + elem.set('zone') + elem.set('status') + elem.set('state') + elem.set('update_at') + + return xmlutil.MasterTemplate(root, 1) + + +class ServicesUpdateTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = xmlutil.TemplateElement('host') + root.set('host') + root.set('service') + root.set('disabled') + + return xmlutil.MasterTemplate(root, 1) + + +class ServiceController(object): + @wsgi.serializers(xml=ServicesIndexTemplate) + def index(self, req): + """ + Return a list of all running services. Filter by host & service name. 
+ """ + context = req.environ['cinder.context'] + authorize(context) + now = timeutils.utcnow() + services = db.service_get_all(context) + + host = '' + if 'host' in req.GET: + host = req.GET['host'] + service = '' + if 'service' in req.GET: + service = req.GET['service'] + if host: + services = [s for s in services if s['host'] == host] + if service: + services = [s for s in services if s['binary'] == service] + + svcs = [] + for svc in services: + delta = now - (svc['updated_at'] or svc['created_at']) + alive = abs(utils.total_seconds(delta)) <= FLAGS.service_down_time + art = (alive and "up") or "down" + active = 'enabled' + if svc['disabled']: + active = 'disabled' + svcs.append({"binary": svc['binary'], 'host': svc['host'], + 'zone': svc['availability_zone'], + 'status': active, 'state': art, + 'updated_at': svc['updated_at']}) + return {'services': svcs} + + @wsgi.serializers(xml=ServicesUpdateTemplate) + def update(self, req, id, body): + """Enable/Disable scheduling for a service""" + context = req.environ['cinder.context'] + authorize(context) + + if id == "enable": + disabled = False + elif id == "disable": + disabled = True + else: + raise webob.exc.HTTPNotFound("Unknown action") + + try: + host = body['host'] + service = body['service'] + except (TypeError, KeyError): + raise webob.exc.HTTPBadRequest() + + try: + svc = db.service_get_by_args(context, host, service) + if not svc: + raise webob.exc.HTTPNotFound('Unknown service') + + db.service_update(context, svc['id'], {'disabled': disabled}) + except exception.ServiceNotFound: + raise webob.exc.HTTPNotFound("service not found") + + return {'host': host, 'service': service, 'disabled': disabled} + + +class Services(extensions.ExtensionDescriptor): + """Services support""" + + name = "Services" + alias = "os-services" + namespace = "http://docs.openstack.org/volume/ext/services/api/v2" + updated = "2012-10-28T00:00:00-00:00" + + def get_resources(self): + resources = [] + resource = extensions.ResourceExtension('os-services', + ServiceController()) + resources.append(resource) + return resources diff --git a/cinder/api/contrib/share_actions.py b/cinder/api/contrib/share_actions.py new file mode 100644 index 0000000000..33040e722a --- /dev/null +++ b/cinder/api/contrib/share_actions.py @@ -0,0 +1,80 @@ +# Copyright 2013 NetApp. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License.
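# The services controller above overloads the resource id as the action
# ("enable" or "disable") and reports a service as "up" only while its last
# heartbeat is within service_down_time. A hedged standalone sketch of that
# liveness rule (the 60-second default below is an assumption for
# illustration, not a value taken from this patch):

import datetime

def service_state(updated_at, now, service_down_time=60):
    """Return 'up' or 'down' the way the index() handler computes it."""
    delta = now - updated_at
    alive = abs(delta.total_seconds()) <= service_down_time
    return (alive and "up") or "down"

now = datetime.datetime(2013, 8, 8, 10, 0, 0)
assert service_state(now - datetime.timedelta(seconds=30), now) == "up"
assert service_state(now - datetime.timedelta(minutes=5), now) == "down"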
+
+import webob
+import webob.exc
+
+from cinder.api import extensions
+from cinder.api.openstack import wsgi
+from cinder import exception
+from cinder import share
+
+
+class ShareActionsController(wsgi.Controller):
+    def __init__(self, *args, **kwargs):
+        super(ShareActionsController, self).__init__(*args, **kwargs)
+        self.share_api = share.API()
+
+    @wsgi.action('os-allow_access')
+    def _allow_access(self, req, id, body):
+        """Add share access rule."""
+        context = req.environ['cinder.context']
+
+        share = self.share_api.get(context, id)
+
+        access_type = body['os-allow_access']['access_type']
+        access_to = body['os-allow_access']['access_to']
+
+        self.share_api.allow_access(context, share, access_type, access_to)
+        return webob.Response(status_int=202)
+
+    @wsgi.action('os-deny_access')
+    def _deny_access(self, req, id, body):
+        """Remove access rule."""
+        context = req.environ['cinder.context']
+
+        access_id = body['os-deny_access']['access_id']
+
+        try:
+            access = self.share_api.access_get(context, access_id)
+            if access.share_id != id:
+                raise exception.NotFound()
+            share = self.share_api.get(context, id)
+        except exception.NotFound as error:
+            raise webob.exc.HTTPNotFound(explanation=unicode(error))
+        self.share_api.deny_access(context, share, access)
+        return webob.Response(status_int=202)
+
+    @wsgi.action('os-access_list')
+    def _access_list(self, req, id, body):
+        """List access rules."""
+        context = req.environ['cinder.context']
+
+        share = self.share_api.get(context, id)
+        access_list = self.share_api.access_get_all(context, share)
+        return {'access_list': access_list}
+
+
+class Share_actions(extensions.ExtensionDescriptor):
+    """Enable share actions."""
+
+    name = 'ShareActions'
+    alias = 'share-actions'
+    namespace = ''
+    updated = '2012-08-14T00:00:00+00:00'
+
+    def get_controller_extensions(self):
+        controller = ShareActionsController()
+        extension = extensions.ControllerExtension(self, 'shares',
+                                                   controller)
+        return [extension]
diff --git a/cinder/api/contrib/share_snapshots.py b/cinder/api/contrib/share_snapshots.py
new file mode 100644
index 0000000000..56c4affff2
--- /dev/null
+++ b/cinder/api/contrib/share_snapshots.py
@@ -0,0 +1,181 @@
+# Copyright 2013 NetApp
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
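+
+# NOTE: a sketch of the create request this controller accepts; the
+# 'share-snapshot' body key and its fields come from create() below, and
+# the values shown are illustrative only:
+#
+#   POST /share-snapshots
+#     {"share-snapshot": {"share_id": "<share-id>",
+#                         "name": "snap1",
+#                         "description": "nightly snapshot"}}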
+ +"""The share snapshots api.""" + +import webob +from webob import exc + +from cinder.api import common +from cinder.api.contrib import shares +from cinder.api import extensions +from cinder.api.openstack import wsgi +from cinder.api.views import share_snapshots as snapshot_views +from cinder.api import xmlutil +from cinder import exception +from cinder.openstack.common import log as logging +from cinder import share + + +LOG = logging.getLogger(__name__) + + +def make_snapshot(elem): + elem.set('id') + elem.set('size') + elem.set('status') + elem.set('name') + elem.set('description') + elem.set('share_proto') + elem.set('export_location') + + +class SnapshotTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = xmlutil.TemplateElement('share-snapshot', + selector='share-snapshot') + make_snapshot(root) + return xmlutil.MasterTemplate(root, 1) + + +class SnapshotsTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = xmlutil.TemplateElement('share-snapshots') + elem = xmlutil.SubTemplateElement(root, 'share-snapshot', + selector='share-snapshots') + make_snapshot(elem) + return xmlutil.MasterTemplate(root, 1) + + +class ShareSnapshotsController(wsgi.Controller): + """The Share Snapshots API controller for the OpenStack API.""" + + _view_builder_class = snapshot_views.ViewBuilder + + def __init__(self): + super(ShareSnapshotsController, self).__init__() + self.share_api = share.API() + + @wsgi.serializers(xml=SnapshotTemplate) + def show(self, req, id): + """Return data about the given snapshot.""" + context = req.environ['cinder.context'] + + try: + snapshot = self.share_api.get_snapshot(context, id) + except exception.NotFound: + raise exc.HTTPNotFound() + + return self._view_builder.detail(req, snapshot) + + def delete(self, req, id): + """Delete a snapshot.""" + context = req.environ['cinder.context'] + + LOG.audit(_("Delete snapshot with id: %s"), id, context=context) + + try: + snapshot = self.share_api.get_snapshot(context, id) + self.share_api.delete_snapshot(context, snapshot) + except exception.NotFound: + raise exc.HTTPNotFound() + return webob.Response(status_int=202) + + @wsgi.serializers(xml=SnapshotsTemplate) + def index(self, req): + """Returns a summary list of snapshots.""" + return self._get_snapshots(req, is_detail=False) + + @wsgi.serializers(xml=SnapshotsTemplate) + def detail(self, req): + """Returns a detailed list of snapshots.""" + return self._get_snapshots(req, is_detail=True) + + def _get_snapshots(self, req, is_detail): + """Returns a list of snapshots.""" + context = req.environ['cinder.context'] + + search_opts = {} + search_opts.update(req.GET) + + # NOTE(rushiagr): v2 API allows name instead of display_name + if 'name' in search_opts: + search_opts['display_name'] = search_opts['name'] + del search_opts['name'] + + shares.remove_invalid_options(context, search_opts, + self._get_snapshots_search_options()) + + snapshots = self.share_api.get_all_snapshots(context, + search_opts=search_opts) + limited_list = common.limited(snapshots, req) + if is_detail: + snapshots = self._view_builder.detail_list(req, limited_list) + else: + snapshots = self._view_builder.summary_list(req, limited_list) + return snapshots + + def _get_snapshots_search_options(self): + """Return share search options allowed by non-admin.""" + return ('name', 'status', 'share_id') + + @wsgi.response(202) + @wsgi.serializers(xml=SnapshotTemplate) + def create(self, req, body): + """Creates a new snapshot.""" + context = req.environ['cinder.context'] + + if not 
self.is_valid_body(body, 'share-snapshot'):
+            raise exc.HTTPUnprocessableEntity()
+
+        snapshot = body['share-snapshot']
+
+        share_id = snapshot['share_id']
+        share = self.share_api.get(context, share_id)
+        msg = _("Create snapshot from share %s")
+        LOG.audit(msg, share_id, context=context)
+
+        # NOTE(rushiagr): v2 API allows name instead of display_name
+        if 'name' in snapshot:
+            snapshot['display_name'] = snapshot.get('name')
+            del snapshot['name']
+
+        # NOTE(rushiagr): v2 API allows description instead of
+        # display_description
+        if 'description' in snapshot:
+            snapshot['display_description'] = snapshot.get('description')
+            del snapshot['description']
+
+        new_snapshot = self.share_api.create_snapshot(
+            context,
+            share,
+            snapshot.get('display_name'),
+            snapshot.get('display_description'))
+        return self._view_builder.summary(req, dict(new_snapshot.iteritems()))
+
+
+class Share_snapshots(extensions.ExtensionDescriptor):
+    """Enable share snapshots API."""
+    name = 'ShareSnapshots'
+    alias = 'share-snapshots'
+    namespace = ''
+    updated = '2013-03-01T00:00:00+00:00'
+
+    def get_resources(self):
+        controller = ShareSnapshotsController()
+        resource = extensions.ResourceExtension(
+            'share-snapshots', controller,
+            collection_actions={'detail': 'GET'})
+        return [resource]
diff --git a/cinder/api/contrib/shares.py b/cinder/api/contrib/shares.py
new file mode 100644
index 0000000000..104087ce1a
--- /dev/null
+++ b/cinder/api/contrib/shares.py
@@ -0,0 +1,215 @@
+# Copyright 2013 NetApp
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
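+
+# NOTE: a sketch of the create request this controller accepts; the field
+# names are taken from create() below and the values are illustrative only:
+#
+#   POST /shares
+#     {"share": {"share_proto": "NFS", "size": 1,
+#                "name": "share1", "description": "test share",
+#                "availability_zone": null, "snapshot_id": null}}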
+ +"""The shares api.""" + +import webob +from webob import exc + +from cinder.api import common +from cinder.api import extensions +from cinder.api.openstack import wsgi +from cinder.api.views import shares as share_views +from cinder.api import xmlutil +from cinder import exception +from cinder.openstack.common import log as logging +from cinder import share + + +LOG = logging.getLogger(__name__) + + +def make_share(elem): + elem.set('id') + elem.set('size') + elem.set('availability_zone') + elem.set('status') + elem.set('name') + elem.set('description') + elem.set('share_proto') + elem.set('export_location') + + +def remove_invalid_options(context, search_options, allowed_search_options): + """Remove search options that are not valid for non-admin API/context.""" + if context.is_admin: + # Allow all options + return + # Otherwise, strip out all unknown options + unknown_options = [opt for opt in search_options + if opt not in allowed_search_options] + bad_options = ", ".join(unknown_options) + log_msg = _("Removing options '%(bad_options)s' from query") % locals() + LOG.debug(log_msg) + for opt in unknown_options: + del search_options[opt] + + +class ShareTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = xmlutil.TemplateElement('share', selector='share') + make_share(root) + return xmlutil.MasterTemplate(root, 1) + + +class SharesTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = xmlutil.TemplateElement('shares') + elem = xmlutil.SubTemplateElement(root, 'share', selector='shares') + make_share(elem) + return xmlutil.MasterTemplate(root, 1) + + +class ShareController(wsgi.Controller): + """The Shares API controller for the OpenStack API.""" + + _view_builder_class = share_views.ViewBuilder + + def __init__(self): + super(ShareController, self).__init__() + self.share_api = share.API() + + @wsgi.serializers(xml=ShareTemplate) + def show(self, req, id): + """Return data about the given share.""" + context = req.environ['cinder.context'] + + try: + share = self.share_api.get(context, id) + except exception.NotFound: + raise exc.HTTPNotFound() + + return self._view_builder.detail(req, share) + + def delete(self, req, id): + """Delete a share.""" + context = req.environ['cinder.context'] + + LOG.audit(_("Delete share with id: %s"), id, context=context) + + try: + share = self.share_api.get(context, id) + self.share_api.delete(context, share) + except exception.NotFound: + raise exc.HTTPNotFound() + return webob.Response(status_int=202) + + @wsgi.serializers(xml=SharesTemplate) + def index(self, req): + """Returns a summary list of shares.""" + return self._get_shares(req, is_detail=False) + + @wsgi.serializers(xml=SharesTemplate) + def detail(self, req): + """Returns a detailed list of shares.""" + return self._get_shares(req, is_detail=True) + + def _get_shares(self, req, is_detail): + """Returns a list of shares, transformed through view + builder. 
+ """ + context = req.environ['cinder.context'] + + search_opts = {} + search_opts.update(req.GET) + + # NOTE(rushiagr): v2 API allows name instead of display_name + if 'name' in search_opts: + search_opts['display_name'] = search_opts['name'] + del search_opts['name'] + + remove_invalid_options(context, search_opts, + self._get_share_search_options()) + + shares = self.share_api.get_all(context, search_opts=search_opts) + + limited_list = common.limited(shares, req) + + if is_detail: + shares = self._view_builder.detail_list(req, limited_list) + else: + shares = self._view_builder.summary_list(req, limited_list) + return shares + + def _get_share_search_options(self): + """Return share search options allowed by non-admin.""" + return ('name', 'status') + + @wsgi.serializers(xml=ShareTemplate) + def create(self, req, body): + """Creates a new share.""" + context = req.environ['cinder.context'] + + if not self.is_valid_body(body, 'share'): + raise exc.HTTPUnprocessableEntity() + + share = body['share'] + + # NOTE(rushiagr): v2 API allows name instead of display_name + if share.get('name'): + share['display_name'] = share.get('name') + del share['name'] + + # NOTE(rushiagr): v2 API allows description instead of + # display_description + if share.get('description'): + share['display_description'] = share.get('description') + del share['description'] + + size = share['size'] + share_proto = share['share_proto'].upper() + + msg = (_("Create %(share_proto)s share of %(size)s GB") % + {'share_proto': share_proto, 'size': size}) + LOG.audit(msg, context=context) + + kwargs = {} + kwargs['availability_zone'] = share.get('availability_zone') + + snapshot_id = share.get('snapshot_id') + if snapshot_id is not None: + kwargs['snapshot'] = self.share_api.get_snapshot(context, + snapshot_id) + else: + kwargs['snapshot'] = None + + display_name = share.get('display_name') + display_description = share.get('display_description') + new_share = self.share_api.create(context, + share_proto, + size, + display_name, + display_description, + **kwargs) + + # TODO(vish): Instance should be None at db layer instead of + # trying to lazy load, but for now we turn it into + # a dict to avoid an error. + return self._view_builder.summary(req, dict(new_share.iteritems())) + + +class Shares(extensions.ExtensionDescriptor): + """Enable share API.""" + name = 'Shares' + alias = 'shares' + namespace = '' + updated = '2013-01-29T00:00:00+00:00' + + def get_resources(self): + controller = ShareController() + resource = extensions.ResourceExtension( + 'shares', controller, collection_actions={'detail': 'GET'}, + member_actions={'action': 'POST'}) + return [resource] diff --git a/cinder/api/contrib/types_extra_specs.py b/cinder/api/contrib/types_extra_specs.py new file mode 100644 index 0000000000..7377f5245d --- /dev/null +++ b/cinder/api/contrib/types_extra_specs.py @@ -0,0 +1,162 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 Zadara Storage Inc. +# Copyright (c) 2011 OpenStack LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""The volume types extra specs extension."""
+
+import webob
+import webob.exc
+
+from cinder.api import extensions
+from cinder.api.openstack import wsgi
+from cinder.api import xmlutil
+from cinder import db
+from cinder import exception
+from cinder.openstack.common.notifier import api as notifier_api
+from cinder.volume import volume_types
+
+authorize = extensions.extension_authorizer('volume', 'types_extra_specs')
+
+
+class VolumeTypeExtraSpecsTemplate(xmlutil.TemplateBuilder):
+    def construct(self):
+        root = xmlutil.make_flat_dict('extra_specs', selector='extra_specs')
+        return xmlutil.MasterTemplate(root, 1)
+
+
+class VolumeTypeExtraSpecTemplate(xmlutil.TemplateBuilder):
+    def construct(self):
+        tagname = xmlutil.Selector('key')
+
+        def extraspec_sel(obj, do_raise=False):
+            # Have to extract the key and value for later use...
+            key, value = obj.items()[0]
+            return dict(key=key, value=value)
+
+        root = xmlutil.TemplateElement(tagname, selector=extraspec_sel)
+        root.text = 'value'
+        return xmlutil.MasterTemplate(root, 1)
+
+
+class VolumeTypeExtraSpecsController(wsgi.Controller):
+    """The volume type extra specs API controller for the OpenStack API."""
+
+    def _get_extra_specs(self, context, type_id):
+        extra_specs = db.volume_type_extra_specs_get(context, type_id)
+        specs_dict = {}
+        for key, value in extra_specs.iteritems():
+            specs_dict[key] = value
+        return dict(extra_specs=specs_dict)
+
+    def _check_type(self, context, type_id):
+        try:
+            volume_types.get_volume_type(context, type_id)
+        except exception.NotFound as ex:
+            raise webob.exc.HTTPNotFound(explanation=unicode(ex))
+
+    @wsgi.serializers(xml=VolumeTypeExtraSpecsTemplate)
+    def index(self, req, type_id):
+        """Returns the list of extra specs for a given volume type."""
+        context = req.environ['cinder.context']
+        authorize(context)
+        self._check_type(context, type_id)
+        return self._get_extra_specs(context, type_id)
+
+    @wsgi.serializers(xml=VolumeTypeExtraSpecsTemplate)
+    def create(self, req, type_id, body=None):
+        context = req.environ['cinder.context']
+        authorize(context)
+
+        if not self.is_valid_body(body, 'extra_specs'):
+            raise webob.exc.HTTPBadRequest()
+
+        self._check_type(context, type_id)
+
+        specs = body['extra_specs']
+        db.volume_type_extra_specs_update_or_create(context,
+                                                    type_id,
+                                                    specs)
+        notifier_info = dict(type_id=type_id, specs=specs)
+        notifier_api.notify(context, 'volumeTypeExtraSpecs',
+                            'volume_type_extra_specs.create',
+                            notifier_api.INFO, notifier_info)
+        return body
+
+    @wsgi.serializers(xml=VolumeTypeExtraSpecTemplate)
+    def update(self, req, type_id, id, body=None):
+        context = req.environ['cinder.context']
+        authorize(context)
+        if not body:
+            expl = _('Request body empty')
+            raise webob.exc.HTTPBadRequest(explanation=expl)
+        self._check_type(context, type_id)
+        if id not in body:
+            expl = _('Request body and URI mismatch')
+            raise webob.exc.HTTPBadRequest(explanation=expl)
+        if len(body) > 1:
+            expl = _('Request body contains too many items')
+            raise webob.exc.HTTPBadRequest(explanation=expl)
+        db.volume_type_extra_specs_update_or_create(context,
+                                                    type_id,
+                                                    body)
+        notifier_info = dict(type_id=type_id, id=id)
+        notifier_api.notify(context, 'volumeTypeExtraSpecs',
+                            'volume_type_extra_specs.update',
+                            notifier_api.INFO, notifier_info)
+        return body
+
+    @wsgi.serializers(xml=VolumeTypeExtraSpecTemplate)
+    def show(self, req, type_id, id):
+        """Return a single extra spec item."""
+        context = req.environ['cinder.context']
+        authorize(context)
+        self._check_type(context, type_id)
+        specs = self._get_extra_specs(context, type_id)
+        if id in specs['extra_specs']:
+            return {id: specs['extra_specs'][id]}
+        else:
+            raise webob.exc.HTTPNotFound()
+
+    def delete(self, req, type_id, id):
+        """Deletes an existing extra spec."""
+        context = req.environ['cinder.context']
+        self._check_type(context, type_id)
+        authorize(context)
+        db.volume_type_extra_specs_delete(context, type_id, id)
+        notifier_info = dict(type_id=type_id, id=id)
+        notifier_api.notify(context, 'volumeTypeExtraSpecs',
+                            'volume_type_extra_specs.delete',
+                            notifier_api.INFO, notifier_info)
+        return webob.Response(status_int=202)
+
+
+class Types_extra_specs(extensions.ExtensionDescriptor):
+    """Types extra specs support."""
+
+    name = "TypesExtraSpecs"
+    alias = "os-types-extra-specs"
+    namespace = "http://docs.openstack.org/volume/ext/types-extra-specs/api/v1"
+    updated = "2011-08-24T00:00:00+00:00"
+
+    def get_resources(self):
+        resources = []
+        res = extensions.ResourceExtension('extra_specs',
+                                           VolumeTypeExtraSpecsController(),
+                                           parent=dict(member_name='type',
+                                                       collection_name='types')
+                                           )
+        resources.append(res)
+
+        return resources
diff --git a/cinder/api/contrib/types_manage.py b/cinder/api/contrib/types_manage.py
new file mode 100644
index 0000000000..eb17e730be
--- /dev/null
+++ b/cinder/api/contrib/types_manage.py
@@ -0,0 +1,122 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Zadara Storage Inc.
+# Copyright (c) 2011 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
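+
+# NOTE: illustrative bodies for the 'create' and 'delete' actions defined
+# below; the 'volume_type' key and its 'name'/'extra_specs' fields come
+# from _create(), and the values are examples only:
+#
+#   POST /types
+#     {"volume_type": {"name": "gold", "extra_specs": {"qos": "high"}}}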
+ +"""The volume types manage extension.""" + +import webob + +from cinder.api import extensions +from cinder.api.openstack import wsgi +from cinder.api.v1 import types +from cinder.api.views import types as views_types +from cinder import exception +from cinder.openstack.common.notifier import api as notifier_api +from cinder.volume import volume_types + + +authorize = extensions.extension_authorizer('volume', 'types_manage') + + +class VolumeTypesManageController(wsgi.Controller): + """The volume types API controller for the OpenStack API.""" + + _view_builder_class = views_types.ViewBuilder + + def _notify_voloume_type_error(self, context, method, payload): + notifier_api.notify(context, + 'volumeType', + method, + notifier_api.ERROR, + payload) + + @wsgi.action("create") + @wsgi.serializers(xml=types.VolumeTypeTemplate) + def _create(self, req, body): + """Creates a new volume type.""" + context = req.environ['cinder.context'] + authorize(context) + + if not self.is_valid_body(body, 'volume_type'): + raise webob.exc.HTTPBadRequest() + + vol_type = body['volume_type'] + name = vol_type.get('name', None) + specs = vol_type.get('extra_specs', {}) + + if name is None or name == "": + raise webob.exc.HTTPBadRequest() + + try: + volume_types.create(context, name, specs) + vol_type = volume_types.get_volume_type_by_name(context, name) + notifier_info = dict(volume_types=vol_type) + notifier_api.notify(context, 'volumeType', + 'volume_type.create', + notifier_api.INFO, notifier_info) + + except exception.VolumeTypeExists as err: + notifier_err = dict(volume_types=vol_type, error_message=str(err)) + self._notify_voloume_type_error(context, + 'volume_type.create', + notifier_err) + + raise webob.exc.HTTPConflict(explanation=str(err)) + except exception.NotFound as err: + notifier_err = dict(volume_types=vol_type, error_message=str(err)) + self._notify_voloume_type_error(context, + 'volume_type.create', + notifier_err) + raise webob.exc.HTTPNotFound() + + return self._view_builder.show(req, vol_type) + + @wsgi.action("delete") + def _delete(self, req, id): + """Deletes an existing volume type.""" + context = req.environ['cinder.context'] + authorize(context) + + try: + vol_type = volume_types.get_volume_type(context, id) + volume_types.destroy(context, vol_type['id']) + notifier_info = dict(volume_types=vol_type) + notifier_api.notify(context, 'volumeType', + 'volume_type.delete', + notifier_api.INFO, notifier_info) + except exception.NotFound as err: + notifier_err = dict(id=id, error_message=str(err)) + self._notify_voloume_type_error(context, + 'volume_type.delete', + notifier_err) + + raise webob.exc.HTTPNotFound() + + return webob.Response(status_int=202) + + +class Types_manage(extensions.ExtensionDescriptor): + """Types manage support.""" + + name = "TypesManage" + alias = "os-types-manage" + namespace = "http://docs.openstack.org/volume/ext/types-manage/api/v1" + updated = "2011-08-24T00:00:00+00:00" + + def get_controller_extensions(self): + controller = VolumeTypesManageController() + extension = extensions.ControllerExtension(self, 'types', controller) + return [extension] diff --git a/cinder/api/contrib/volume_actions.py b/cinder/api/contrib/volume_actions.py new file mode 100644 index 0000000000..50587c3692 --- /dev/null +++ b/cinder/api/contrib/volume_actions.py @@ -0,0 +1,204 @@ +# Copyright 2012 OpenStack, LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import webob + +from cinder.api import extensions +from cinder.api.openstack import wsgi +from cinder.api import xmlutil +from cinder import exception +from cinder import flags +from cinder.openstack.common import log as logging +from cinder.openstack.common.rpc import common as rpc_common +from cinder import utils +from cinder import volume + + +FLAGS = flags.FLAGS +LOG = logging.getLogger(__name__) + + +def authorize(context, action_name): + action = 'volume_actions:%s' % action_name + extensions.extension_authorizer('volume', action)(context) + + +class VolumeToImageSerializer(xmlutil.TemplateBuilder): + def construct(self): + root = xmlutil.TemplateElement('os-volume_upload_image', + selector='os-volume_upload_image') + root.set('id') + root.set('updated_at') + root.set('status') + root.set('display_description') + root.set('size') + root.set('volume_type') + root.set('image_id') + root.set('container_format') + root.set('disk_format') + root.set('image_name') + return xmlutil.MasterTemplate(root, 1) + + +class VolumeToImageDeserializer(wsgi.XMLDeserializer): + """Deserializer to handle xml-formatted requests.""" + def default(self, string): + dom = utils.safe_minidom_parse_string(string) + action_node = dom.childNodes[0] + action_name = action_node.tagName + + action_data = {} + attributes = ["force", "image_name", "container_format", "disk_format"] + for attr in attributes: + if action_node.hasAttribute(attr): + action_data[attr] = action_node.getAttribute(attr) + if 'force' in action_data and action_data['force'] == 'True': + action_data['force'] = True + return {'body': {action_name: action_data}} + + +class VolumeActionsController(wsgi.Controller): + def __init__(self, *args, **kwargs): + super(VolumeActionsController, self).__init__(*args, **kwargs) + self.volume_api = volume.API() + + @wsgi.action('os-attach') + def _attach(self, req, id, body): + """Add attachment metadata.""" + context = req.environ['cinder.context'] + volume = self.volume_api.get(context, id) + + instance_uuid = body['os-attach']['instance_uuid'] + mountpoint = body['os-attach']['mountpoint'] + + self.volume_api.attach(context, volume, + instance_uuid, mountpoint) + return webob.Response(status_int=202) + + @wsgi.action('os-detach') + def _detach(self, req, id, body): + """Clear attachment metadata.""" + context = req.environ['cinder.context'] + volume = self.volume_api.get(context, id) + self.volume_api.detach(context, volume) + return webob.Response(status_int=202) + + @wsgi.action('os-reserve') + def _reserve(self, req, id, body): + """Mark volume as reserved.""" + context = req.environ['cinder.context'] + volume = self.volume_api.get(context, id) + self.volume_api.reserve_volume(context, volume) + return webob.Response(status_int=202) + + @wsgi.action('os-unreserve') + def _unreserve(self, req, id, body): + """Unmark volume as reserved.""" + context = req.environ['cinder.context'] + volume = self.volume_api.get(context, id) + self.volume_api.unreserve_volume(context, volume) + return webob.Response(status_int=202) + + @wsgi.action('os-begin_detaching') + def _begin_detaching(self, req, id, 
body):
+        """Update volume status to 'detaching'."""
+        context = req.environ['cinder.context']
+        volume = self.volume_api.get(context, id)
+        self.volume_api.begin_detaching(context, volume)
+        return webob.Response(status_int=202)
+
+    @wsgi.action('os-roll_detaching')
+    def _roll_detaching(self, req, id, body):
+        """Roll back volume status to 'in-use'."""
+        context = req.environ['cinder.context']
+        volume = self.volume_api.get(context, id)
+        self.volume_api.roll_detaching(context, volume)
+        return webob.Response(status_int=202)
+
+    @wsgi.action('os-initialize_connection')
+    def _initialize_connection(self, req, id, body):
+        """Initialize volume attachment."""
+        context = req.environ['cinder.context']
+        volume = self.volume_api.get(context, id)
+        connector = body['os-initialize_connection']['connector']
+        info = self.volume_api.initialize_connection(context,
+                                                     volume,
+                                                     connector)
+        return {'connection_info': info}
+
+    @wsgi.action('os-terminate_connection')
+    def _terminate_connection(self, req, id, body):
+        """Terminate volume attachment."""
+        context = req.environ['cinder.context']
+        volume = self.volume_api.get(context, id)
+        connector = body['os-terminate_connection']['connector']
+        self.volume_api.terminate_connection(context, volume, connector)
+        return webob.Response(status_int=202)
+
+    @wsgi.response(202)
+    @wsgi.action('os-volume_upload_image')
+    @wsgi.serializers(xml=VolumeToImageSerializer)
+    @wsgi.deserializers(xml=VolumeToImageDeserializer)
+    def _volume_upload_image(self, req, id, body):
+        """Uploads the specified volume to image service."""
+        context = req.environ['cinder.context']
+        try:
+            params = body['os-volume_upload_image']
+        except (TypeError, KeyError):
+            msg = _("Invalid request body")
+            raise webob.exc.HTTPBadRequest(explanation=msg)
+
+        if not params.get("image_name"):
+            msg = _("No image_name was specified in request.")
+            raise webob.exc.HTTPBadRequest(explanation=msg)
+
+        force = params.get('force', False)
+        try:
+            volume = self.volume_api.get(context, id)
+        except exception.VolumeNotFound as error:
+            raise webob.exc.HTTPNotFound(explanation=unicode(error))
+        authorize(context, "upload_image")
+        image_metadata = {"container_format": params.get("container_format",
+                                                         "bare"),
+                          "disk_format": params.get("disk_format", "raw"),
+                          "name": params["image_name"]}
+        try:
+            response = self.volume_api.copy_volume_to_image(context,
+                                                            volume,
+                                                            image_metadata,
+                                                            force)
+        except exception.InvalidVolume as error:
+            raise webob.exc.HTTPBadRequest(explanation=unicode(error))
+        except ValueError as error:
+            raise webob.exc.HTTPBadRequest(explanation=unicode(error))
+        except rpc_common.RemoteError as error:
+            msg = "%(err_type)s: %(err_msg)s" % {'err_type': error.exc_type,
+                                                 'err_msg': error.value}
+            raise webob.exc.HTTPBadRequest(explanation=msg)
+        return {'os-volume_upload_image': response}
+
+
+class Volume_actions(extensions.ExtensionDescriptor):
+    """Enable volume actions."""
+
+    name = "VolumeActions"
+    alias = "os-volume-actions"
+    namespace = "http://docs.openstack.org/volume/ext/volume-actions/api/v1.1"
+    updated = "2012-05-31T00:00:00+00:00"
+
+    def get_controller_extensions(self):
+        controller = VolumeActionsController()
+        extension = extensions.ControllerExtension(self, 'volumes', controller)
+        return [extension]
diff --git a/cinder/api/contrib/volume_host_attribute.py b/cinder/api/contrib/volume_host_attribute.py
new file mode 100644
index 0000000000..183e72587d
--- /dev/null
+++ b/cinder/api/contrib/volume_host_attribute.py
@@ -0,0 +1,93 @@
+# Copyright 2012 OpenStack,
LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from cinder.api import extensions +from cinder.api.openstack import wsgi +from cinder.api import xmlutil +from cinder.openstack.common import log as logging +from cinder import volume + + +LOG = logging.getLogger(__name__) +authorize = extensions.soft_extension_authorizer('volume', + 'volume_host_attribute') + + +class VolumeHostAttributeController(wsgi.Controller): + def __init__(self, *args, **kwargs): + super(VolumeHostAttributeController, self).__init__(*args, **kwargs) + self.volume_api = volume.API() + + def _add_volume_host_attribute(self, context, resp_volume): + try: + db_volume = self.volume_api.get(context, resp_volume['id']) + except Exception: + return + else: + key = "%s:host" % Volume_host_attribute.alias + resp_volume[key] = db_volume['host'] + + @wsgi.extends + def show(self, req, resp_obj, id): + context = req.environ['cinder.context'] + if authorize(context): + resp_obj.attach(xml=VolumeHostAttributeTemplate()) + self._add_volume_host_attribute(context, resp_obj.obj['volume']) + + @wsgi.extends + def detail(self, req, resp_obj): + context = req.environ['cinder.context'] + if authorize(context): + resp_obj.attach(xml=VolumeListHostAttributeTemplate()) + for volume in list(resp_obj.obj['volumes']): + self._add_volume_host_attribute(context, volume) + + +class Volume_host_attribute(extensions.ExtensionDescriptor): + """Expose host as an attribute of a volume.""" + + name = "VolumeHostAttribute" + alias = "os-vol-host-attr" + namespace = ("http://docs.openstack.org/volume/ext/" + "volume_host_attribute/api/v1") + updated = "2011-11-03T00:00:00+00:00" + + def get_controller_extensions(self): + controller = VolumeHostAttributeController() + extension = extensions.ControllerExtension(self, 'volumes', controller) + return [extension] + + +def make_volume(elem): + elem.set('{%s}host' % Volume_host_attribute.namespace, + '%s:host' % Volume_host_attribute.alias) + + +class VolumeHostAttributeTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = xmlutil.TemplateElement('volume', selector='volume') + make_volume(root) + alias = Volume_host_attribute.alias + namespace = Volume_host_attribute.namespace + return xmlutil.SlaveTemplate(root, 1, nsmap={alias: namespace}) + + +class VolumeListHostAttributeTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = xmlutil.TemplateElement('volumes') + elem = xmlutil.SubTemplateElement(root, 'volume', selector='volumes') + make_volume(elem) + alias = Volume_host_attribute.alias + namespace = Volume_host_attribute.namespace + return xmlutil.SlaveTemplate(root, 1, nsmap={alias: namespace}) diff --git a/cinder/api/contrib/volume_image_metadata.py b/cinder/api/contrib/volume_image_metadata.py new file mode 100644 index 0000000000..58e5cd2951 --- /dev/null +++ b/cinder/api/contrib/volume_image_metadata.py @@ -0,0 +1,106 @@ +# Copyright 2012 OpenStack, LLC. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""The Volume Image Metadata API extension.""" + +from cinder.api import extensions +from cinder.api.openstack import wsgi +from cinder.api import xmlutil +from cinder import volume + + +authorize = extensions.soft_extension_authorizer('volume', + 'volume_image_metadata') + + +class VolumeImageMetadataController(wsgi.Controller): + def __init__(self, *args, **kwargs): + super(VolumeImageMetadataController, self).__init__(*args, **kwargs) + self.volume_api = volume.API() + + def _add_image_metadata(self, context, resp_volume): + try: + image_meta = self.volume_api.get_volume_image_metadata( + context, resp_volume) + except Exception: + return + else: + if image_meta: + resp_volume['volume_image_metadata'] = dict( + image_meta.iteritems()) + + @wsgi.extends + def show(self, req, resp_obj, id): + context = req.environ['cinder.context'] + if authorize(context): + resp_obj.attach(xml=VolumeImageMetadataTemplate()) + self._add_image_metadata(context, resp_obj.obj['volume']) + + @wsgi.extends + def detail(self, req, resp_obj): + context = req.environ['cinder.context'] + if authorize(context): + resp_obj.attach(xml=VolumesImageMetadataTemplate()) + for volume in list(resp_obj.obj.get('volumes', [])): + self._add_image_metadata(context, volume) + + +class Volume_image_metadata(extensions.ExtensionDescriptor): + """Show image metadata associated with the volume""" + + name = "VolumeImageMetadata" + alias = "os-vol-image-meta" + namespace = ("http://docs.openstack.org/volume/ext/" + "volume_image_metadata/api/v1") + updated = "2012-12-07T00:00:00+00:00" + + def get_controller_extensions(self): + controller = VolumeImageMetadataController() + extension = extensions.ControllerExtension(self, 'volumes', controller) + return [extension] + + +class VolumeImageMetadataMetadataTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = xmlutil.TemplateElement('volume_image_metadata', + selector='volume_image_metadata') + elem = xmlutil.SubTemplateElement(root, 'meta', + selector=xmlutil.get_items) + elem.set('key', 0) + elem.text = 1 + + return xmlutil.MasterTemplate(root, 1) + + +class VolumeImageMetadataTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = xmlutil.TemplateElement('volume', selector='volume') + root.append(VolumeImageMetadataMetadataTemplate()) + + alias = Volume_image_metadata.alias + namespace = Volume_image_metadata.namespace + + return xmlutil.SlaveTemplate(root, 1, nsmap={alias: namespace}) + + +class VolumesImageMetadataTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = xmlutil.TemplateElement('volumes') + elem = xmlutil.SubTemplateElement(root, 'volume', selector='volume') + elem.append(VolumeImageMetadataMetadataTemplate()) + + alias = Volume_image_metadata.alias + namespace = Volume_image_metadata.namespace + + return xmlutil.SlaveTemplate(root, 1, nsmap={alias: namespace}) diff --git a/cinder/api/contrib/volume_tenant_attribute.py b/cinder/api/contrib/volume_tenant_attribute.py new file mode 100644 
index 0000000000..620ae10065 --- /dev/null +++ b/cinder/api/contrib/volume_tenant_attribute.py @@ -0,0 +1,91 @@ +# Copyright 2012 OpenStack, LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from cinder.api import extensions +from cinder.api.openstack import wsgi +from cinder.api import xmlutil +from cinder import volume + + +authorize = extensions.soft_extension_authorizer('volume', + 'volume_tenant_attribute') + + +class VolumeTenantAttributeController(wsgi.Controller): + def __init__(self, *args, **kwargs): + super(VolumeTenantAttributeController, self).__init__(*args, **kwargs) + self.volume_api = volume.API() + + def _add_volume_tenant_attribute(self, context, resp_volume): + try: + db_volume = self.volume_api.get(context, resp_volume['id']) + except Exception: + return + else: + key = "%s:tenant_id" % Volume_tenant_attribute.alias + resp_volume[key] = db_volume['project_id'] + + @wsgi.extends + def show(self, req, resp_obj, id): + context = req.environ['cinder.context'] + if authorize(context): + resp_obj.attach(xml=VolumeTenantAttributeTemplate()) + self._add_volume_tenant_attribute(context, resp_obj.obj['volume']) + + @wsgi.extends + def detail(self, req, resp_obj): + context = req.environ['cinder.context'] + if authorize(context): + resp_obj.attach(xml=VolumeListTenantAttributeTemplate()) + for volume in list(resp_obj.obj['volumes']): + self._add_volume_tenant_attribute(context, volume) + + +class Volume_tenant_attribute(extensions.ExtensionDescriptor): + """Expose the internal project_id as an attribute of a volume.""" + + name = "VolumeTenantAttribute" + alias = "os-vol-tenant-attr" + namespace = ("http://docs.openstack.org/volume/ext/" + "volume_tenant_attribute/api/v1") + updated = "2011-11-03T00:00:00+00:00" + + def get_controller_extensions(self): + controller = VolumeTenantAttributeController() + extension = extensions.ControllerExtension(self, 'volumes', controller) + return [extension] + + +def make_volume(elem): + elem.set('{%s}tenant_id' % Volume_tenant_attribute.namespace, + '%s:tenant_id' % Volume_tenant_attribute.alias) + + +class VolumeTenantAttributeTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = xmlutil.TemplateElement('volume', selector='volume') + make_volume(root) + alias = Volume_tenant_attribute.alias + namespace = Volume_tenant_attribute.namespace + return xmlutil.SlaveTemplate(root, 1, nsmap={alias: namespace}) + + +class VolumeListTenantAttributeTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = xmlutil.TemplateElement('volumes') + elem = xmlutil.SubTemplateElement(root, 'volume', selector='volumes') + make_volume(elem) + alias = Volume_tenant_attribute.alias + namespace = Volume_tenant_attribute.namespace + return xmlutil.SlaveTemplate(root, 1, nsmap={alias: namespace}) diff --git a/cinder/api/extensions.py b/cinder/api/extensions.py new file mode 100644 index 0000000000..67e682a0cc --- /dev/null +++ b/cinder/api/extensions.py @@ -0,0 +1,407 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack 
LLC. +# Copyright 2011 Justin Santa Barbara +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import os + +import webob.dec +import webob.exc + +import cinder.api.openstack +from cinder.api.openstack import wsgi +from cinder.api import xmlutil +from cinder import exception +from cinder import flags +from cinder.openstack.common import exception as common_exception +from cinder.openstack.common import importutils +from cinder.openstack.common import log as logging +import cinder.policy + + +LOG = logging.getLogger(__name__) +FLAGS = flags.FLAGS + + +class ExtensionDescriptor(object): + """Base class that defines the contract for extensions. + + Note that you don't have to derive from this class to have a valid + extension; it is purely a convenience. + + """ + + # The name of the extension, e.g., 'Fox In Socks' + name = None + + # The alias for the extension, e.g., 'FOXNSOX' + alias = None + + # Description comes from the docstring for the class + + # The XML namespace for the extension, e.g., + # 'http://www.fox.in.socks/api/ext/pie/v1.0' + namespace = None + + # The timestamp when the extension was last updated, e.g., + # '2011-01-22T13:25:27-06:00' + updated = None + + def __init__(self, ext_mgr): + """Register extension with the extension manager.""" + + ext_mgr.register(self) + + def get_resources(self): + """List of extensions.ResourceExtension extension objects. + + Resources define new nouns, and are accessible through URLs. + + """ + resources = [] + return resources + + def get_controller_extensions(self): + """List of extensions.ControllerExtension extension objects. + + Controller extensions are used to extend existing controllers. 
+ """ + controller_exts = [] + return controller_exts + + @classmethod + def nsmap(cls): + """Synthesize a namespace map from extension.""" + + # Start with a base nsmap + nsmap = ext_nsmap.copy() + + # Add the namespace for the extension + nsmap[cls.alias] = cls.namespace + + return nsmap + + @classmethod + def xmlname(cls, name): + """Synthesize element and attribute names.""" + + return '{%s}%s' % (cls.namespace, name) + + +def make_ext(elem): + elem.set('name') + elem.set('namespace') + elem.set('alias') + elem.set('updated') + + desc = xmlutil.SubTemplateElement(elem, 'description') + desc.text = 'description' + + xmlutil.make_links(elem, 'links') + + +ext_nsmap = {None: xmlutil.XMLNS_COMMON_V10, 'atom': xmlutil.XMLNS_ATOM} + + +class ExtensionTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = xmlutil.TemplateElement('extension', selector='extension') + make_ext(root) + return xmlutil.MasterTemplate(root, 1, nsmap=ext_nsmap) + + +class ExtensionsTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = xmlutil.TemplateElement('extensions') + elem = xmlutil.SubTemplateElement(root, 'extension', + selector='extensions') + make_ext(elem) + return xmlutil.MasterTemplate(root, 1, nsmap=ext_nsmap) + + +class ExtensionsResource(wsgi.Resource): + + def __init__(self, extension_manager): + self.extension_manager = extension_manager + super(ExtensionsResource, self).__init__(None) + + def _translate(self, ext): + ext_data = {} + ext_data['name'] = ext.name + ext_data['alias'] = ext.alias + ext_data['description'] = ext.__doc__ + ext_data['namespace'] = ext.namespace + ext_data['updated'] = ext.updated + ext_data['links'] = [] # TODO(dprince): implement extension links + return ext_data + + @wsgi.serializers(xml=ExtensionsTemplate) + def index(self, req): + extensions = [] + for _alias, ext in self.extension_manager.extensions.iteritems(): + extensions.append(self._translate(ext)) + return dict(extensions=extensions) + + @wsgi.serializers(xml=ExtensionTemplate) + def show(self, req, id): + try: + # NOTE(dprince): the extensions alias is used as the 'id' for show + ext = self.extension_manager.extensions[id] + except KeyError: + raise webob.exc.HTTPNotFound() + + return dict(extension=self._translate(ext)) + + def delete(self, req, id): + raise webob.exc.HTTPNotFound() + + def create(self, req): + raise webob.exc.HTTPNotFound() + + +class ExtensionManager(object): + """Load extensions from the configured extension path. + + See cinder/tests/api/extensions/foxinsocks/extension.py for an + example extension implementation. 
+ + """ + + def __init__(self): + LOG.audit(_('Initializing extension manager.')) + + self.cls_list = FLAGS.osapi_volume_extension + self.extensions = {} + self._load_extensions() + + def is_loaded(self, alias): + return alias in self.extensions + + def register(self, ext): + # Do nothing if the extension doesn't check out + if not self._check_extension(ext): + return + + alias = ext.alias + LOG.audit(_('Loaded extension: %s'), alias) + + if alias in self.extensions: + raise exception.Error("Found duplicate extension: %s" % alias) + self.extensions[alias] = ext + + def get_resources(self): + """Returns a list of ResourceExtension objects.""" + + resources = [] + resources.append(ResourceExtension('extensions', + ExtensionsResource(self))) + + for ext in self.extensions.values(): + try: + resources.extend(ext.get_resources()) + except AttributeError: + # NOTE(dprince): Extension aren't required to have resource + # extensions + pass + return resources + + def get_controller_extensions(self): + """Returns a list of ControllerExtension objects.""" + controller_exts = [] + for ext in self.extensions.values(): + try: + get_ext_method = ext.get_controller_extensions + except AttributeError: + # NOTE(Vek): Extensions aren't required to have + # controller extensions + continue + controller_exts.extend(get_ext_method()) + return controller_exts + + def _check_extension(self, extension): + """Checks for required methods in extension objects.""" + try: + LOG.debug(_('Ext name: %s'), extension.name) + LOG.debug(_('Ext alias: %s'), extension.alias) + LOG.debug(_('Ext description: %s'), + ' '.join(extension.__doc__.strip().split())) + LOG.debug(_('Ext namespace: %s'), extension.namespace) + LOG.debug(_('Ext updated: %s'), extension.updated) + except AttributeError as ex: + LOG.exception(_("Exception loading extension: %s"), unicode(ex)) + return False + + return True + + def load_extension(self, ext_factory): + """Execute an extension factory. + + Loads an extension. The 'ext_factory' is the name of a + callable that will be imported and called with one + argument--the extension manager. The factory callable is + expected to call the register() method at least once. + """ + + LOG.debug(_("Loading extension %s"), ext_factory) + + # Load the factory + factory = importutils.import_class(ext_factory) + + # Call it + LOG.debug(_("Calling extension factory %s"), ext_factory) + factory(self) + + def _load_extensions(self): + """Load extensions specified on the command line.""" + + extensions = list(self.cls_list) + + # NOTE(thingee): Backwards compat for the old extension loader path. + # We can drop this post-grizzly in the H release. + old_contrib_path = ('cinder.api.openstack.volume.contrib.' + 'standard_extensions') + new_contrib_path = 'cinder.api.contrib.standard_extensions' + if old_contrib_path in extensions: + LOG.warn(_('osapi_volume_extension is set to deprecated path: %s'), + old_contrib_path) + LOG.warn(_('Please set your flag or cinder.conf settings for ' + 'osapi_volume_extension to: %s'), new_contrib_path) + extensions = [e.replace(old_contrib_path, new_contrib_path) + for e in extensions] + + for ext_factory in extensions: + try: + self.load_extension(ext_factory) + except Exception as exc: + LOG.warn(_('Failed to load extension %(ext_factory)s: ' + '%(exc)s') % locals()) + + +class ControllerExtension(object): + """Extend core controllers of cinder OpenStack API. + + Provide a way to extend existing cinder OpenStack API core + controllers. 
+ """ + + def __init__(self, extension, collection, controller): + self.extension = extension + self.collection = collection + self.controller = controller + + +class ResourceExtension(object): + """Add top level resources to the OpenStack API in cinder.""" + + def __init__(self, collection, controller, parent=None, + collection_actions=None, member_actions=None, + custom_routes_fn=None): + if not collection_actions: + collection_actions = {} + if not member_actions: + member_actions = {} + self.collection = collection + self.controller = controller + self.parent = parent + self.collection_actions = collection_actions + self.member_actions = member_actions + self.custom_routes_fn = custom_routes_fn + + +def load_standard_extensions(ext_mgr, logger, path, package, ext_list=None): + """Registers all standard API extensions.""" + + # Walk through all the modules in our directory... + our_dir = path[0] + for dirpath, dirnames, filenames in os.walk(our_dir): + # Compute the relative package name from the dirpath + relpath = os.path.relpath(dirpath, our_dir) + if relpath == '.': + relpkg = '' + else: + relpkg = '.%s' % '.'.join(relpath.split(os.sep)) + + # Now, consider each file in turn, only considering .py files + for fname in filenames: + root, ext = os.path.splitext(fname) + + # Skip __init__ and anything that's not .py + if ext != '.py' or root == '__init__': + continue + + # Try loading it + classname = "%s%s" % (root[0].upper(), root[1:]) + classpath = ("%s%s.%s.%s" % + (package, relpkg, root, classname)) + + if ext_list is not None and classname not in ext_list: + logger.debug("Skipping extension: %s" % classpath) + continue + + try: + ext_mgr.load_extension(classpath) + except Exception as exc: + logger.warn(_('Failed to load extension %(classpath)s: ' + '%(exc)s') % locals()) + + # Now, let's consider any subdirectories we may have... + subdirs = [] + for dname in dirnames: + # Skip it if it does not have __init__.py + if not os.path.exists(os.path.join(dirpath, dname, + '__init__.py')): + continue + + # If it has extension(), delegate... + ext_name = ("%s%s.%s.extension" % + (package, relpkg, dname)) + try: + ext = importutils.import_class(ext_name) + except common_exception.NotFound: + # extension() doesn't exist on it, so we'll explore + # the directory for ourselves + subdirs.append(dname) + else: + try: + ext(ext_mgr) + except Exception as exc: + logger.warn(_('Failed to load extension %(ext_name)s: ' + '%(exc)s') % locals()) + + # Update the list of directories we'll explore... + dirnames[:] = subdirs + + +def extension_authorizer(api_name, extension_name): + def authorize(context, target=None): + if target is None: + target = {'project_id': context.project_id, + 'user_id': context.user_id} + action = '%s_extension:%s' % (api_name, extension_name) + cinder.policy.enforce(context, action, target) + return authorize + + +def soft_extension_authorizer(api_name, extension_name): + hard_authorize = extension_authorizer(api_name, extension_name) + + def authorize(context): + try: + hard_authorize(context) + return True + except exception.NotAuthorized: + return False + return authorize diff --git a/cinder/api/middleware/__init__.py b/cinder/api/middleware/__init__.py new file mode 100644 index 0000000000..d65c689a83 --- /dev/null +++ b/cinder/api/middleware/__init__.py @@ -0,0 +1,16 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/cinder/api/middleware/auth.py b/cinder/api/middleware/auth.py new file mode 100644 index 0000000000..6c6e23862c --- /dev/null +++ b/cinder/api/middleware/auth.py @@ -0,0 +1,140 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Common Auth Middleware. + +""" +import os + +from oslo.config import cfg +import webob.dec +import webob.exc + +from cinder.api.openstack import wsgi +from cinder import context +from cinder import flags +from cinder.openstack.common import log as logging +from cinder import wsgi as base_wsgi + +use_forwarded_for_opt = cfg.BoolOpt( + 'use_forwarded_for', + default=False, + help='Treat X-Forwarded-For as the canonical remote address. 
' + 'Only enable this if you have a sanitizing proxy.') + +FLAGS = flags.FLAGS +FLAGS.register_opt(use_forwarded_for_opt) +LOG = logging.getLogger(__name__) + + +def pipeline_factory(loader, global_conf, **local_conf): + """A paste pipeline replica that keys off of auth_strategy.""" + pipeline = local_conf[FLAGS.auth_strategy] + if not FLAGS.api_rate_limit: + limit_name = FLAGS.auth_strategy + '_nolimit' + pipeline = local_conf.get(limit_name, pipeline) + pipeline = pipeline.split() + filters = [loader.get_filter(n) for n in pipeline[:-1]] + app = loader.get_app(pipeline[-1]) + filters.reverse() + for filter in filters: + app = filter(app) + return app + + +class InjectContext(base_wsgi.Middleware): + """Add a 'cinder.context' to WSGI environ.""" + + def __init__(self, context, *args, **kwargs): + self.context = context + super(InjectContext, self).__init__(*args, **kwargs) + + @webob.dec.wsgify(RequestClass=base_wsgi.Request) + def __call__(self, req): + req.environ['cinder.context'] = self.context + return self.application + + +class CinderKeystoneContext(base_wsgi.Middleware): + """Make a request context from keystone headers""" + + @webob.dec.wsgify(RequestClass=base_wsgi.Request) + def __call__(self, req): + user_id = req.headers.get('X_USER') + user_id = req.headers.get('X_USER_ID', user_id) + if user_id is None: + LOG.debug("Neither X_USER_ID nor X_USER found in request") + return webob.exc.HTTPUnauthorized() + # get the roles + roles = [r.strip() for r in req.headers.get('X_ROLE', '').split(',')] + if 'X_TENANT_ID' in req.headers: + # This is the new header since Keystone went to ID/Name + project_id = req.headers['X_TENANT_ID'] + else: + # This is for legacy compatibility + project_id = req.headers['X_TENANT'] + + # Get the auth token + auth_token = req.headers.get('X_AUTH_TOKEN', + req.headers.get('X_STORAGE_TOKEN')) + + # Build a context, including the auth_token... + remote_address = req.remote_addr + if FLAGS.use_forwarded_for: + remote_address = req.headers.get('X-Forwarded-For', remote_address) + ctx = context.RequestContext(user_id, + project_id, + roles=roles, + auth_token=auth_token, + remote_address=remote_address) + + req.environ['cinder.context'] = ctx + return self.application + + +class NoAuthMiddleware(base_wsgi.Middleware): + """Return a fake token if one isn't specified.""" + + @webob.dec.wsgify(RequestClass=wsgi.Request) + def __call__(self, req): + if 'X-Auth-Token' not in req.headers: + user_id = req.headers.get('X-Auth-User', 'admin') + project_id = req.headers.get('X-Auth-Project-Id', 'admin') + os_url = os.path.join(req.url, project_id) + res = webob.Response() + # NOTE(vish): This is expecting and returning Auth(1.1), whereas + # keystone uses 2.0 auth. We should probably allow + # 2.0 auth here as well. 
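+            # The fake token built below is simply 'user_id:project_id'; the
+            # branch further down splits it back apart with
+            # token.partition(':'), so this noauth flow round-trips without
+            # a real identity service.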
+ res.headers['X-Auth-Token'] = '%s:%s' % (user_id, project_id) + res.headers['X-Server-Management-Url'] = os_url + res.content_type = 'text/plain' + res.status = '204' + return res + + token = req.headers['X-Auth-Token'] + user_id, _sep, project_id = token.partition(':') + project_id = project_id or user_id + remote_address = getattr(req, 'remote_address', '127.0.0.1') + if FLAGS.use_forwarded_for: + remote_address = req.headers.get('X-Forwarded-For', remote_address) + ctx = context.RequestContext(user_id, + project_id, + is_admin=True, + remote_address=remote_address) + + req.environ['cinder.context'] = ctx + return self.application diff --git a/cinder/api/middleware/fault.py b/cinder/api/middleware/fault.py new file mode 100644 index 0000000000..dddd166ac5 --- /dev/null +++ b/cinder/api/middleware/fault.py @@ -0,0 +1,75 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import webob.dec +import webob.exc + +from cinder.api.openstack import wsgi +from cinder.openstack.common import log as logging +from cinder import utils +from cinder import wsgi as base_wsgi + + +LOG = logging.getLogger(__name__) + + +class FaultWrapper(base_wsgi.Middleware): + """Calls down the middleware stack, making exceptions into faults.""" + + _status_to_type = {} + + @staticmethod + def status_to_type(status): + if not FaultWrapper._status_to_type: + for clazz in utils.walk_class_hierarchy(webob.exc.HTTPError): + FaultWrapper._status_to_type[clazz.code] = clazz + return FaultWrapper._status_to_type.get( + status, webob.exc.HTTPInternalServerError)() + + def _error(self, inner, req): + LOG.exception(_("Caught error: %s"), unicode(inner)) + + safe = getattr(inner, 'safe', False) + headers = getattr(inner, 'headers', None) + status = getattr(inner, 'code', 500) + if status is None: + status = 500 + + msg_dict = dict(url=req.url, status=status) + LOG.info(_("%(url)s returned with HTTP %(status)d") % msg_dict) + outer = self.status_to_type(status) + if headers: + outer.headers = headers + # NOTE(johannes): We leave the explanation empty here on + # purpose. It could possibly have sensitive information + # that should not be returned back to the user. 
See
+        # bugs 868360 and 874472
+        # NOTE(eglynn): However, it would be over-conservative and
+        # inconsistent with the EC2 API to hide every exception,
+        # including those that are safe to expose, see bug 1021373
+        if safe:
+            outer.explanation = '%s: %s' % (inner.__class__.__name__,
+                                            unicode(inner))
+        return wsgi.Fault(outer)
+
+    @webob.dec.wsgify(RequestClass=wsgi.Request)
+    def __call__(self, req):
+        try:
+            return req.get_response(self.application)
+        except Exception as ex:
+            return self._error(ex, req)
diff --git a/cinder/api/middleware/sizelimit.py b/cinder/api/middleware/sizelimit.py
new file mode 100644
index 0000000000..868db0f39e
--- /dev/null
+++ b/cinder/api/middleware/sizelimit.py
@@ -0,0 +1,83 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2012 OpenStack, LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Request Body limiting middleware.
+
+"""
+
+from oslo.config import cfg
+import webob.dec
+import webob.exc
+
+from cinder import flags
+from cinder.openstack.common import log as logging
+from cinder import wsgi
+
+# Default max request body size is 112k.
+max_request_body_size_opt = cfg.IntOpt('osapi_max_request_body_size',
+                                       default=114688,
+                                       help='Max size for body of a request')
+
+FLAGS = flags.FLAGS
+FLAGS.register_opt(max_request_body_size_opt)
+LOG = logging.getLogger(__name__)
+
+
+class LimitingReader(object):
+    """Reader to limit the size of an incoming request."""
+    def __init__(self, data, limit):
+        """
+        :param data: Underlying data object
+        :param limit: maximum number of bytes the reader should allow
+        """
+        self.data = data
+        self.limit = limit
+        self.bytes_read = 0
+
+    def __iter__(self):
+        for chunk in self.data:
+            self.bytes_read += len(chunk)
+            if self.bytes_read > self.limit:
+                msg = _("Request is too large.")
+                raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg)
+            else:
+                yield chunk
+
+    def read(self, i=None):
+        result = self.data.read(i)
+        self.bytes_read += len(result)
+        if self.bytes_read > self.limit:
+            msg = _("Request is too large.")
+            raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg)
+        return result
+
+
+class RequestBodySizeLimiter(wsgi.Middleware):
+    """Limit the size of incoming request bodies."""
+
+    def __init__(self, *args, **kwargs):
+        super(RequestBodySizeLimiter, self).__init__(*args, **kwargs)
+
+    @webob.dec.wsgify(RequestClass=wsgi.Request)
+    def __call__(self, req):
+        if req.content_length > FLAGS.osapi_max_request_body_size:
+            msg = _("Request is too large.")
+            raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg)
+        if req.content_length is None and req.is_body_readable:
+            limiter = LimitingReader(req.body_file,
+                                     FLAGS.osapi_max_request_body_size)
+            req.body_file = limiter
+        return self.application
diff --git a/cinder/api/openstack/__init__.py b/cinder/api/openstack/__init__.py
new file mode 100644
index 0000000000..a3b2e6da2e
--- /dev/null
+++ b/cinder/api/openstack/__init__.py
@@ -0,0 +1,130 @@
+# Copyright (c) 2013 OpenStack, LLC.
+#
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+WSGI middleware for OpenStack API controllers.
+"""
+
+import routes
+
+from cinder.api.middleware import fault
+from cinder.api.openstack import wsgi
+from cinder.openstack.common import log as logging
+from cinder import utils
+from cinder import wsgi as base_wsgi
+
+
+LOG = logging.getLogger(__name__)
+
+
+class APIMapper(routes.Mapper):
+    def routematch(self, url=None, environ=None):
+        if url == "":
+            result = self._match("", environ)
+            return result[0], result[1]
+        return routes.Mapper.routematch(self, url, environ)
+
+
+class ProjectMapper(APIMapper):
+    def resource(self, member_name, collection_name, **kwargs):
+        if 'parent_resource' not in kwargs:
+            kwargs['path_prefix'] = '{project_id}/'
+        else:
+            parent_resource = kwargs['parent_resource']
+            p_collection = parent_resource['collection_name']
+            p_member = parent_resource['member_name']
+            kwargs['path_prefix'] = '{project_id}/%s/:%s_id' % (p_collection,
+                                                                p_member)
+        routes.Mapper.resource(self,
+                               member_name,
+                               collection_name,
+                               **kwargs)
+
+
+class APIRouter(base_wsgi.Router):
+    """
+    Routes requests on the OpenStack API to the appropriate controller
+    and method.
+    """
+    ExtensionManager = None  # override in subclasses
+
+    @classmethod
+    def factory(cls, global_config, **local_config):
+        """Simple paste factory; :class:`cinder.wsgi.Router` doesn't have
+        one."""
+        return cls()
+
+    def __init__(self, ext_mgr=None):
+        if ext_mgr is None:
+            if self.ExtensionManager:
+                ext_mgr = self.ExtensionManager()
+            else:
+                raise Exception(_("Must specify an ExtensionManager class"))
+
+        mapper = ProjectMapper()
+        self.resources = {}
+        self._setup_routes(mapper, ext_mgr)
+        self._setup_ext_routes(mapper, ext_mgr)
+        self._setup_extensions(ext_mgr)
+        super(APIRouter, self).__init__(mapper)
+
+    def _setup_ext_routes(self, mapper, ext_mgr):
+        for resource in ext_mgr.get_resources():
+            LOG.debug(_('Extended resource: %s'),
+                      resource.collection)
+
+            wsgi_resource = wsgi.Resource(resource.controller)
+            self.resources[resource.collection] = wsgi_resource
+            kargs = dict(
+                controller=wsgi_resource,
+                collection=resource.collection_actions,
+                member=resource.member_actions)
+
+            if resource.parent:
+                kargs['parent_resource'] = resource.parent
+
+            mapper.resource(resource.collection, resource.collection, **kargs)
+
+            if resource.custom_routes_fn:
+                resource.custom_routes_fn(mapper, wsgi_resource)
+
+    def _setup_extensions(self, ext_mgr):
+        for extension in ext_mgr.get_controller_extensions():
+            ext_name = extension.extension.name
+            collection = extension.collection
+            controller = extension.controller
+
+            if collection not in self.resources:
+                LOG.warning(_('Extension %(ext_name)s: Cannot extend '
+                              'resource %(collection)s: No such resource') %
+                            locals())
+                continue
+
+            LOG.debug(_('Extension %(ext_name)s extending resource: '
+                        '%(collection)s') % locals())
+
+            resource = self.resources[collection]
+            resource.register_actions(controller)
+            resource.register_extensions(controller)
+
+    def _setup_routes(self, mapper, ext_mgr):
+        raise NotImplementedError
+
+
+class FaultWrapper(fault.FaultWrapper):
+    def __init__(self, application):
+        LOG.warn(_('cinder.api.openstack:FaultWrapper is deprecated. Please '
+                   'use cinder.api.middleware.fault:FaultWrapper instead.'))
+        super(FaultWrapper, self).__init__(application)
diff --git a/cinder/api/openstack/urlmap.py b/cinder/api/openstack/urlmap.py
new file mode 100644
index 0000000000..ab240709dd
--- /dev/null
+++ b/cinder/api/openstack/urlmap.py
@@ -0,0 +1,27 @@
+# Copyright (c) 2013 OpenStack, LLC.
+#
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from cinder.api import urlmap
+from cinder.openstack.common import log as logging
+
+
+LOG = logging.getLogger(__name__)
+
+
+def urlmap_factory(loader, global_conf, **local_conf):
+    LOG.warn(_('cinder.api.openstack.urlmap:urlmap_factory is deprecated. '
+               'Please use cinder.api.urlmap:urlmap_factory instead.'))
+    return urlmap.urlmap_factory(loader, global_conf, **local_conf)
diff --git a/cinder/api/openstack/volume/__init__.py b/cinder/api/openstack/volume/__init__.py
new file mode 100644
index 0000000000..875db43472
--- /dev/null
+++ b/cinder/api/openstack/volume/__init__.py
@@ -0,0 +1,27 @@
+# Copyright (c) 2013 OpenStack, LLC.
+#
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from cinder.api.v1.router import APIRouter as v1_router
+from cinder.openstack.common import log as logging
+
+LOG = logging.getLogger(__name__)
+
+
+class APIRouter(v1_router):
+    def __init__(self, ext_mgr=None):
+        LOG.warn(_('cinder.api.openstack.volume:APIRouter is deprecated. '
+                   'Please use cinder.api.v1.router:APIRouter instead.'))
+        super(APIRouter, self).__init__(ext_mgr)
diff --git a/cinder/api/openstack/volume/versions.py b/cinder/api/openstack/volume/versions.py
new file mode 100644
index 0000000000..857b7698c7
--- /dev/null
+++ b/cinder/api/openstack/volume/versions.py
@@ -0,0 +1,29 @@
+# Copyright (c) 2013 OpenStack, LLC.
+#
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the +# License for the specific language governing permissions and limitations +# under the License. + +from cinder.api import versions +from cinder.openstack.common import log as logging + + +LOG = logging.getLogger(__name__) + + +class Versions(versions.Versions): + def __init__(self): + LOG.warn(_('cinder.api.openstack.volume.versions.Versions is ' + 'deprecated. Please use cinder.api.versions.Versions ' + 'instead.')) + super(Versions, self).__init__() diff --git a/cinder/api/openstack/wsgi.py b/cinder/api/openstack/wsgi.py new file mode 100644 index 0000000000..cc882c826a --- /dev/null +++ b/cinder/api/openstack/wsgi.py @@ -0,0 +1,1144 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import inspect +import math +import time +import webob + +from cinder import exception +from cinder.openstack.common import jsonutils +from cinder.openstack.common import log as logging +from cinder import utils +from cinder import wsgi + +from lxml import etree +from xml.dom import minidom +from xml.parsers import expat + + +XMLNS_V1 = 'http://docs.openstack.org/volume/api/v1' +XMLNS_ATOM = 'http://www.w3.org/2005/Atom' + +LOG = logging.getLogger(__name__) + +# The vendor content types should serialize identically to the non-vendor +# content types. So to avoid littering the code with both options, we +# map the vendor to the other when looking up the type +_CONTENT_TYPE_MAP = { + 'application/vnd.openstack.volume+json': 'application/json', + 'application/vnd.openstack.volume+xml': 'application/xml', +} + +SUPPORTED_CONTENT_TYPES = ( + 'application/json', + 'application/vnd.openstack.volume+json', + 'application/xml', + 'application/vnd.openstack.volume+xml', +) + +_MEDIA_TYPE_MAP = { + 'application/vnd.openstack.volume+json': 'json', + 'application/json': 'json', + 'application/vnd.openstack.volume+xml': 'xml', + 'application/xml': 'xml', + 'application/atom+xml': 'atom', +} + + +class Request(webob.Request): + """Add some OpenStack API-specific logic to the base webob.Request.""" + + def best_match_content_type(self): + """Determine the requested response content-type.""" + if 'cinder.best_content_type' not in self.environ: + # Calculate the best MIME type + content_type = None + + # Check URL path suffix + parts = self.path.rsplit('.', 1) + if len(parts) > 1: + possible_type = 'application/' + parts[1] + if possible_type in SUPPORTED_CONTENT_TYPES: + content_type = possible_type + + if not content_type: + content_type = self.accept.best_match(SUPPORTED_CONTENT_TYPES) + + self.environ['cinder.best_content_type'] = (content_type or + 'application/json') + + return self.environ['cinder.best_content_type'] + + def get_content_type(self): + """Determine content type of the request body. 
+ + Does not do any body introspection, only checks header + + """ + if "Content-Type" not in self.headers: + return None + + allowed_types = SUPPORTED_CONTENT_TYPES + content_type = self.content_type + + if content_type not in allowed_types: + raise exception.InvalidContentType(content_type=content_type) + + return content_type + + +class ActionDispatcher(object): + """Maps method name to local methods through action name.""" + + def dispatch(self, *args, **kwargs): + """Find and call local method.""" + action = kwargs.pop('action', 'default') + action_method = getattr(self, str(action), self.default) + return action_method(*args, **kwargs) + + def default(self, data): + raise NotImplementedError() + + +class TextDeserializer(ActionDispatcher): + """Default request body deserialization""" + + def deserialize(self, datastring, action='default'): + return self.dispatch(datastring, action=action) + + def default(self, datastring): + return {} + + +class JSONDeserializer(TextDeserializer): + + def _from_json(self, datastring): + try: + return jsonutils.loads(datastring) + except ValueError: + msg = _("cannot understand JSON") + raise exception.MalformedRequestBody(reason=msg) + + def default(self, datastring): + return {'body': self._from_json(datastring)} + + +class XMLDeserializer(TextDeserializer): + + def __init__(self, metadata=None): + """ + :param metadata: information needed to deserialize xml into + a dictionary. + """ + super(XMLDeserializer, self).__init__() + self.metadata = metadata or {} + + def _from_xml(self, datastring): + plurals = set(self.metadata.get('plurals', {})) + + try: + node = utils.safe_minidom_parse_string(datastring).childNodes[0] + return {node.nodeName: self._from_xml_node(node, plurals)} + except expat.ExpatError: + msg = _("cannot understand XML") + raise exception.MalformedRequestBody(reason=msg) + + def _from_xml_node(self, node, listnames): + """Convert a minidom node to a simple Python type. + + :param listnames: list of XML node names whose subnodes should + be considered list items. 
+ + """ + if len(node.childNodes) == 1 and node.childNodes[0].nodeType == 3: + return node.childNodes[0].nodeValue + elif node.nodeName in listnames: + return [self._from_xml_node(n, listnames) for n in node.childNodes] + else: + result = dict() + for attr in node.attributes.keys(): + result[attr] = node.attributes[attr].nodeValue + for child in node.childNodes: + if child.nodeType != node.TEXT_NODE: + result[child.nodeName] = self._from_xml_node(child, + listnames) + return result + + def find_first_child_named(self, parent, name): + """Search a nodes children for the first child with a given name""" + for node in parent.childNodes: + if node.nodeName == name: + return node + return None + + def find_children_named(self, parent, name): + """Return all of a nodes children who have the given name""" + for node in parent.childNodes: + if node.nodeName == name: + yield node + + def extract_text(self, node): + """Get the text field contained by the given node""" + if len(node.childNodes) == 1: + child = node.childNodes[0] + if child.nodeType == child.TEXT_NODE: + return child.nodeValue + return "" + + def find_attribute_or_element(self, parent, name): + """Get an attribute value; fallback to an element if not found""" + if parent.hasAttribute(name): + return parent.getAttribute(name) + + node = self.find_first_child_named(parent, name) + if node: + return self.extract_text(node) + + return None + + def default(self, datastring): + return {'body': self._from_xml(datastring)} + + +class MetadataXMLDeserializer(XMLDeserializer): + + def extract_metadata(self, metadata_node): + """Marshal the metadata attribute of a parsed request""" + metadata = {} + if metadata_node is not None: + for meta_node in self.find_children_named(metadata_node, "meta"): + key = meta_node.getAttribute("key") + metadata[key] = self.extract_text(meta_node) + return metadata + + +class DictSerializer(ActionDispatcher): + """Default request body serialization""" + + def serialize(self, data, action='default'): + return self.dispatch(data, action=action) + + def default(self, data): + return "" + + +class JSONDictSerializer(DictSerializer): + """Default JSON request body serialization""" + + def default(self, data): + return jsonutils.dumps(data) + + +class XMLDictSerializer(DictSerializer): + + def __init__(self, metadata=None, xmlns=None): + """ + :param metadata: information needed to deserialize xml into + a dictionary. + :param xmlns: XML namespace to include with serialized xml + """ + super(XMLDictSerializer, self).__init__() + self.metadata = metadata or {} + self.xmlns = xmlns + + def default(self, data): + # We expect data to contain a single key which is the XML root. 
+ root_key = data.keys()[0] + doc = minidom.Document() + node = self._to_xml_node(doc, self.metadata, root_key, data[root_key]) + + return self.to_xml_string(node) + + def to_xml_string(self, node, has_atom=False): + self._add_xmlns(node, has_atom) + return node.toxml('UTF-8') + + #NOTE (ameade): the has_atom should be removed after all of the + # xml serializers and view builders have been updated to the current + # spec that required all responses include the xmlns:atom, the has_atom + # flag is to prevent current tests from breaking + def _add_xmlns(self, node, has_atom=False): + if self.xmlns is not None: + node.setAttribute('xmlns', self.xmlns) + if has_atom: + node.setAttribute('xmlns:atom', "http://www.w3.org/2005/Atom") + + def _to_xml_node(self, doc, metadata, nodename, data): + """Recursive method to convert data members to XML nodes.""" + result = doc.createElement(nodename) + + # Set the xml namespace if one is specified + # TODO(justinsb): We could also use prefixes on the keys + xmlns = metadata.get('xmlns', None) + if xmlns: + result.setAttribute('xmlns', xmlns) + + #TODO(bcwaldon): accomplish this without a type-check + if isinstance(data, list): + collections = metadata.get('list_collections', {}) + if nodename in collections: + metadata = collections[nodename] + for item in data: + node = doc.createElement(metadata['item_name']) + node.setAttribute(metadata['item_key'], str(item)) + result.appendChild(node) + return result + singular = metadata.get('plurals', {}).get(nodename, None) + if singular is None: + if nodename.endswith('s'): + singular = nodename[:-1] + else: + singular = 'item' + for item in data: + node = self._to_xml_node(doc, metadata, singular, item) + result.appendChild(node) + #TODO(bcwaldon): accomplish this without a type-check + elif isinstance(data, dict): + collections = metadata.get('dict_collections', {}) + if nodename in collections: + metadata = collections[nodename] + for k, v in data.items(): + node = doc.createElement(metadata['item_name']) + node.setAttribute(metadata['item_key'], str(k)) + text = doc.createTextNode(str(v)) + node.appendChild(text) + result.appendChild(node) + return result + attrs = metadata.get('attributes', {}).get(nodename, {}) + for k, v in data.items(): + if k in attrs: + result.setAttribute(k, str(v)) + else: + node = self._to_xml_node(doc, metadata, k, v) + result.appendChild(node) + else: + # Type is atom + node = doc.createTextNode(str(data)) + result.appendChild(node) + return result + + def _create_link_nodes(self, xml_doc, links): + link_nodes = [] + for link in links: + link_node = xml_doc.createElement('atom:link') + link_node.setAttribute('rel', link['rel']) + link_node.setAttribute('href', link['href']) + if 'type' in link: + link_node.setAttribute('type', link['type']) + link_nodes.append(link_node) + return link_nodes + + def _to_xml(self, root): + """Convert the xml object to an xml string.""" + return etree.tostring(root, encoding='UTF-8', xml_declaration=True) + + +def serializers(**serializers): + """Attaches serializers to a method. + + This decorator associates a dictionary of serializers with a + method. Note that the function attributes are directly + manipulated; the method is not wrapped. + """ + + def decorator(func): + if not hasattr(func, 'wsgi_serializers'): + func.wsgi_serializers = {} + func.wsgi_serializers.update(serializers) + return func + return decorator + + +def deserializers(**deserializers): + """Attaches deserializers to a method. 
+ + This decorator associates a dictionary of deserializers with a + method. Note that the function attributes are directly + manipulated; the method is not wrapped. + """ + + def decorator(func): + if not hasattr(func, 'wsgi_deserializers'): + func.wsgi_deserializers = {} + func.wsgi_deserializers.update(deserializers) + return func + return decorator + + +def response(code): + """Attaches response code to a method. + + This decorator associates a response code with a method. Note + that the function attributes are directly manipulated; the method + is not wrapped. + """ + + def decorator(func): + func.wsgi_code = code + return func + return decorator + + +class ResponseObject(object): + """Bundles a response object with appropriate serializers. + + Object that app methods may return in order to bind alternate + serializers with a response object to be serialized. Its use is + optional. + """ + + def __init__(self, obj, code=None, **serializers): + """Binds serializers with an object. + + Takes keyword arguments akin to the @serializer() decorator + for specifying serializers. Serializers specified will be + given preference over default serializers or method-specific + serializers on return. + """ + + self.obj = obj + self.serializers = serializers + self._default_code = 200 + self._code = code + self._headers = {} + self.serializer = None + self.media_type = None + + def __getitem__(self, key): + """Retrieves a header with the given name.""" + + return self._headers[key.lower()] + + def __setitem__(self, key, value): + """Sets a header with the given name to the given value.""" + + self._headers[key.lower()] = value + + def __delitem__(self, key): + """Deletes the header with the given name.""" + + del self._headers[key.lower()] + + def _bind_method_serializers(self, meth_serializers): + """Binds method serializers with the response object. + + Binds the method serializers with the response object. + Serializers specified to the constructor will take precedence + over serializers specified to this method. + + :param meth_serializers: A dictionary with keys mapping to + response types and values containing + serializer objects. + """ + + # We can't use update because that would be the wrong + # precedence + for mtype, serializer in meth_serializers.items(): + self.serializers.setdefault(mtype, serializer) + + def get_serializer(self, content_type, default_serializers=None): + """Returns the serializer for the wrapped object. + + Returns the serializer for the wrapped object subject to the + indicated content type. If no serializer matching the content + type is attached, an appropriate serializer drawn from the + default serializers will be used. If no appropriate + serializer is available, raises InvalidContentType. + """ + + default_serializers = default_serializers or {} + + try: + mtype = _MEDIA_TYPE_MAP.get(content_type, content_type) + if mtype in self.serializers: + return mtype, self.serializers[mtype] + else: + return mtype, default_serializers[mtype] + except (KeyError, TypeError): + raise exception.InvalidContentType(content_type=content_type) + + def preserialize(self, content_type, default_serializers=None): + """Prepares the serializer that will be used to serialize. + + Determines the serializer that will be used and prepares an + instance of it for later call. This allows the serializer to + be accessed by extensions for, e.g., template extension. 
+ """ + + mtype, serializer = self.get_serializer(content_type, + default_serializers) + self.media_type = mtype + self.serializer = serializer() + + def attach(self, **kwargs): + """Attach slave templates to serializers.""" + + if self.media_type in kwargs: + self.serializer.attach(kwargs[self.media_type]) + + def serialize(self, request, content_type, default_serializers=None): + """Serializes the wrapped object. + + Utility method for serializing the wrapped object. Returns a + webob.Response object. + """ + + if self.serializer: + serializer = self.serializer + else: + _mtype, _serializer = self.get_serializer(content_type, + default_serializers) + serializer = _serializer() + + response = webob.Response() + response.status_int = self.code + for hdr, value in self._headers.items(): + response.headers[hdr] = value + response.headers['Content-Type'] = content_type + if self.obj is not None: + response.body = serializer.serialize(self.obj) + + return response + + @property + def code(self): + """Retrieve the response status.""" + + return self._code or self._default_code + + @property + def headers(self): + """Retrieve the headers.""" + + return self._headers.copy() + + +def action_peek_json(body): + """Determine action to invoke.""" + + try: + decoded = jsonutils.loads(body) + except ValueError: + msg = _("cannot understand JSON") + raise exception.MalformedRequestBody(reason=msg) + + # Make sure there's exactly one key... + if len(decoded) != 1: + msg = _("too many body keys") + raise exception.MalformedRequestBody(reason=msg) + + # Return the action and the decoded body... + return decoded.keys()[0] + + +def action_peek_xml(body): + """Determine action to invoke.""" + + dom = utils.safe_minidom_parse_string(body) + action_node = dom.childNodes[0] + + return action_node.tagName + + +class ResourceExceptionHandler(object): + """Context manager to handle Resource exceptions. + + Used when processing exceptions generated by API implementation + methods (or their extensions). Converts most exceptions to Fault + exceptions, with the appropriate logging. + """ + + def __enter__(self): + return None + + def __exit__(self, ex_type, ex_value, ex_traceback): + if not ex_value: + return True + + if isinstance(ex_value, exception.NotAuthorized): + msg = unicode(ex_value) + raise Fault(webob.exc.HTTPForbidden(explanation=msg)) + elif isinstance(ex_value, exception.Invalid): + raise Fault(exception.ConvertedException( + code=ex_value.code, explanation=unicode(ex_value))) + elif isinstance(ex_value, TypeError): + exc_info = (ex_type, ex_value, ex_traceback) + LOG.error(_( + 'Exception handling resource: %s') % + ex_value, exc_info=exc_info) + raise Fault(webob.exc.HTTPBadRequest()) + elif isinstance(ex_value, Fault): + LOG.info(_("Fault thrown: %s"), unicode(ex_value)) + raise ex_value + elif isinstance(ex_value, webob.exc.HTTPException): + LOG.info(_("HTTP exception thrown: %s"), unicode(ex_value)) + raise Fault(ex_value) + + # We didn't handle the exception + return False + + +class Resource(wsgi.Application): + """WSGI app that handles (de)serialization and controller dispatch. + + WSGI app that reads routing information supplied by RoutesMiddleware + and calls the requested action method upon its controller. All + controller action methods must accept a 'req' argument, which is the + incoming wsgi.Request. If the operation is a PUT or POST, the controller + method must also accept a 'body' argument (the deserialized request body). 
+ They may raise a webob.exc exception or return a dict, which will be + serialized by requested content type. + + Exceptions derived from webob.exc.HTTPException will be automatically + wrapped in Fault() to provide API friendly error responses. + + """ + + def __init__(self, controller, action_peek=None, **deserializers): + """ + :param controller: object that implement methods created by routes lib + :param action_peek: dictionary of routines for peeking into an action + request body to determine the desired action + """ + + self.controller = controller + + default_deserializers = dict(xml=XMLDeserializer, + json=JSONDeserializer) + default_deserializers.update(deserializers) + + self.default_deserializers = default_deserializers + self.default_serializers = dict(xml=XMLDictSerializer, + json=JSONDictSerializer) + + self.action_peek = dict(xml=action_peek_xml, + json=action_peek_json) + self.action_peek.update(action_peek or {}) + + # Copy over the actions dictionary + self.wsgi_actions = {} + if controller: + self.register_actions(controller) + + # Save a mapping of extensions + self.wsgi_extensions = {} + self.wsgi_action_extensions = {} + + def register_actions(self, controller): + """Registers controller actions with this resource.""" + + actions = getattr(controller, 'wsgi_actions', {}) + for key, method_name in actions.items(): + self.wsgi_actions[key] = getattr(controller, method_name) + + def register_extensions(self, controller): + """Registers controller extensions with this resource.""" + + extensions = getattr(controller, 'wsgi_extensions', []) + for method_name, action_name in extensions: + # Look up the extending method + extension = getattr(controller, method_name) + + if action_name: + # Extending an action... + if action_name not in self.wsgi_action_extensions: + self.wsgi_action_extensions[action_name] = [] + self.wsgi_action_extensions[action_name].append(extension) + else: + # Extending a regular method + if method_name not in self.wsgi_extensions: + self.wsgi_extensions[method_name] = [] + self.wsgi_extensions[method_name].append(extension) + + def get_action_args(self, request_environment): + """Parse dictionary created by routes library.""" + + # NOTE(Vek): Check for get_action_args() override in the + # controller + if hasattr(self.controller, 'get_action_args'): + return self.controller.get_action_args(request_environment) + + try: + args = request_environment['wsgiorg.routing_args'][1].copy() + except (KeyError, IndexError, AttributeError): + return {} + + try: + del args['controller'] + except KeyError: + pass + + try: + del args['format'] + except KeyError: + pass + + return args + + def get_body(self, request): + try: + content_type = request.get_content_type() + except exception.InvalidContentType: + LOG.debug(_("Unrecognized Content-Type provided in request")) + return None, '' + + if not content_type: + LOG.debug(_("No Content-Type provided in request")) + return None, '' + + if len(request.body) <= 0: + LOG.debug(_("Empty body provided in request")) + return None, '' + + return content_type, request.body + + def deserialize(self, meth, content_type, body): + meth_deserializers = getattr(meth, 'wsgi_deserializers', {}) + try: + mtype = _MEDIA_TYPE_MAP.get(content_type, content_type) + if mtype in meth_deserializers: + deserializer = meth_deserializers[mtype] + else: + deserializer = self.default_deserializers[mtype] + except (KeyError, TypeError): + raise exception.InvalidContentType(content_type=content_type) + + return deserializer().deserialize(body) + 
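+    # A hedged usage sketch (controller and template names are assumed,
+    # not part of this change): a controller method can bind serializers
+    # and a default response code declaratively, and Resource applies
+    # them during dispatch:
+    #
+    #     class SnapshotsController(Controller):
+    #         @serializers(xml=SnapshotTemplate)
+    #         @response(202)
+    #         def create(self, req, body):
+    #             snapshot = {'id': 'fake-id', 'status': 'creating'}
+    #             return {'snapshot': snapshot}
+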
+ def pre_process_extensions(self, extensions, request, action_args): + # List of callables for post-processing extensions + post = [] + + for ext in extensions: + if inspect.isgeneratorfunction(ext): + response = None + + # If it's a generator function, the part before the + # yield is the preprocessing stage + try: + with ResourceExceptionHandler(): + gen = ext(req=request, **action_args) + response = gen.next() + except Fault as ex: + response = ex + + # We had a response... + if response: + return response, [] + + # No response, queue up generator for post-processing + post.append(gen) + else: + # Regular functions only perform post-processing + post.append(ext) + + # Run post-processing in the reverse order + return None, reversed(post) + + def post_process_extensions(self, extensions, resp_obj, request, + action_args): + for ext in extensions: + response = None + if inspect.isgenerator(ext): + # If it's a generator, run the second half of + # processing + try: + with ResourceExceptionHandler(): + response = ext.send(resp_obj) + except StopIteration: + # Normal exit of generator + continue + except Fault as ex: + response = ex + else: + # Regular functions get post-processing... + try: + with ResourceExceptionHandler(): + response = ext(req=request, resp_obj=resp_obj, + **action_args) + except Fault as ex: + response = ex + + # We had a response... + if response: + return response + + return None + + @webob.dec.wsgify(RequestClass=Request) + def __call__(self, request): + """WSGI method that controls (de)serialization and method dispatch.""" + + LOG.info("%(method)s %(url)s" % {"method": request.method, + "url": request.url}) + + # Identify the action, its arguments, and the requested + # content type + action_args = self.get_action_args(request.environ) + action = action_args.pop('action', None) + content_type, body = self.get_body(request) + accept = request.best_match_content_type() + + # NOTE(Vek): Splitting the function up this way allows for + # auditing by external tools that wrap the existing + # function. If we try to audit __call__(), we can + # run into troubles due to the @webob.dec.wsgify() + # decorator. + return self._process_stack(request, action, action_args, + content_type, body, accept) + + def _process_stack(self, request, action, action_args, + content_type, body, accept): + """Implement the processing stack.""" + + # Get the implementing method + try: + meth, extensions = self.get_method(request, action, + content_type, body) + except (AttributeError, TypeError): + return Fault(webob.exc.HTTPNotFound()) + except KeyError as ex: + msg = _("There is no such action: %s") % ex.args[0] + return Fault(webob.exc.HTTPBadRequest(explanation=msg)) + except exception.MalformedRequestBody: + msg = _("Malformed request body") + return Fault(webob.exc.HTTPBadRequest(explanation=msg)) + + # Now, deserialize the request body... 
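+        # For instance (request values assumed): a POST with
+        # Content-Type: application/json and body '{"volume": {"size": 1}}'
+        # yields contents = {'body': {'volume': {'size': 1}}}, which is
+        # merged into the action's keyword arguments below.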
+        try:
+            if content_type:
+                contents = self.deserialize(meth, content_type, body)
+            else:
+                contents = {}
+        except exception.InvalidContentType:
+            msg = _("Unsupported Content-Type")
+            return Fault(webob.exc.HTTPBadRequest(explanation=msg))
+        except exception.MalformedRequestBody:
+            msg = _("Malformed request body")
+            return Fault(webob.exc.HTTPBadRequest(explanation=msg))
+
+        # Update the action args
+        action_args.update(contents)
+
+        project_id = action_args.pop("project_id", None)
+        context = request.environ.get('cinder.context')
+        if (context and project_id and (project_id != context.project_id)):
+            msg = _("Malformed request url")
+            return Fault(webob.exc.HTTPBadRequest(explanation=msg))
+
+        # Run pre-processing extensions
+        response, post = self.pre_process_extensions(extensions,
+                                                     request, action_args)
+
+        if not response:
+            try:
+                with ResourceExceptionHandler():
+                    action_result = self.dispatch(meth, request, action_args)
+            except Fault as ex:
+                response = ex
+
+        if not response:
+            # No exceptions; convert action_result into a
+            # ResponseObject
+            resp_obj = None
+            if type(action_result) is dict or action_result is None:
+                resp_obj = ResponseObject(action_result)
+            elif isinstance(action_result, ResponseObject):
+                resp_obj = action_result
+            else:
+                response = action_result
+
+            # Run post-processing extensions
+            if resp_obj:
+                _set_request_id_header(request, resp_obj)
+                # Do a preserialize to set up the response object
+                serializers = getattr(meth, 'wsgi_serializers', {})
+                resp_obj._bind_method_serializers(serializers)
+                if hasattr(meth, 'wsgi_code'):
+                    resp_obj._default_code = meth.wsgi_code
+                resp_obj.preserialize(accept, self.default_serializers)
+
+                # Process post-processing extensions
+                response = self.post_process_extensions(post, resp_obj,
+                                                        request, action_args)
+
+            if resp_obj and not response:
+                response = resp_obj.serialize(request, accept,
+                                              self.default_serializers)
+
+        try:
+            msg_dict = dict(url=request.url, status=response.status_int)
+            msg = _("%(url)s returned with HTTP %(status)d") % msg_dict
+        except AttributeError as e:
+            msg_dict = dict(url=request.url, e=e)
+            msg = _("%(url)s returned a fault: %(e)s") % msg_dict
+
+        LOG.info(msg)
+
+        return response
+
+    def get_method(self, request, action, content_type, body):
+        """Look up the action-specific method and its extensions."""
+
+        # Look up the method
+        try:
+            if not self.controller:
+                meth = getattr(self, action)
+            else:
+                meth = getattr(self.controller, action)
+        except AttributeError:
+            if (not self.wsgi_actions or
+                    action not in ['action', 'create', 'delete']):
+                # Propagate the error
+                raise
+        else:
+            return meth, self.wsgi_extensions.get(action, [])
+
+        if action == 'action':
+            # OK, it's an action; figure out which action...
+            mtype = _MEDIA_TYPE_MAP.get(content_type)
+            action_name = self.action_peek[mtype](body)
+            LOG.debug("Action body: %s" % body)
+        else:
+            action_name = action
+
+        # Look up the action method
+        return (self.wsgi_actions[action_name],
+                self.wsgi_action_extensions.get(action_name, []))
+
+    def dispatch(self, method, request, action_args):
+        """Dispatch a call to the action-specific method."""
+
+        return method(req=request, **action_args)
+
+
+def action(name):
+    """Mark a function as an action.
+
+    The given name will be taken as the action key in the body.
+
+    This is also overloaded to allow extensions to provide
+    non-extending definitions of create and delete operations.
+ """ + + def decorator(func): + func.wsgi_action = name + return func + return decorator + + +def extends(*args, **kwargs): + """Indicate a function extends an operation. + + Can be used as either:: + + @extends + def index(...): + pass + + or as:: + + @extends(action='resize') + def _action_resize(...): + pass + """ + + def decorator(func): + # Store enough information to find what we're extending + func.wsgi_extends = (func.__name__, kwargs.get('action')) + return func + + # If we have positional arguments, call the decorator + if args: + return decorator(*args) + + # OK, return the decorator instead + return decorator + + +class ControllerMetaclass(type): + """Controller metaclass. + + This metaclass automates the task of assembling a dictionary + mapping action keys to method names. + """ + + def __new__(mcs, name, bases, cls_dict): + """Adds the wsgi_actions dictionary to the class.""" + + # Find all actions + actions = {} + extensions = [] + # start with wsgi actions from base classes + for base in bases: + actions.update(getattr(base, 'wsgi_actions', {})) + for key, value in cls_dict.items(): + if not callable(value): + continue + if getattr(value, 'wsgi_action', None): + actions[value.wsgi_action] = key + elif getattr(value, 'wsgi_extends', None): + extensions.append(value.wsgi_extends) + + # Add the actions and extensions to the class dict + cls_dict['wsgi_actions'] = actions + cls_dict['wsgi_extensions'] = extensions + + return super(ControllerMetaclass, mcs).__new__(mcs, name, bases, + cls_dict) + + +class Controller(object): + """Default controller.""" + + __metaclass__ = ControllerMetaclass + + _view_builder_class = None + + def __init__(self, view_builder=None): + """Initialize controller with a view builder instance.""" + if view_builder: + self._view_builder = view_builder + elif self._view_builder_class: + self._view_builder = self._view_builder_class() + else: + self._view_builder = None + + @staticmethod + def is_valid_body(body, entity_name): + if not (body and entity_name in body): + return False + + def is_dict(d): + try: + d.get(None) + return True + except AttributeError: + return False + + if not is_dict(body[entity_name]): + return False + + return True + + +class Fault(webob.exc.HTTPException): + """Wrap webob.exc.HTTPException to provide API friendly response.""" + + _fault_names = {400: "badRequest", + 401: "unauthorized", + 403: "forbidden", + 404: "itemNotFound", + 405: "badMethod", + 409: "conflictingRequest", + 413: "overLimit", + 415: "badMediaType", + 501: "notImplemented", + 503: "serviceUnavailable"} + + def __init__(self, exception): + """Create a Fault for the given webob.exc.exception.""" + self.wrapped_exc = exception + self.status_int = exception.status_int + + @webob.dec.wsgify(RequestClass=Request) + def __call__(self, req): + """Generate a WSGI response based on the exception passed to ctor.""" + # Replace the body with fault details. 
+ code = self.wrapped_exc.status_int + fault_name = self._fault_names.get(code, "computeFault") + fault_data = { + fault_name: { + 'code': code, + 'message': self.wrapped_exc.explanation}} + if code == 413: + retry = self.wrapped_exc.headers['Retry-After'] + fault_data[fault_name]['retryAfter'] = retry + + # 'code' is an attribute on the fault tag itself + metadata = {'attributes': {fault_name: 'code'}} + + xml_serializer = XMLDictSerializer(metadata, XMLNS_V1) + + content_type = req.best_match_content_type() + serializer = { + 'application/xml': xml_serializer, + 'application/json': JSONDictSerializer(), + }[content_type] + + self.wrapped_exc.body = serializer.serialize(fault_data) + self.wrapped_exc.content_type = content_type + _set_request_id_header(req, self.wrapped_exc.headers) + + return self.wrapped_exc + + def __str__(self): + return self.wrapped_exc.__str__() + + +def _set_request_id_header(req, headers): + context = req.environ.get('cinder.context') + if context: + headers['x-compute-request-id'] = context.request_id + + +class OverLimitFault(webob.exc.HTTPException): + """ + Rate-limited request response. + """ + + def __init__(self, message, details, retry_time): + """ + Initialize new `OverLimitFault` with relevant information. + """ + hdrs = OverLimitFault._retry_after(retry_time) + self.wrapped_exc = webob.exc.HTTPRequestEntityTooLarge(headers=hdrs) + self.content = { + "overLimitFault": { + "code": self.wrapped_exc.status_int, + "message": message, + "details": details, + }, + } + + @staticmethod + def _retry_after(retry_time): + delay = int(math.ceil(retry_time - time.time())) + retry_after = delay if delay > 0 else 0 + headers = {'Retry-After': '%d' % retry_after} + return headers + + @webob.dec.wsgify(RequestClass=Request) + def __call__(self, request): + """ + Return the wrapped exception with a serialized body conforming to our + error format. 
+ """ + content_type = request.best_match_content_type() + metadata = {"attributes": {"overLimitFault": "code"}} + + xml_serializer = XMLDictSerializer(metadata, XMLNS_V1) + serializer = { + 'application/xml': xml_serializer, + 'application/json': JSONDictSerializer(), + }[content_type] + + content = serializer.serialize(self.content) + self.wrapped_exc.body = content + + return self.wrapped_exc diff --git a/cinder/api/schemas/atom-link.rng b/cinder/api/schemas/atom-link.rng new file mode 100644 index 0000000000..edba5eee6c --- /dev/null +++ b/cinder/api/schemas/atom-link.rng @@ -0,0 +1,141 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 1 + [^:]* + + + + + + .+/.+ + + + + + + [A-Za-z]{1,8}(-[A-Za-z0-9]{1,8})* + + + + + + + + + + + + xml:base + xml:lang + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/cinder/api/schemas/v1.1/extension.rng b/cinder/api/schemas/v1.1/extension.rng new file mode 100644 index 0000000000..b16d8c1300 --- /dev/null +++ b/cinder/api/schemas/v1.1/extension.rng @@ -0,0 +1,11 @@ + + + + + + + + + + diff --git a/cinder/api/schemas/v1.1/extensions.rng b/cinder/api/schemas/v1.1/extensions.rng new file mode 100644 index 0000000000..8538eaf2da --- /dev/null +++ b/cinder/api/schemas/v1.1/extensions.rng @@ -0,0 +1,6 @@ + + + + + diff --git a/cinder/api/schemas/v1.1/limits.rng b/cinder/api/schemas/v1.1/limits.rng new file mode 100644 index 0000000000..a66af4b9c4 --- /dev/null +++ b/cinder/api/schemas/v1.1/limits.rng @@ -0,0 +1,28 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/cinder/api/schemas/v1.1/metadata.rng b/cinder/api/schemas/v1.1/metadata.rng new file mode 100644 index 0000000000..b2f5d702a2 --- /dev/null +++ b/cinder/api/schemas/v1.1/metadata.rng @@ -0,0 +1,9 @@ + + + + + + + + diff --git a/cinder/api/sizelimit.py b/cinder/api/sizelimit.py new file mode 100644 index 0000000000..70cd45473e --- /dev/null +++ b/cinder/api/sizelimit.py @@ -0,0 +1,28 @@ +# Copyright (c) 2013 OpenStack, LLC. +# +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from cinder.api.middleware import sizelimit +from cinder.openstack.common import log as logging + +LOG = logging.getLogger(__name__) + + +class RequestBodySizeLimiter(sizelimit.RequestBodySizeLimiter): + def __init__(self, *args, **kwargs): + LOG.warn(_('cinder.api.sizelimit:RequestBodySizeLimiter is ' + 'deprecated. Please use cinder.api.middleware.sizelimit:' + 'RequestBodySizeLimiter instead')) + super(RequestBodySizeLimiter, self).__init__(*args, **kwargs) diff --git a/cinder/api/urlmap.py b/cinder/api/urlmap.py new file mode 100644 index 0000000000..18ec2020d0 --- /dev/null +++ b/cinder/api/urlmap.py @@ -0,0 +1,297 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import paste.urlmap +import re +import urllib2 + +from cinder.api.openstack import wsgi +from cinder.openstack.common import log as logging + + +_quoted_string_re = r'"[^"\\]*(?:\\.[^"\\]*)*"' +_option_header_piece_re = re.compile( + r';\s*([^\s;=]+|%s)\s*' + r'(?:=\s*([^;]+|%s))?\s*' % + (_quoted_string_re, _quoted_string_re)) + +LOG = logging.getLogger(__name__) + + +def unquote_header_value(value): + """Unquotes a header value. + This does not use the real unquoting but what browsers are actually + using for quoting. + + :param value: the header value to unquote. + """ + if value and value[0] == value[-1] == '"': + # this is not the real unquoting, but fixing this so that the + # RFC is met will result in bugs with internet explorer and + # probably some other browsers as well. IE for example is + # uploading files with "C:\foo\bar.txt" as filename + value = value[1:-1] + return value + + +def parse_list_header(value): + """Parse lists as described by RFC 2068 Section 2. + + In particular, parse comma-separated lists where the elements of + the list may include quoted-strings. A quoted-string could + contain a comma. A non-quoted string could have quotes in the + middle. Quotes are removed automatically after parsing. + + The return value is a standard :class:`list`: + + >>> parse_list_header('token, "quoted value"') + ['token', 'quoted value'] + + :param value: a string with a list header. + :return: :class:`list` + """ + result = [] + for item in urllib2.parse_http_list(value): + if item[:1] == item[-1:] == '"': + item = unquote_header_value(item[1:-1]) + result.append(item) + return result + + +def parse_options_header(value): + """Parse a ``Content-Type`` like header into a tuple with the content + type and the options: + + >>> parse_options_header('Content-Type: text/html; mimetype=text/html') + ('Content-Type:', {'mimetype': 'text/html'}) + + :param value: the header to parse. + :return: (str, options) + """ + def _tokenize(string): + for match in _option_header_piece_re.finditer(string): + key, value = match.groups() + key = unquote_header_value(key) + if value is not None: + value = unquote_header_value(value) + yield key, value + + if not value: + return '', {} + + parts = _tokenize(';' + value) + name = parts.next()[0] + extra = dict(parts) + return name, extra + + +class Accept(object): + def __init__(self, value): + self._content_types = [parse_options_header(v) for v in + parse_list_header(value)] + + def best_match(self, supported_content_types): + # FIXME: Should we have a more sophisticated matching algorithm that + # takes into account the version as well? 
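+        # Worked example (header value assumed): for
+        #     Accept: application/xml;q=0.8, application/json
+        # the JSON entry wins with its implicit q of 1.0, so this
+        # method returns ('application/json', {}).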
+ best_quality = -1 + best_content_type = None + best_params = {} + best_match = '*/*' + + for content_type in supported_content_types: + for content_mask, params in self._content_types: + try: + quality = float(params.get('q', 1)) + except ValueError: + continue + + if quality < best_quality: + continue + elif best_quality == quality: + if best_match.count('*') <= content_mask.count('*'): + continue + + if self._match_mask(content_mask, content_type): + best_quality = quality + best_content_type = content_type + best_params = params + best_match = content_mask + + return best_content_type, best_params + + def content_type_params(self, best_content_type): + """Find parameters in Accept header for given content type.""" + for content_type, params in self._content_types: + if best_content_type == content_type: + return params + + return {} + + def _match_mask(self, mask, content_type): + if '*' not in mask: + return content_type == mask + if mask == '*/*': + return True + mask_major = mask[:-2] + content_type_major = content_type.split('/', 1)[0] + return content_type_major == mask_major + + +def urlmap_factory(loader, global_conf, **local_conf): + if 'not_found_app' in local_conf: + not_found_app = local_conf.pop('not_found_app') + else: + not_found_app = global_conf.get('not_found_app') + if not_found_app: + not_found_app = loader.get_app(not_found_app, global_conf=global_conf) + urlmap = URLMap(not_found_app=not_found_app) + for path, app_name in local_conf.items(): + path = paste.urlmap.parse_path_expression(path) + app = loader.get_app(app_name, global_conf=global_conf) + urlmap[path] = app + return urlmap + + +class URLMap(paste.urlmap.URLMap): + def _match(self, host, port, path_info): + """Find longest match for a given URL path.""" + for (domain, app_url), app in self.applications: + if domain and domain != host and domain != host + ':' + port: + continue + if (path_info == app_url or path_info.startswith(app_url + '/')): + return app, app_url + + return None, None + + def _set_script_name(self, app, app_url): + def wrap(environ, start_response): + environ['SCRIPT_NAME'] += app_url + return app(environ, start_response) + + return wrap + + def _munge_path(self, app, path_info, app_url): + def wrap(environ, start_response): + environ['SCRIPT_NAME'] += app_url + environ['PATH_INFO'] = path_info[len(app_url):] + return app(environ, start_response) + + return wrap + + def _path_strategy(self, host, port, path_info): + """Check path suffix for MIME type and path prefix for API version.""" + mime_type = app = app_url = None + + parts = path_info.rsplit('.', 1) + if len(parts) > 1: + possible_type = 'application/' + parts[1] + if possible_type in wsgi.SUPPORTED_CONTENT_TYPES: + mime_type = possible_type + + parts = path_info.split('/') + if len(parts) > 1: + possible_app, possible_app_url = self._match(host, port, path_info) + # Don't use prefix if it ends up matching default + if possible_app and possible_app_url: + app_url = possible_app_url + app = self._munge_path(possible_app, path_info, app_url) + + return mime_type, app, app_url + + def _content_type_strategy(self, host, port, environ): + """Check Content-Type header for API version.""" + app = None + params = parse_options_header(environ.get('CONTENT_TYPE', ''))[1] + if 'version' in params: + app, app_url = self._match(host, port, '/v' + params['version']) + if app: + app = self._set_script_name(app, app_url) + + return app + + def _accept_strategy(self, host, port, environ, supported_content_types): + """Check Accept header for 
best matching MIME type and API version.""" + accept = Accept(environ.get('HTTP_ACCEPT', '')) + + app = None + + # Find the best match in the Accept header + mime_type, params = accept.best_match(supported_content_types) + if 'version' in params: + app, app_url = self._match(host, port, '/v' + params['version']) + if app: + app = self._set_script_name(app, app_url) + + return mime_type, app + + def __call__(self, environ, start_response): + host = environ.get('HTTP_HOST', environ.get('SERVER_NAME')).lower() + if ':' in host: + host, port = host.split(':', 1) + else: + if environ['wsgi.url_scheme'] == 'http': + port = '80' + else: + port = '443' + + path_info = environ['PATH_INFO'] + path_info = self.normalize_url(path_info, False)[1] + + # The MIME type for the response is determined in one of two ways: + # 1) URL path suffix (eg /servers/detail.json) + # 2) Accept header (eg application/json;q=0.8, application/xml;q=0.2) + + # The API version is determined in one of three ways: + # 1) URL path prefix (eg /v1.1/tenant/servers/detail) + # 2) Content-Type header (eg application/json;version=1.1) + # 3) Accept header (eg application/json;q=0.8;version=1.1) + + supported_content_types = list(wsgi.SUPPORTED_CONTENT_TYPES) + + mime_type, app, app_url = self._path_strategy(host, port, path_info) + + # Accept application/atom+xml for the index query of each API + # version mount point as well as the root index + if (app_url and app_url + '/' == path_info) or path_info == '/': + supported_content_types.append('application/atom+xml') + + if not app: + app = self._content_type_strategy(host, port, environ) + + if not mime_type or not app: + possible_mime_type, possible_app = self._accept_strategy( + host, port, environ, supported_content_types) + if possible_mime_type and not mime_type: + mime_type = possible_mime_type + if possible_app and not app: + app = possible_app + + if not mime_type: + mime_type = 'application/json' + + if not app: + # Didn't match a particular version, probably matches default + app, app_url = self._match(host, port, path_info) + if app: + app = self._munge_path(app, path_info, app_url) + + if app: + environ['cinder.best_content_type'] = mime_type + return app(environ, start_response) + + environ['paste.urlmap_object'] = self + return self.not_found_application(environ, start_response) diff --git a/cinder/api/v1/__init__.py b/cinder/api/v1/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/cinder/api/v1/limits.py b/cinder/api/v1/limits.py new file mode 100644 index 0000000000..b8a0ad848d --- /dev/null +++ b/cinder/api/v1/limits.py @@ -0,0 +1,482 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Module dedicated functions/classes dealing with rate limiting requests. 
+""" + +import collections +import copy +import httplib +import math +import re +import time + +import webob.dec +import webob.exc + +from cinder.api.openstack import wsgi +from cinder.api.views import limits as limits_views +from cinder.api import xmlutil +from cinder.openstack.common import importutils +from cinder.openstack.common import jsonutils +from cinder import quota +from cinder import wsgi as base_wsgi + +QUOTAS = quota.QUOTAS + + +# Convenience constants for the limits dictionary passed to Limiter(). +PER_SECOND = 1 +PER_MINUTE = 60 +PER_HOUR = 60 * 60 +PER_DAY = 60 * 60 * 24 + + +limits_nsmap = {None: xmlutil.XMLNS_COMMON_V10, 'atom': xmlutil.XMLNS_ATOM} + + +class LimitsTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = xmlutil.TemplateElement('limits', selector='limits') + + rates = xmlutil.SubTemplateElement(root, 'rates') + rate = xmlutil.SubTemplateElement(rates, 'rate', selector='rate') + rate.set('uri', 'uri') + rate.set('regex', 'regex') + limit = xmlutil.SubTemplateElement(rate, 'limit', selector='limit') + limit.set('value', 'value') + limit.set('verb', 'verb') + limit.set('remaining', 'remaining') + limit.set('unit', 'unit') + limit.set('next-available', 'next-available') + + absolute = xmlutil.SubTemplateElement(root, 'absolute', + selector='absolute') + limit = xmlutil.SubTemplateElement(absolute, 'limit', + selector=xmlutil.get_items) + limit.set('name', 0) + limit.set('value', 1) + + return xmlutil.MasterTemplate(root, 1, nsmap=limits_nsmap) + + +class LimitsController(object): + """ + Controller for accessing limits in the OpenStack API. + """ + + @wsgi.serializers(xml=LimitsTemplate) + def index(self, req): + """ + Return all global and rate limit information. + """ + context = req.environ['cinder.context'] + quotas = QUOTAS.get_project_quotas(context, context.project_id, + usages=False) + abs_limits = dict((k, v['limit']) for k, v in quotas.items()) + rate_limits = req.environ.get("cinder.limits", []) + + builder = self._get_view_builder(req) + return builder.build(rate_limits, abs_limits) + + def _get_view_builder(self, req): + return limits_views.ViewBuilder() + + +def create_resource(): + return wsgi.Resource(LimitsController()) + + +class Limit(object): + """ + Stores information about a limit for HTTP requests. + """ + + UNITS = { + 1: "SECOND", + 60: "MINUTE", + 60 * 60: "HOUR", + 60 * 60 * 24: "DAY", + } + + UNIT_MAP = dict([(v, k) for k, v in UNITS.items()]) + + def __init__(self, verb, uri, regex, value, unit): + """ + Initialize a new `Limit`. + + @param verb: HTTP verb (POST, PUT, etc.) + @param uri: Human-readable URI + @param regex: Regular expression format for this limit + @param value: Integer number of requests which can be made + @param unit: Unit of measure for the value parameter + """ + self.verb = verb + self.uri = uri + self.regex = regex + self.value = int(value) + self.unit = unit + self.unit_string = self.display_unit().lower() + self.remaining = int(value) + + if value <= 0: + raise ValueError("Limit value must be > 0") + + self.last_request = None + self.next_request = None + + self.water_level = 0 + self.capacity = self.unit + self.request_value = float(self.capacity) / float(self.value) + msg = _("Only %(value)s %(verb)s request(s) can be " + "made to %(uri)s every %(unit_string)s.") + self.error_message = msg % self.__dict__ + + def __call__(self, verb, url): + """ + Represents a call to this limit from a relevant request. + + @param verb: string http verb (POST, GET, etc.) 
+ @param url: string URL + """ + if self.verb != verb or not re.match(self.regex, url): + return + + now = self._get_time() + + if self.last_request is None: + self.last_request = now + + leak_value = now - self.last_request + + self.water_level -= leak_value + self.water_level = max(self.water_level, 0) + self.water_level += self.request_value + + difference = self.water_level - self.capacity + + self.last_request = now + + if difference > 0: + self.water_level -= self.request_value + self.next_request = now + difference + return difference + + cap = self.capacity + water = self.water_level + val = self.value + + self.remaining = math.floor(((cap - water) / cap) * val) + self.next_request = now + + def _get_time(self): + """Retrieve the current time. Broken out for testability.""" + return time.time() + + def display_unit(self): + """Display the string name of the unit.""" + return self.UNITS.get(self.unit, "UNKNOWN") + + def display(self): + """Return a useful representation of this class.""" + return { + "verb": self.verb, + "URI": self.uri, + "regex": self.regex, + "value": self.value, + "remaining": int(self.remaining), + "unit": self.display_unit(), + "resetTime": int(self.next_request or self._get_time()), + } + +# "Limit" format is a dictionary with the HTTP verb, human-readable URI, +# a regular-expression to match, value and unit of measure (PER_DAY, etc.) + +DEFAULT_LIMITS = [ + Limit("POST", "*", ".*", 10, PER_MINUTE), + Limit("POST", "*/servers", "^/servers", 50, PER_DAY), + Limit("PUT", "*", ".*", 10, PER_MINUTE), + Limit("GET", "*changes-since*", ".*changes-since.*", 3, PER_MINUTE), + Limit("DELETE", "*", ".*", 100, PER_MINUTE), +] + + +class RateLimitingMiddleware(base_wsgi.Middleware): + """ + Rate-limits requests passing through this middleware. All limit information + is stored in memory for this implementation. + """ + + def __init__(self, application, limits=None, limiter=None, **kwargs): + """ + Initialize new `RateLimitingMiddleware`, which wraps the given WSGI + application and sets up the given limits. + + @param application: WSGI application to wrap + @param limits: String describing limits + @param limiter: String identifying class for representing limits + + Other parameters are passed to the constructor for the limiter. + """ + base_wsgi.Middleware.__init__(self, application) + + # Select the limiter class + if limiter is None: + limiter = Limiter + else: + limiter = importutils.import_class(limiter) + + # Parse the limits, if any are provided + if limits is not None: + limits = limiter.parse_limits(limits) + + self._limiter = limiter(limits or DEFAULT_LIMITS, **kwargs) + + @webob.dec.wsgify(RequestClass=wsgi.Request) + def __call__(self, req): + """ + Represents a single call through this middleware. We should record the + request if we have a limit relevant to it. If no limit is relevant to + the request, ignore it. + + If the request should be rate limited, return a fault telling the user + they are over the limit and need to retry later. 
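+
+        The fault returned below carries the error message of the limit that
+        was hit and the absolute time at which a retry can succeed (now plus
+        the delay computed by the limiter).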
+ """ + verb = req.method + url = req.url + context = req.environ.get("cinder.context") + + if context: + username = context.user_id + else: + username = None + + delay, error = self._limiter.check_for_delay(verb, url, username) + + if delay: + msg = _("This request was rate-limited.") + retry = time.time() + delay + return wsgi.OverLimitFault(msg, error, retry) + + req.environ["cinder.limits"] = self._limiter.get_limits(username) + + return self.application + + +class Limiter(object): + """ + Rate-limit checking class which handles limits in memory. + """ + + def __init__(self, limits, **kwargs): + """ + Initialize the new `Limiter`. + + @param limits: List of `Limit` objects + """ + self.limits = copy.deepcopy(limits) + self.levels = collections.defaultdict(lambda: copy.deepcopy(limits)) + + # Pick up any per-user limit information + for key, value in kwargs.items(): + if key.startswith('user:'): + username = key[5:] + self.levels[username] = self.parse_limits(value) + + def get_limits(self, username=None): + """ + Return the limits for a given user. + """ + return [limit.display() for limit in self.levels[username]] + + def check_for_delay(self, verb, url, username=None): + """ + Check the given verb/user/user triplet for limit. + + @return: Tuple of delay (in seconds) and error message (or None, None) + """ + delays = [] + + for limit in self.levels[username]: + delay = limit(verb, url) + if delay: + delays.append((delay, limit.error_message)) + + if delays: + delays.sort() + return delays[0] + + return None, None + + # Note: This method gets called before the class is instantiated, + # so this must be either a static method or a class method. It is + # used to develop a list of limits to feed to the constructor. We + # put this in the class so that subclasses can override the + # default limit parsing. + @staticmethod + def parse_limits(limits): + """ + Convert a string into a list of Limit instances. This + implementation expects a semicolon-separated sequence of + parenthesized groups, where each group contains a + comma-separated sequence consisting of HTTP method, + user-readable URI, a URI reg-exp, an integer number of + requests which can be made, and a unit of measure. Valid + values for the latter are "SECOND", "MINUTE", "HOUR", and + "DAY". + + @return: List of Limit instances. + """ + + # Handle empty limit strings + limits = limits.strip() + if not limits: + return [] + + # Split up the limits by semicolon + result = [] + for group in limits.split(';'): + group = group.strip() + if group[:1] != '(' or group[-1:] != ')': + raise ValueError("Limit rules must be surrounded by " + "parentheses") + group = group[1:-1] + + # Extract the Limit arguments + args = [a.strip() for a in group.split(',')] + if len(args) != 5: + raise ValueError("Limit rules must contain the following " + "arguments: verb, uri, regex, value, unit") + + # Pull out the arguments + verb, uri, regex, value, unit = args + + # Upper-case the verb + verb = verb.upper() + + # Convert value--raises ValueError if it's not integer + value = int(value) + + # Convert unit + unit = unit.upper() + if unit not in Limit.UNIT_MAP: + raise ValueError("Invalid units specified") + unit = Limit.UNIT_MAP[unit] + + # Build a limit + result.append(Limit(verb, uri, regex, value, unit)) + + return result + + +class WsgiLimiter(object): + """ + Rate-limit checking from a WSGI application. Uses an in-memory `Limiter`. 
+ + To use, POST ``/`` with JSON data such as:: + + { + "verb" : GET, + "path" : "/servers" + } + + and receive a 204 No Content, or a 403 Forbidden with an X-Wait-Seconds + header containing the number of seconds to wait before the action would + succeed. + """ + + def __init__(self, limits=None): + """ + Initialize the new `WsgiLimiter`. + + @param limits: List of `Limit` objects + """ + self._limiter = Limiter(limits or DEFAULT_LIMITS) + + @webob.dec.wsgify(RequestClass=wsgi.Request) + def __call__(self, request): + """ + Handles a call to this application. Returns 204 if the request is + acceptable to the limiter, else a 403 is returned with a relevant + header indicating when the request *will* succeed. + """ + if request.method != "POST": + raise webob.exc.HTTPMethodNotAllowed() + + try: + info = dict(jsonutils.loads(request.body)) + except ValueError: + raise webob.exc.HTTPBadRequest() + + username = request.path_info_pop() + verb = info.get("verb") + path = info.get("path") + + delay, error = self._limiter.check_for_delay(verb, path, username) + + if delay: + headers = {"X-Wait-Seconds": "%.2f" % delay} + return webob.exc.HTTPForbidden(headers=headers, explanation=error) + else: + return webob.exc.HTTPNoContent() + + +class WsgiLimiterProxy(object): + """ + Rate-limit requests based on answers from a remote source. + """ + + def __init__(self, limiter_address): + """ + Initialize the new `WsgiLimiterProxy`. + + @param limiter_address: IP/port combination of where to request limit + """ + self.limiter_address = limiter_address + + def check_for_delay(self, verb, path, username=None): + body = jsonutils.dumps({"verb": verb, "path": path}) + headers = {"Content-Type": "application/json"} + + conn = httplib.HTTPConnection(self.limiter_address) + + if username: + conn.request("POST", "/%s" % (username), body, headers) + else: + conn.request("POST", "/", body, headers) + + resp = conn.getresponse() + + if 200 >= resp.status < 300: + return None, None + + return resp.getheader("X-Wait-Seconds"), resp.read() or None + + # Note: This method gets called before the class is instantiated, + # so this must be either a static method or a class method. It is + # used to develop a list of limits to feed to the constructor. + # This implementation returns an empty list, since all limit + # decisions are made by a remote server. + @staticmethod + def parse_limits(limits): + """ + Ignore a limits string--simply doesn't apply for the limit + proxy. + + @return: Empty list. + """ + + return [] diff --git a/cinder/api/v1/router.py b/cinder/api/v1/router.py new file mode 100644 index 0000000000..bb361412cd --- /dev/null +++ b/cinder/api/v1/router.py @@ -0,0 +1,95 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# Copyright 2011 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +WSGI middleware for OpenStack Volume API. 
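+
+The v1 router below wires the core resources (versions, volumes, volume
+types, snapshots, snapshot and volume metadata, and limits) into a single
+WSGI application; loaded API extensions are attached through the
+ExtensionManager.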
+""" + +from cinder.api import extensions +import cinder.api.openstack +from cinder.api.v1 import limits +from cinder.api.v1 import snapshot_metadata +from cinder.api.v1 import snapshots +from cinder.api.v1 import types +from cinder.api.v1 import volume_metadata +from cinder.api.v1 import volumes +from cinder.api import versions +from cinder.openstack.common import log as logging + + +LOG = logging.getLogger(__name__) + + +class APIRouter(cinder.api.openstack.APIRouter): + """ + Routes requests on the OpenStack API to the appropriate controller + and method. + """ + ExtensionManager = extensions.ExtensionManager + + def _setup_routes(self, mapper, ext_mgr): + self.resources['versions'] = versions.create_resource() + mapper.connect("versions", "/", + controller=self.resources['versions'], + action='show') + + mapper.redirect("", "/") + + self.resources['volumes'] = volumes.create_resource(ext_mgr) + mapper.resource("volume", "volumes", + controller=self.resources['volumes'], + collection={'detail': 'GET'}, + member={'action': 'POST'}) + + self.resources['types'] = types.create_resource() + mapper.resource("type", "types", + controller=self.resources['types']) + + self.resources['snapshots'] = snapshots.create_resource(ext_mgr) + mapper.resource("snapshot", "snapshots", + controller=self.resources['snapshots'], + collection={'detail': 'GET'}, + member={'action': 'POST'}) + + self.resources['snapshot_metadata'] = \ + snapshot_metadata.create_resource() + snapshot_metadata_controller = self.resources['snapshot_metadata'] + + mapper.resource("snapshot_metadata", "metadata", + controller=snapshot_metadata_controller, + parent_resource=dict(member_name='snapshot', + collection_name='snapshots')) + + self.resources['limits'] = limits.create_resource() + mapper.resource("limit", "limits", + controller=self.resources['limits']) + self.resources['volume_metadata'] = \ + volume_metadata.create_resource() + volume_metadata_controller = self.resources['volume_metadata'] + + mapper.resource("volume_metadata", "metadata", + controller=volume_metadata_controller, + parent_resource=dict(member_name='volume', + collection_name='volumes')) + + mapper.connect("metadata", + "/{project_id}/volumes/{volume_id}/metadata", + controller=volume_metadata_controller, + action='update_all', + conditions={"method": ['PUT']}) diff --git a/cinder/api/v1/snapshot_metadata.py b/cinder/api/v1/snapshot_metadata.py new file mode 100644 index 0000000000..6322204ff7 --- /dev/null +++ b/cinder/api/v1/snapshot_metadata.py @@ -0,0 +1,164 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+
+import webob
+
+from cinder.api import common
+from cinder.api.openstack import wsgi
+from cinder import exception
+from cinder import volume
+from webob import exc
+
+
+class Controller(object):
+    """The snapshot metadata API controller for the OpenStack API."""
+
+    def __init__(self):
+        self.volume_api = volume.API()
+        super(Controller, self).__init__()
+
+    def _get_metadata(self, context, snapshot_id):
+        try:
+            snapshot = self.volume_api.get_snapshot(context, snapshot_id)
+            meta = self.volume_api.get_snapshot_metadata(context, snapshot)
+        except exception.SnapshotNotFound:
+            msg = _('snapshot does not exist')
+            raise exc.HTTPNotFound(explanation=msg)
+        return meta
+
+    @wsgi.serializers(xml=common.MetadataTemplate)
+    def index(self, req, snapshot_id):
+        """Return the list of metadata for a given snapshot."""
+        context = req.environ['cinder.context']
+        return {'metadata': self._get_metadata(context, snapshot_id)}
+
+    @wsgi.serializers(xml=common.MetadataTemplate)
+    @wsgi.deserializers(xml=common.MetadataDeserializer)
+    def create(self, req, snapshot_id, body):
+        try:
+            metadata = body['metadata']
+        except (KeyError, TypeError):
+            msg = _("Malformed request body")
+            raise exc.HTTPBadRequest(explanation=msg)
+
+        context = req.environ['cinder.context']
+
+        new_metadata = self._update_snapshot_metadata(context,
+                                                      snapshot_id,
+                                                      metadata,
+                                                      delete=False)
+
+        return {'metadata': new_metadata}
+
+    @wsgi.serializers(xml=common.MetaItemTemplate)
+    @wsgi.deserializers(xml=common.MetaItemDeserializer)
+    def update(self, req, snapshot_id, id, body):
+        try:
+            meta_item = body['meta']
+        except (TypeError, KeyError):
+            expl = _('Malformed request body')
+            raise exc.HTTPBadRequest(explanation=expl)
+
+        if id not in meta_item:
+            expl = _('Request body and URI mismatch')
+            raise exc.HTTPBadRequest(explanation=expl)
+
+        if len(meta_item) > 1:
+            expl = _('Request body contains too many items')
+            raise exc.HTTPBadRequest(explanation=expl)
+
+        context = req.environ['cinder.context']
+        self._update_snapshot_metadata(context,
+                                       snapshot_id,
+                                       meta_item,
+                                       delete=False)
+
+        return {'meta': meta_item}
+
+    @wsgi.serializers(xml=common.MetadataTemplate)
+    @wsgi.deserializers(xml=common.MetadataDeserializer)
+    def update_all(self, req, snapshot_id, body):
+        try:
+            metadata = body['metadata']
+        except (TypeError, KeyError):
+            expl = _('Malformed request body')
+            raise exc.HTTPBadRequest(explanation=expl)
+
+        context = req.environ['cinder.context']
+        new_metadata = self._update_snapshot_metadata(context,
+                                                      snapshot_id,
+                                                      metadata,
+                                                      delete=True)
+
+        return {'metadata': new_metadata}
+
+    def _update_snapshot_metadata(self, context,
+                                  snapshot_id, metadata,
+                                  delete=False):
+        try:
+            snapshot = self.volume_api.get_snapshot(context, snapshot_id)
+            return self.volume_api.update_snapshot_metadata(context,
+                                                            snapshot,
+                                                            metadata,
+                                                            delete)
+        except exception.SnapshotNotFound:
+            msg = _('snapshot does not exist')
+            raise exc.HTTPNotFound(explanation=msg)
+
+        except (ValueError, AttributeError):
+            msg = _("Malformed request body")
+            raise exc.HTTPBadRequest(explanation=msg)
+
+        except exception.InvalidVolumeMetadata as error:
+            raise exc.HTTPBadRequest(explanation=unicode(error))
+
+        except exception.InvalidVolumeMetadataSize as error:
+            raise exc.HTTPRequestEntityTooLarge(explanation=unicode(error))
+
+    @wsgi.serializers(xml=common.MetaItemTemplate)
+    def show(self, req, snapshot_id, id):
+        """Return a single metadata item."""
+        context = req.environ['cinder.context']
+        data = self._get_metadata(context, snapshot_id)
+
+        try:
+            return {'meta': {id: data[id]}}
+        except KeyError:
+            msg = _("Metadata item was not found")
+            raise exc.HTTPNotFound(explanation=msg)
+
+    def delete(self, req, snapshot_id, id):
+        """Delete an existing metadata item."""
+        context = req.environ['cinder.context']
+
+        metadata = self._get_metadata(context, snapshot_id)
+
+        if id not in metadata:
+            msg = _("Metadata item was not found")
+            raise exc.HTTPNotFound(explanation=msg)
+
+        try:
+            snapshot = self.volume_api.get_snapshot(context, snapshot_id)
+            self.volume_api.delete_snapshot_metadata(context, snapshot, id)
+        except exception.SnapshotNotFound:
+            msg = _('snapshot does not exist')
+            raise exc.HTTPNotFound(explanation=msg)
+        return webob.Response(status_int=200)
+
+
+def create_resource():
+    return wsgi.Resource(Controller())
diff --git a/cinder/api/v1/snapshots.py b/cinder/api/v1/snapshots.py
new file mode 100644
index 0000000000..6dd24e1a70
--- /dev/null
+++ b/cinder/api/v1/snapshots.py
@@ -0,0 +1,234 @@
+# Copyright 2011 Justin Santa Barbara
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""The volume snapshots API."""
+
+import webob
+from webob import exc
+
+from cinder.api import common
+from cinder.api.openstack import wsgi
+from cinder.api.v1 import volumes
+from cinder.api import xmlutil
+from cinder import exception
+from cinder import flags
+from cinder.openstack.common import log as logging
+from cinder.openstack.common import strutils
+from cinder import utils
+from cinder import volume
+
+
+LOG = logging.getLogger(__name__)
+
+
+FLAGS = flags.FLAGS
+
+
+def _translate_snapshot_detail_view(context, snapshot):
+    """Maps keys for snapshots details view."""
+
+    d = _translate_snapshot_summary_view(context, snapshot)
+
+    # NOTE(gagupta): No additional data / lookups at the moment
+    return d
+
+
+def _translate_snapshot_summary_view(context, snapshot):
+    """Maps keys for snapshots summary view."""
+    d = {}
+
+    d['id'] = snapshot['id']
+    d['created_at'] = snapshot['created_at']
+    d['display_name'] = snapshot['display_name']
+    d['display_description'] = snapshot['display_description']
+    d['volume_id'] = snapshot['volume_id']
+    d['status'] = snapshot['status']
+    d['size'] = snapshot['volume_size']
+
+    if snapshot.get('snapshot_metadata'):
+        metadata = snapshot.get('snapshot_metadata')
+        d['metadata'] = dict((item['key'], item['value']) for item in metadata)
+    # avoid circular ref when vol is a Volume instance
+    elif snapshot.get('metadata') and isinstance(snapshot.get('metadata'),
+                                                 dict):
+        d['metadata'] = snapshot['metadata']
+    else:
+        d['metadata'] = {}
+    return d
+
+
+def make_snapshot(elem):
+    elem.set('id')
+    elem.set('status')
+    elem.set('size')
+    elem.set('created_at')
+    elem.set('display_name')
+    elem.set('display_description')
+    elem.set('volume_id')
+    elem.append(common.MetadataTemplate())
+
+
+class SnapshotTemplate(xmlutil.TemplateBuilder):
+    def construct(self):
+        root = xmlutil.TemplateElement('snapshot', selector='snapshot')
+        make_snapshot(root)
+        return xmlutil.MasterTemplate(root, 1)
+
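+
+# NOTE: these TemplateBuilder classes only affect XML responses; they
+# serialize the dicts produced by the _translate_snapshot_*_view helpers
+# above. SnapshotsTemplate below renders one <snapshot> element per item.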
+ +class SnapshotsTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = xmlutil.TemplateElement('snapshots') + elem = xmlutil.SubTemplateElement(root, 'snapshot', + selector='snapshots') + make_snapshot(elem) + return xmlutil.MasterTemplate(root, 1) + + +class SnapshotsController(wsgi.Controller): + """The Volumes API controller for the OpenStack API.""" + + def __init__(self, ext_mgr=None): + self.volume_api = volume.API() + self.ext_mgr = ext_mgr + super(SnapshotsController, self).__init__() + + @wsgi.serializers(xml=SnapshotTemplate) + def show(self, req, id): + """Return data about the given snapshot.""" + context = req.environ['cinder.context'] + + try: + vol = self.volume_api.get_snapshot(context, id) + except exception.NotFound: + raise exc.HTTPNotFound() + + return {'snapshot': _translate_snapshot_detail_view(context, vol)} + + def delete(self, req, id): + """Delete a snapshot.""" + context = req.environ['cinder.context'] + + LOG.audit(_("Delete snapshot with id: %s"), id, context=context) + + try: + snapshot = self.volume_api.get_snapshot(context, id) + self.volume_api.delete_snapshot(context, snapshot) + except exception.NotFound: + raise exc.HTTPNotFound() + return webob.Response(status_int=202) + + @wsgi.serializers(xml=SnapshotsTemplate) + def index(self, req): + """Returns a summary list of snapshots.""" + return self._items(req, entity_maker=_translate_snapshot_summary_view) + + @wsgi.serializers(xml=SnapshotsTemplate) + def detail(self, req): + """Returns a detailed list of snapshots.""" + return self._items(req, entity_maker=_translate_snapshot_detail_view) + + def _items(self, req, entity_maker): + """Returns a list of snapshots, transformed through entity_maker.""" + context = req.environ['cinder.context'] + + search_opts = {} + search_opts.update(req.GET) + allowed_search_options = ('status', 'volume_id', 'display_name') + volumes.remove_invalid_options(context, search_opts, + allowed_search_options) + + snapshots = self.volume_api.get_all_snapshots(context, + search_opts=search_opts) + limited_list = common.limited(snapshots, req) + res = [entity_maker(context, snapshot) for snapshot in limited_list] + return {'snapshots': res} + + @wsgi.serializers(xml=SnapshotTemplate) + def create(self, req, body): + """Creates a new snapshot.""" + kwargs = {} + context = req.environ['cinder.context'] + + if not self.is_valid_body(body, 'snapshot'): + raise exc.HTTPUnprocessableEntity() + + snapshot = body['snapshot'] + kwargs['metadata'] = snapshot.get('metadata', None) + + volume_id = snapshot['volume_id'] + volume = self.volume_api.get(context, volume_id) + force = snapshot.get('force', False) + msg = _("Create snapshot from volume %s") + LOG.audit(msg, volume_id, context=context) + + if not utils.is_valid_boolstr(force): + msg = _("Invalid value '%s' for force. 
") % force + raise exception.InvalidParameterValue(err=msg) + + if strutils.bool_from_string(force): + new_snapshot = self.volume_api.create_snapshot_force( + context, + volume, + snapshot.get('display_name'), + snapshot.get('display_description'), + **kwargs) + else: + new_snapshot = self.volume_api.create_snapshot( + context, + volume, + snapshot.get('display_name'), + snapshot.get('display_description'), + **kwargs) + + retval = _translate_snapshot_detail_view(context, new_snapshot) + + return {'snapshot': retval} + + @wsgi.serializers(xml=SnapshotTemplate) + def update(self, req, id, body): + """Update a snapshot.""" + context = req.environ['cinder.context'] + + if not body: + raise exc.HTTPUnprocessableEntity() + + if 'snapshot' not in body: + raise exc.HTTPUnprocessableEntity() + + snapshot = body['snapshot'] + update_dict = {} + + valid_update_keys = ( + 'display_name', + 'display_description', + ) + + for key in valid_update_keys: + if key in snapshot: + update_dict[key] = snapshot[key] + + try: + snapshot = self.volume_api.get_snapshot(context, id) + self.volume_api.update_snapshot(context, snapshot, update_dict) + except exception.NotFound: + raise exc.HTTPNotFound() + + snapshot.update(update_dict) + + return {'snapshot': _translate_snapshot_detail_view(context, snapshot)} + + +def create_resource(ext_mgr): + return wsgi.Resource(SnapshotsController(ext_mgr)) diff --git a/cinder/api/v1/types.py b/cinder/api/v1/types.py new file mode 100644 index 0000000000..1513b8dcfa --- /dev/null +++ b/cinder/api/v1/types.py @@ -0,0 +1,80 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 Zadara Storage Inc. +# Copyright (c) 2011 OpenStack LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +"""The volume type & volume types extra specs extension.""" + +from webob import exc + +from cinder.api.openstack import wsgi +from cinder.api.views import types as views_types +from cinder.api import xmlutil +from cinder import exception +from cinder.volume import volume_types + + +def make_voltype(elem): + elem.set('id') + elem.set('name') + extra_specs = xmlutil.make_flat_dict('extra_specs', selector='extra_specs') + elem.append(extra_specs) + + +class VolumeTypeTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = xmlutil.TemplateElement('volume_type', selector='volume_type') + make_voltype(root) + return xmlutil.MasterTemplate(root, 1) + + +class VolumeTypesTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = xmlutil.TemplateElement('volume_types') + elem = xmlutil.SubTemplateElement(root, 'volume_type', + selector='volume_types') + make_voltype(elem) + return xmlutil.MasterTemplate(root, 1) + + +class VolumeTypesController(wsgi.Controller): + """The volume types API controller for the OpenStack API.""" + + _view_builder_class = views_types.ViewBuilder + + @wsgi.serializers(xml=VolumeTypesTemplate) + def index(self, req): + """Returns the list of volume types.""" + context = req.environ['cinder.context'] + vol_types = volume_types.get_all_types(context).values() + return self._view_builder.index(req, vol_types) + + @wsgi.serializers(xml=VolumeTypeTemplate) + def show(self, req, id): + """Return a single volume type item.""" + context = req.environ['cinder.context'] + + try: + vol_type = volume_types.get_volume_type(context, id) + except exception.NotFound: + raise exc.HTTPNotFound() + + # TODO(bcwaldon): remove str cast once we use uuids + vol_type['id'] = str(vol_type['id']) + return self._view_builder.show(req, vol_type) + + +def create_resource(): + return wsgi.Resource(VolumeTypesController()) diff --git a/cinder/api/v1/volume_metadata.py b/cinder/api/v1/volume_metadata.py new file mode 100644 index 0000000000..4f6df75b6e --- /dev/null +++ b/cinder/api/v1/volume_metadata.py @@ -0,0 +1,164 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import webob + +from cinder.api import common +from cinder.api.openstack import wsgi +from cinder import exception +from cinder import volume +from webob import exc + + +class Controller(object): + """ The volume metadata API controller for the OpenStack API """ + + def __init__(self): + self.volume_api = volume.API() + super(Controller, self).__init__() + + def _get_metadata(self, context, volume_id): + try: + volume = self.volume_api.get(context, volume_id) + meta = self.volume_api.get_volume_metadata(context, volume) + except exception.VolumeNotFound: + msg = _('volume does not exist') + raise exc.HTTPNotFound(explanation=msg) + return meta + + @wsgi.serializers(xml=common.MetadataTemplate) + def index(self, req, volume_id): + """ Returns the list of metadata for a given volume""" + context = req.environ['cinder.context'] + return {'metadata': self._get_metadata(context, volume_id)} + + @wsgi.serializers(xml=common.MetadataTemplate) + @wsgi.deserializers(xml=common.MetadataDeserializer) + def create(self, req, volume_id, body): + try: + metadata = body['metadata'] + except (KeyError, TypeError): + msg = _("Malformed request body") + raise exc.HTTPBadRequest(explanation=msg) + + context = req.environ['cinder.context'] + + new_metadata = self._update_volume_metadata(context, + volume_id, + metadata, + delete=False) + + return {'metadata': new_metadata} + + @wsgi.serializers(xml=common.MetaItemTemplate) + @wsgi.deserializers(xml=common.MetaItemDeserializer) + def update(self, req, volume_id, id, body): + try: + meta_item = body['meta'] + except (TypeError, KeyError): + expl = _('Malformed request body') + raise exc.HTTPBadRequest(explanation=expl) + + if id not in meta_item: + expl = _('Request body and URI mismatch') + raise exc.HTTPBadRequest(explanation=expl) + + if len(meta_item) > 1: + expl = _('Request body contains too many items') + raise exc.HTTPBadRequest(explanation=expl) + + context = req.environ['cinder.context'] + self._update_volume_metadata(context, + volume_id, + meta_item, + delete=False) + + return {'meta': meta_item} + + @wsgi.serializers(xml=common.MetadataTemplate) + @wsgi.deserializers(xml=common.MetadataDeserializer) + def update_all(self, req, volume_id, body): + try: + metadata = body['metadata'] + except (TypeError, KeyError): + expl = _('Malformed request body') + raise exc.HTTPBadRequest(explanation=expl) + + context = req.environ['cinder.context'] + new_metadata = self._update_volume_metadata(context, + volume_id, + metadata, + delete=True) + + return {'metadata': new_metadata} + + def _update_volume_metadata(self, context, + volume_id, metadata, + delete=False): + try: + volume = self.volume_api.get(context, volume_id) + return self.volume_api.update_volume_metadata(context, + volume, + metadata, + delete) + except exception.VolumeNotFound: + msg = _('volume does not exist') + raise exc.HTTPNotFound(explanation=msg) + + except (ValueError, AttributeError): + msg = _("Malformed request body") + raise exc.HTTPBadRequest(explanation=msg) + + except exception.InvalidVolumeMetadata as error: + raise exc.HTTPBadRequest(explanation=unicode(error)) + + except exception.InvalidVolumeMetadataSize as error: + raise exc.HTTPRequestEntityTooLarge(explanation=unicode(error)) + + @wsgi.serializers(xml=common.MetaItemTemplate) + def show(self, req, volume_id, id): + """ Return a single metadata item """ + context = req.environ['cinder.context'] + data = self._get_metadata(context, volume_id) + + try: + return {'meta': {id: data[id]}} + except KeyError: + msg = _("Metadata 
item was not found") + raise exc.HTTPNotFound(explanation=msg) + + def delete(self, req, volume_id, id): + """ Deletes an existing metadata """ + context = req.environ['cinder.context'] + + metadata = self._get_metadata(context, volume_id) + + if id not in metadata: + msg = _("Metadata item was not found") + raise exc.HTTPNotFound(explanation=msg) + + try: + volume = self.volume_api.get(context, volume_id) + self.volume_api.delete_volume_metadata(context, volume, id) + except exception.VolumeNotFound: + msg = _('volume does not exist') + raise exc.HTTPNotFound(explanation=msg) + return webob.Response(status_int=200) + + +def create_resource(): + return wsgi.Resource(Controller()) diff --git a/cinder/api/v1/volumes.py b/cinder/api/v1/volumes.py new file mode 100644 index 0000000000..b7a687ee1f --- /dev/null +++ b/cinder/api/v1/volumes.py @@ -0,0 +1,421 @@ +# Copyright 2011 Justin Santa Barbara +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""The volumes api.""" + +import webob +from webob import exc + +from cinder.api import common +from cinder.api.openstack import wsgi +from cinder.api import xmlutil +from cinder import exception +from cinder import flags +from cinder.openstack.common import log as logging +from cinder.openstack.common import uuidutils +from cinder import utils +from cinder import volume +from cinder.volume import volume_types + + +LOG = logging.getLogger(__name__) + + +FLAGS = flags.FLAGS + + +def _translate_attachment_detail_view(_context, vol): + """Maps keys for attachment details view.""" + + d = _translate_attachment_summary_view(_context, vol) + + # No additional data / lookups at the moment + + return d + + +def _translate_attachment_summary_view(_context, vol): + """Maps keys for attachment summary view.""" + d = {} + + volume_id = vol['id'] + + # NOTE(justinsb): We use the volume id as the id of the attachment object + d['id'] = volume_id + + d['volume_id'] = volume_id + d['server_id'] = vol['instance_uuid'] + if vol.get('mountpoint'): + d['device'] = vol['mountpoint'] + + return d + + +def _translate_volume_detail_view(context, vol, image_id=None): + """Maps keys for volumes details view.""" + + d = _translate_volume_summary_view(context, vol, image_id) + + # No additional data / lookups at the moment + + return d + + +def _translate_volume_summary_view(context, vol, image_id=None): + """Maps keys for volumes summary view.""" + d = {} + + d['id'] = vol['id'] + d['status'] = vol['status'] + d['size'] = vol['size'] + d['availability_zone'] = vol['availability_zone'] + d['created_at'] = vol['created_at'] + + d['attachments'] = [] + if vol['attach_status'] == 'attached': + attachment = _translate_attachment_detail_view(context, vol) + d['attachments'].append(attachment) + + d['display_name'] = vol['display_name'] + d['display_description'] = vol['display_description'] + + if vol['volume_type_id'] and vol.get('volume_type'): + d['volume_type'] = vol['volume_type']['name'] + else: + # TODO(bcwaldon): remove str cast once we use uuids + 
d['volume_type'] = str(vol['volume_type_id']) + + d['snapshot_id'] = vol['snapshot_id'] + d['source_volid'] = vol['source_volid'] + + if image_id: + d['image_id'] = image_id + + LOG.audit(_("vol=%s"), vol, context=context) + + if vol.get('volume_metadata'): + metadata = vol.get('volume_metadata') + d['metadata'] = dict((item['key'], item['value']) for item in metadata) + # avoid circular ref when vol is a Volume instance + elif vol.get('metadata') and isinstance(vol.get('metadata'), dict): + d['metadata'] = vol['metadata'] + else: + d['metadata'] = {} + + if vol.get('volume_glance_metadata'): + d['bootable'] = 'true' + else: + d['bootable'] = 'false' + + return d + + +def make_attachment(elem): + elem.set('id') + elem.set('server_id') + elem.set('volume_id') + elem.set('device') + + +def make_volume(elem): + elem.set('id') + elem.set('status') + elem.set('size') + elem.set('availability_zone') + elem.set('created_at') + elem.set('display_name') + elem.set('display_description') + elem.set('volume_type') + elem.set('snapshot_id') + elem.set('source_volid') + + attachments = xmlutil.SubTemplateElement(elem, 'attachments') + attachment = xmlutil.SubTemplateElement(attachments, 'attachment', + selector='attachments') + make_attachment(attachment) + + # Attach metadata node + elem.append(common.MetadataTemplate()) + + +volume_nsmap = {None: xmlutil.XMLNS_VOLUME_V1, 'atom': xmlutil.XMLNS_ATOM} + + +class VolumeTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = xmlutil.TemplateElement('volume', selector='volume') + make_volume(root) + return xmlutil.MasterTemplate(root, 1, nsmap=volume_nsmap) + + +class VolumesTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = xmlutil.TemplateElement('volumes') + elem = xmlutil.SubTemplateElement(root, 'volume', selector='volumes') + make_volume(elem) + return xmlutil.MasterTemplate(root, 1, nsmap=volume_nsmap) + + +class CommonDeserializer(wsgi.MetadataXMLDeserializer): + """Common deserializer to handle xml-formatted volume requests. + + Handles standard volume attributes as well as the optional metadata + attribute + """ + + metadata_deserializer = common.MetadataXMLDeserializer() + + def _extract_volume(self, node): + """Marshal the volume attribute of a parsed request.""" + volume = {} + volume_node = self.find_first_child_named(node, 'volume') + + attributes = ['display_name', 'display_description', 'size', + 'volume_type', 'availability_zone'] + for attr in attributes: + if volume_node.getAttribute(attr): + volume[attr] = volume_node.getAttribute(attr) + + metadata_node = self.find_first_child_named(volume_node, 'metadata') + if metadata_node is not None: + volume['metadata'] = self.extract_metadata(metadata_node) + + return volume + + +class CreateDeserializer(CommonDeserializer): + """Deserializer to handle xml-formatted create volume requests. 
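+
+    For example, a request body along these lines (an illustrative sketch,
+    not taken from the tests) is parsed into ``{'body': {'volume': {...}}}``::
+
+        <volume size="1" display_name="vol1">
+            <metadata>
+                <meta key="purpose">testing</meta>
+            </metadata>
+        </volume>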
+ + Handles standard volume attributes as well as the optional metadata + attribute + """ + + def default(self, string): + """Deserialize an xml-formatted volume create request.""" + dom = utils.safe_minidom_parse_string(string) + volume = self._extract_volume(dom) + return {'body': {'volume': volume}} + + +class VolumeController(wsgi.Controller): + """The Volumes API controller for the OpenStack API.""" + + def __init__(self, ext_mgr): + self.volume_api = volume.API() + self.ext_mgr = ext_mgr + super(VolumeController, self).__init__() + + @wsgi.serializers(xml=VolumeTemplate) + def show(self, req, id): + """Return data about the given volume.""" + context = req.environ['cinder.context'] + + try: + vol = self.volume_api.get(context, id) + except exception.NotFound: + raise exc.HTTPNotFound() + + return {'volume': _translate_volume_detail_view(context, vol)} + + def delete(self, req, id): + """Delete a volume.""" + context = req.environ['cinder.context'] + + LOG.audit(_("Delete volume with id: %s"), id, context=context) + + try: + volume = self.volume_api.get(context, id) + self.volume_api.delete(context, volume) + except exception.NotFound: + raise exc.HTTPNotFound() + return webob.Response(status_int=202) + + @wsgi.serializers(xml=VolumesTemplate) + def index(self, req): + """Returns a summary list of volumes.""" + return self._items(req, entity_maker=_translate_volume_summary_view) + + @wsgi.serializers(xml=VolumesTemplate) + def detail(self, req): + """Returns a detailed list of volumes.""" + return self._items(req, entity_maker=_translate_volume_detail_view) + + def _items(self, req, entity_maker): + """Returns a list of volumes, transformed through entity_maker.""" + + search_opts = {} + search_opts.update(req.GET) + + context = req.environ['cinder.context'] + remove_invalid_options(context, + search_opts, self._get_volume_search_options()) + + volumes = self.volume_api.get_all(context, marker=None, limit=None, + sort_key='created_at', + sort_dir='desc', filters=search_opts) + limited_list = common.limited(volumes, req) + res = [entity_maker(context, vol) for vol in limited_list] + return {'volumes': res} + + def _image_uuid_from_href(self, image_href): + # If the image href was generated by nova api, strip image_href + # down to an id. + try: + image_uuid = image_href.split('/').pop() + except (TypeError, AttributeError): + msg = _("Invalid imageRef provided.") + raise exc.HTTPBadRequest(explanation=msg) + + if not uuidutils.is_uuid_like(image_uuid): + msg = _("Invalid imageRef provided.") + raise exc.HTTPBadRequest(explanation=msg) + + return image_uuid + + @wsgi.serializers(xml=VolumeTemplate) + @wsgi.deserializers(xml=CreateDeserializer) + def create(self, req, body): + """Creates a new volume.""" + if not self.is_valid_body(body, 'volume'): + raise exc.HTTPUnprocessableEntity() + + context = req.environ['cinder.context'] + volume = body['volume'] + + kwargs = {} + + req_volume_type = volume.get('volume_type', None) + if req_volume_type: + if not uuidutils.is_uuid_like(req_volume_type): + try: + kwargs['volume_type'] = \ + volume_types.get_volume_type_by_name( + context, req_volume_type) + except exception.VolumeTypeNotFound: + explanation = 'Volume type not found.' + raise exc.HTTPNotFound(explanation=explanation) + else: + try: + kwargs['volume_type'] = volume_types.get_volume_type( + context, req_volume_type) + except exception.VolumeTypeNotFound: + explanation = 'Volume type not found.' 
+ raise exc.HTTPNotFound(explanation=explanation) + + kwargs['metadata'] = volume.get('metadata', None) + + snapshot_id = volume.get('snapshot_id') + if snapshot_id is not None: + kwargs['snapshot'] = self.volume_api.get_snapshot(context, + snapshot_id) + else: + kwargs['snapshot'] = None + + source_volid = volume.get('source_volid') + if source_volid is not None: + kwargs['source_volume'] = self.volume_api.get_volume(context, + source_volid) + else: + kwargs['source_volume'] = None + + size = volume.get('size', None) + if size is None and kwargs['snapshot'] is not None: + size = kwargs['snapshot']['volume_size'] + elif size is None and kwargs['source_volume'] is not None: + size = kwargs['source_volume']['size'] + + LOG.audit(_("Create volume of %s GB"), size, context=context) + + image_href = None + image_uuid = None + if self.ext_mgr.is_loaded('os-image-create'): + image_href = volume.get('imageRef') + if image_href: + image_uuid = self._image_uuid_from_href(image_href) + kwargs['image_id'] = image_uuid + + kwargs['availability_zone'] = volume.get('availability_zone', None) + + new_volume = self.volume_api.create(context, + size, + volume.get('display_name'), + volume.get('display_description'), + **kwargs) + + # TODO(vish): Instance should be None at db layer instead of + # trying to lazy load, but for now we turn it into + # a dict to avoid an error. + retval = _translate_volume_detail_view(context, + dict(new_volume.iteritems()), + image_uuid) + + return {'volume': retval} + + def _get_volume_search_options(self): + """Return volume search options allowed by non-admin.""" + return ('display_name', 'status') + + @wsgi.serializers(xml=VolumeTemplate) + def update(self, req, id, body): + """Update a volume.""" + context = req.environ['cinder.context'] + + if not body: + raise exc.HTTPUnprocessableEntity() + + if 'volume' not in body: + raise exc.HTTPUnprocessableEntity() + + volume = body['volume'] + update_dict = {} + + valid_update_keys = ( + 'display_name', + 'display_description', + 'metadata', + ) + + for key in valid_update_keys: + if key in volume: + update_dict[key] = volume[key] + + try: + volume = self.volume_api.get(context, id) + self.volume_api.update(context, volume, update_dict) + except exception.NotFound: + raise exc.HTTPNotFound() + + volume.update(update_dict) + + return {'volume': _translate_volume_detail_view(context, volume)} + + +def create_resource(ext_mgr): + return wsgi.Resource(VolumeController(ext_mgr)) + + +def remove_invalid_options(context, search_options, allowed_search_options): + """Remove search options that are not valid for non-admin API/context.""" + if context.is_admin: + # Allow all options + return + # Otherwise, strip out all unknown options + unknown_options = [opt for opt in search_options + if opt not in allowed_search_options] + bad_options = ", ".join(unknown_options) + log_msg = _("Removing options '%(bad_options)s' from query") % locals() + LOG.debug(log_msg) + for opt in unknown_options: + del search_options[opt] diff --git a/cinder/api/v2/__init__.py b/cinder/api/v2/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/cinder/api/v2/limits.py b/cinder/api/v2/limits.py new file mode 100644 index 0000000000..b8a0ad848d --- /dev/null +++ b/cinder/api/v2/limits.py @@ -0,0 +1,482 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Module dedicated functions/classes dealing with rate limiting requests. +""" + +import collections +import copy +import httplib +import math +import re +import time + +import webob.dec +import webob.exc + +from cinder.api.openstack import wsgi +from cinder.api.views import limits as limits_views +from cinder.api import xmlutil +from cinder.openstack.common import importutils +from cinder.openstack.common import jsonutils +from cinder import quota +from cinder import wsgi as base_wsgi + +QUOTAS = quota.QUOTAS + + +# Convenience constants for the limits dictionary passed to Limiter(). +PER_SECOND = 1 +PER_MINUTE = 60 +PER_HOUR = 60 * 60 +PER_DAY = 60 * 60 * 24 + + +limits_nsmap = {None: xmlutil.XMLNS_COMMON_V10, 'atom': xmlutil.XMLNS_ATOM} + + +class LimitsTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = xmlutil.TemplateElement('limits', selector='limits') + + rates = xmlutil.SubTemplateElement(root, 'rates') + rate = xmlutil.SubTemplateElement(rates, 'rate', selector='rate') + rate.set('uri', 'uri') + rate.set('regex', 'regex') + limit = xmlutil.SubTemplateElement(rate, 'limit', selector='limit') + limit.set('value', 'value') + limit.set('verb', 'verb') + limit.set('remaining', 'remaining') + limit.set('unit', 'unit') + limit.set('next-available', 'next-available') + + absolute = xmlutil.SubTemplateElement(root, 'absolute', + selector='absolute') + limit = xmlutil.SubTemplateElement(absolute, 'limit', + selector=xmlutil.get_items) + limit.set('name', 0) + limit.set('value', 1) + + return xmlutil.MasterTemplate(root, 1, nsmap=limits_nsmap) + + +class LimitsController(object): + """ + Controller for accessing limits in the OpenStack API. + """ + + @wsgi.serializers(xml=LimitsTemplate) + def index(self, req): + """ + Return all global and rate limit information. + """ + context = req.environ['cinder.context'] + quotas = QUOTAS.get_project_quotas(context, context.project_id, + usages=False) + abs_limits = dict((k, v['limit']) for k, v in quotas.items()) + rate_limits = req.environ.get("cinder.limits", []) + + builder = self._get_view_builder(req) + return builder.build(rate_limits, abs_limits) + + def _get_view_builder(self, req): + return limits_views.ViewBuilder() + + +def create_resource(): + return wsgi.Resource(LimitsController()) + + +class Limit(object): + """ + Stores information about a limit for HTTP requests. + """ + + UNITS = { + 1: "SECOND", + 60: "MINUTE", + 60 * 60: "HOUR", + 60 * 60 * 24: "DAY", + } + + UNIT_MAP = dict([(v, k) for k, v in UNITS.items()]) + + def __init__(self, verb, uri, regex, value, unit): + """ + Initialize a new `Limit`. + + @param verb: HTTP verb (POST, PUT, etc.) 
+ @param uri: Human-readable URI + @param regex: Regular expression format for this limit + @param value: Integer number of requests which can be made + @param unit: Unit of measure for the value parameter + """ + self.verb = verb + self.uri = uri + self.regex = regex + self.value = int(value) + self.unit = unit + self.unit_string = self.display_unit().lower() + self.remaining = int(value) + + if value <= 0: + raise ValueError("Limit value must be > 0") + + self.last_request = None + self.next_request = None + + self.water_level = 0 + self.capacity = self.unit + self.request_value = float(self.capacity) / float(self.value) + msg = _("Only %(value)s %(verb)s request(s) can be " + "made to %(uri)s every %(unit_string)s.") + self.error_message = msg % self.__dict__ + + def __call__(self, verb, url): + """ + Represents a call to this limit from a relevant request. + + @param verb: string http verb (POST, GET, etc.) + @param url: string URL + """ + if self.verb != verb or not re.match(self.regex, url): + return + + now = self._get_time() + + if self.last_request is None: + self.last_request = now + + leak_value = now - self.last_request + + self.water_level -= leak_value + self.water_level = max(self.water_level, 0) + self.water_level += self.request_value + + difference = self.water_level - self.capacity + + self.last_request = now + + if difference > 0: + self.water_level -= self.request_value + self.next_request = now + difference + return difference + + cap = self.capacity + water = self.water_level + val = self.value + + self.remaining = math.floor(((cap - water) / cap) * val) + self.next_request = now + + def _get_time(self): + """Retrieve the current time. Broken out for testability.""" + return time.time() + + def display_unit(self): + """Display the string name of the unit.""" + return self.UNITS.get(self.unit, "UNKNOWN") + + def display(self): + """Return a useful representation of this class.""" + return { + "verb": self.verb, + "URI": self.uri, + "regex": self.regex, + "value": self.value, + "remaining": int(self.remaining), + "unit": self.display_unit(), + "resetTime": int(self.next_request or self._get_time()), + } + +# "Limit" format is a dictionary with the HTTP verb, human-readable URI, +# a regular-expression to match, value and unit of measure (PER_DAY, etc.) + +DEFAULT_LIMITS = [ + Limit("POST", "*", ".*", 10, PER_MINUTE), + Limit("POST", "*/servers", "^/servers", 50, PER_DAY), + Limit("PUT", "*", ".*", 10, PER_MINUTE), + Limit("GET", "*changes-since*", ".*changes-since.*", 3, PER_MINUTE), + Limit("DELETE", "*", ".*", 100, PER_MINUTE), +] + + +class RateLimitingMiddleware(base_wsgi.Middleware): + """ + Rate-limits requests passing through this middleware. All limit information + is stored in memory for this implementation. + """ + + def __init__(self, application, limits=None, limiter=None, **kwargs): + """ + Initialize new `RateLimitingMiddleware`, which wraps the given WSGI + application and sets up the given limits. + + @param application: WSGI application to wrap + @param limits: String describing limits + @param limiter: String identifying class for representing limits + + Other parameters are passed to the constructor for the limiter. 
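+
+        An example limits string, in the parenthesized form understood by
+        Limiter.parse_limits below (illustrative)::
+
+            "(POST, *, .*, 10, MINUTE);(GET, *changes-since*, .*changes-since.*, 3, MINUTE)"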
+ """ + base_wsgi.Middleware.__init__(self, application) + + # Select the limiter class + if limiter is None: + limiter = Limiter + else: + limiter = importutils.import_class(limiter) + + # Parse the limits, if any are provided + if limits is not None: + limits = limiter.parse_limits(limits) + + self._limiter = limiter(limits or DEFAULT_LIMITS, **kwargs) + + @webob.dec.wsgify(RequestClass=wsgi.Request) + def __call__(self, req): + """ + Represents a single call through this middleware. We should record the + request if we have a limit relevant to it. If no limit is relevant to + the request, ignore it. + + If the request should be rate limited, return a fault telling the user + they are over the limit and need to retry later. + """ + verb = req.method + url = req.url + context = req.environ.get("cinder.context") + + if context: + username = context.user_id + else: + username = None + + delay, error = self._limiter.check_for_delay(verb, url, username) + + if delay: + msg = _("This request was rate-limited.") + retry = time.time() + delay + return wsgi.OverLimitFault(msg, error, retry) + + req.environ["cinder.limits"] = self._limiter.get_limits(username) + + return self.application + + +class Limiter(object): + """ + Rate-limit checking class which handles limits in memory. + """ + + def __init__(self, limits, **kwargs): + """ + Initialize the new `Limiter`. + + @param limits: List of `Limit` objects + """ + self.limits = copy.deepcopy(limits) + self.levels = collections.defaultdict(lambda: copy.deepcopy(limits)) + + # Pick up any per-user limit information + for key, value in kwargs.items(): + if key.startswith('user:'): + username = key[5:] + self.levels[username] = self.parse_limits(value) + + def get_limits(self, username=None): + """ + Return the limits for a given user. + """ + return [limit.display() for limit in self.levels[username]] + + def check_for_delay(self, verb, url, username=None): + """ + Check the given verb/user/user triplet for limit. + + @return: Tuple of delay (in seconds) and error message (or None, None) + """ + delays = [] + + for limit in self.levels[username]: + delay = limit(verb, url) + if delay: + delays.append((delay, limit.error_message)) + + if delays: + delays.sort() + return delays[0] + + return None, None + + # Note: This method gets called before the class is instantiated, + # so this must be either a static method or a class method. It is + # used to develop a list of limits to feed to the constructor. We + # put this in the class so that subclasses can override the + # default limit parsing. + @staticmethod + def parse_limits(limits): + """ + Convert a string into a list of Limit instances. This + implementation expects a semicolon-separated sequence of + parenthesized groups, where each group contains a + comma-separated sequence consisting of HTTP method, + user-readable URI, a URI reg-exp, an integer number of + requests which can be made, and a unit of measure. Valid + values for the latter are "SECOND", "MINUTE", "HOUR", and + "DAY". + + @return: List of Limit instances. 
+ """ + + # Handle empty limit strings + limits = limits.strip() + if not limits: + return [] + + # Split up the limits by semicolon + result = [] + for group in limits.split(';'): + group = group.strip() + if group[:1] != '(' or group[-1:] != ')': + raise ValueError("Limit rules must be surrounded by " + "parentheses") + group = group[1:-1] + + # Extract the Limit arguments + args = [a.strip() for a in group.split(',')] + if len(args) != 5: + raise ValueError("Limit rules must contain the following " + "arguments: verb, uri, regex, value, unit") + + # Pull out the arguments + verb, uri, regex, value, unit = args + + # Upper-case the verb + verb = verb.upper() + + # Convert value--raises ValueError if it's not integer + value = int(value) + + # Convert unit + unit = unit.upper() + if unit not in Limit.UNIT_MAP: + raise ValueError("Invalid units specified") + unit = Limit.UNIT_MAP[unit] + + # Build a limit + result.append(Limit(verb, uri, regex, value, unit)) + + return result + + +class WsgiLimiter(object): + """ + Rate-limit checking from a WSGI application. Uses an in-memory `Limiter`. + + To use, POST ``/`` with JSON data such as:: + + { + "verb" : GET, + "path" : "/servers" + } + + and receive a 204 No Content, or a 403 Forbidden with an X-Wait-Seconds + header containing the number of seconds to wait before the action would + succeed. + """ + + def __init__(self, limits=None): + """ + Initialize the new `WsgiLimiter`. + + @param limits: List of `Limit` objects + """ + self._limiter = Limiter(limits or DEFAULT_LIMITS) + + @webob.dec.wsgify(RequestClass=wsgi.Request) + def __call__(self, request): + """ + Handles a call to this application. Returns 204 if the request is + acceptable to the limiter, else a 403 is returned with a relevant + header indicating when the request *will* succeed. + """ + if request.method != "POST": + raise webob.exc.HTTPMethodNotAllowed() + + try: + info = dict(jsonutils.loads(request.body)) + except ValueError: + raise webob.exc.HTTPBadRequest() + + username = request.path_info_pop() + verb = info.get("verb") + path = info.get("path") + + delay, error = self._limiter.check_for_delay(verb, path, username) + + if delay: + headers = {"X-Wait-Seconds": "%.2f" % delay} + return webob.exc.HTTPForbidden(headers=headers, explanation=error) + else: + return webob.exc.HTTPNoContent() + + +class WsgiLimiterProxy(object): + """ + Rate-limit requests based on answers from a remote source. + """ + + def __init__(self, limiter_address): + """ + Initialize the new `WsgiLimiterProxy`. + + @param limiter_address: IP/port combination of where to request limit + """ + self.limiter_address = limiter_address + + def check_for_delay(self, verb, path, username=None): + body = jsonutils.dumps({"verb": verb, "path": path}) + headers = {"Content-Type": "application/json"} + + conn = httplib.HTTPConnection(self.limiter_address) + + if username: + conn.request("POST", "/%s" % (username), body, headers) + else: + conn.request("POST", "/", body, headers) + + resp = conn.getresponse() + + if 200 >= resp.status < 300: + return None, None + + return resp.getheader("X-Wait-Seconds"), resp.read() or None + + # Note: This method gets called before the class is instantiated, + # so this must be either a static method or a class method. It is + # used to develop a list of limits to feed to the constructor. + # This implementation returns an empty list, since all limit + # decisions are made by a remote server. 
+ @staticmethod + def parse_limits(limits): + """ + Ignore a limits string--simply doesn't apply for the limit + proxy. + + @return: Empty list. + """ + + return [] diff --git a/cinder/api/v2/router.py b/cinder/api/v2/router.py new file mode 100644 index 0000000000..e3e51399f1 --- /dev/null +++ b/cinder/api/v2/router.py @@ -0,0 +1,70 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# Copyright 2011 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +WSGI middleware for OpenStack Volume API. +""" + +from cinder.api import extensions +import cinder.api.openstack +from cinder.api.v2 import limits +from cinder.api.v2 import snapshots +from cinder.api.v2 import types +from cinder.api.v2 import volumes +from cinder.api import versions +from cinder.openstack.common import log as logging + + +LOG = logging.getLogger(__name__) + + +class APIRouter(cinder.api.openstack.APIRouter): + """ + Routes requests on the OpenStack API to the appropriate controller + and method. + """ + ExtensionManager = extensions.ExtensionManager + + def _setup_routes(self, mapper, ext_mgr): + self.resources['versions'] = versions.create_resource() + mapper.connect("versions", "/", + controller=self.resources['versions'], + action='show') + + mapper.redirect("", "/") + + self.resources['volumes'] = volumes.create_resource(ext_mgr) + mapper.resource("volume", "volumes", + controller=self.resources['volumes'], + collection={'detail': 'GET'}, + member={'action': 'POST'}) + + self.resources['types'] = types.create_resource() + mapper.resource("type", "types", + controller=self.resources['types']) + + self.resources['snapshots'] = snapshots.create_resource(ext_mgr) + mapper.resource("snapshot", "snapshots", + controller=self.resources['snapshots'], + collection={'detail': 'GET'}, + member={'action': 'POST'}) + + self.resources['limits'] = limits.create_resource() + mapper.resource("limit", "limits", + controller=self.resources['limits']) diff --git a/cinder/api/v2/snapshot_metadata.py b/cinder/api/v2/snapshot_metadata.py new file mode 100644 index 0000000000..6322204ff7 --- /dev/null +++ b/cinder/api/v2/snapshot_metadata.py @@ -0,0 +1,164 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
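
For orientation, each `mapper.resource` call in the v2 router above expands into the standard REST routes plus the declared collection and member actions. A standalone sketch using the `routes` library directly (no controllers attached, so it only demonstrates URL matching)::

    import routes

    mapper = routes.Mapper()
    mapper.resource("snapshot", "snapshots",
                    collection={'detail': 'GET'},
                    member={'action': 'POST'})

    # The resource() call above expands to the usual REST routes:
    #   GET    /snapshots             -> index
    #   POST   /snapshots             -> create
    #   GET    /snapshots/detail      -> detail  (extra collection route)
    #   GET    /snapshots/{id}        -> show
    #   PUT    /snapshots/{id}        -> update
    #   DELETE /snapshots/{id}        -> delete
    #   POST   /snapshots/{id}/action -> action  (extra member route)
    result = mapper.match('/snapshots/detail',
                          environ={'REQUEST_METHOD': 'GET'})
    # result['action'] == 'detail'
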
+
+import webob
+
+from cinder.api import common
+from cinder.api.openstack import wsgi
+from cinder import exception
+from cinder import volume
+from webob import exc
+
+
+class Controller(object):
+    """The snapshot metadata API controller for the OpenStack API."""
+
+    def __init__(self):
+        self.volume_api = volume.API()
+        super(Controller, self).__init__()
+
+    def _get_metadata(self, context, snapshot_id):
+        try:
+            snapshot = self.volume_api.get_snapshot(context, snapshot_id)
+            meta = self.volume_api.get_snapshot_metadata(context, snapshot)
+        except exception.SnapshotNotFound:
+            msg = _('snapshot does not exist')
+            raise exc.HTTPNotFound(explanation=msg)
+        return meta
+
+    @wsgi.serializers(xml=common.MetadataTemplate)
+    def index(self, req, snapshot_id):
+        """Return the list of metadata for a given snapshot."""
+        context = req.environ['cinder.context']
+        return {'metadata': self._get_metadata(context, snapshot_id)}
+
+    @wsgi.serializers(xml=common.MetadataTemplate)
+    @wsgi.deserializers(xml=common.MetadataDeserializer)
+    def create(self, req, snapshot_id, body):
+        try:
+            metadata = body['metadata']
+        except (KeyError, TypeError):
+            msg = _("Malformed request body")
+            raise exc.HTTPBadRequest(explanation=msg)
+
+        context = req.environ['cinder.context']
+
+        new_metadata = self._update_snapshot_metadata(context,
+                                                      snapshot_id,
+                                                      metadata,
+                                                      delete=False)
+
+        return {'metadata': new_metadata}
+
+    @wsgi.serializers(xml=common.MetaItemTemplate)
+    @wsgi.deserializers(xml=common.MetaItemDeserializer)
+    def update(self, req, snapshot_id, id, body):
+        try:
+            meta_item = body['meta']
+        except (TypeError, KeyError):
+            expl = _('Malformed request body')
+            raise exc.HTTPBadRequest(explanation=expl)
+
+        if id not in meta_item:
+            expl = _('Request body and URI mismatch')
+            raise exc.HTTPBadRequest(explanation=expl)
+
+        if len(meta_item) > 1:
+            expl = _('Request body contains too many items')
+            raise exc.HTTPBadRequest(explanation=expl)
+
+        context = req.environ['cinder.context']
+        self._update_snapshot_metadata(context,
+                                       snapshot_id,
+                                       meta_item,
+                                       delete=False)
+
+        return {'meta': meta_item}
+
+    @wsgi.serializers(xml=common.MetadataTemplate)
+    @wsgi.deserializers(xml=common.MetadataDeserializer)
+    def update_all(self, req, snapshot_id, body):
+        try:
+            metadata = body['metadata']
+        except (TypeError, KeyError):
+            expl = _('Malformed request body')
+            raise exc.HTTPBadRequest(explanation=expl)
+
+        context = req.environ['cinder.context']
+        new_metadata = self._update_snapshot_metadata(context,
+                                                      snapshot_id,
+                                                      metadata,
+                                                      delete=True)
+
+        return {'metadata': new_metadata}
+
+    def _update_snapshot_metadata(self, context,
+                                  snapshot_id, metadata,
+                                  delete=False):
+        try:
+            snapshot = self.volume_api.get_snapshot(context, snapshot_id)
+            return self.volume_api.update_snapshot_metadata(context,
+                                                            snapshot,
+                                                            metadata,
+                                                            delete)
+        except exception.SnapshotNotFound:
+            msg = _('snapshot does not exist')
+            raise exc.HTTPNotFound(explanation=msg)
+
+        except (ValueError, AttributeError):
+            msg = _("Malformed request body")
+            raise exc.HTTPBadRequest(explanation=msg)
+
+        except exception.InvalidVolumeMetadata as error:
+            raise exc.HTTPBadRequest(explanation=unicode(error))
+
+        except exception.InvalidVolumeMetadataSize as error:
+            raise exc.HTTPRequestEntityTooLarge(explanation=unicode(error))
+
+    @wsgi.serializers(xml=common.MetaItemTemplate)
+    def show(self, req, snapshot_id, id):
+        """Return a single metadata item."""
+        context = req.environ['cinder.context']
+        data = self._get_metadata(context, snapshot_id)
+
+        try:
+            return {'meta': {id: data[id]}}
+        except KeyError:
+            msg = _("Metadata item was not found")
+            raise exc.HTTPNotFound(explanation=msg)
+
+    def delete(self, req, snapshot_id, id):
+        """Delete an existing metadata item."""
+        context = req.environ['cinder.context']
+
+        metadata = self._get_metadata(context, snapshot_id)
+
+        if id not in metadata:
+            msg = _("Metadata item was not found")
+            raise exc.HTTPNotFound(explanation=msg)
+
+        try:
+            snapshot = self.volume_api.get_snapshot(context, snapshot_id)
+            self.volume_api.delete_snapshot_metadata(context, snapshot, id)
+        except exception.SnapshotNotFound:
+            msg = _('snapshot does not exist')
+            raise exc.HTTPNotFound(explanation=msg)
+        return webob.Response(status_int=200)
+
+
+def create_resource():
+    return wsgi.Resource(Controller())
diff --git a/cinder/api/v2/snapshots.py b/cinder/api/v2/snapshots.py
new file mode 100644
index 0000000000..5c7dc1f9b9
--- /dev/null
+++ b/cinder/api/v2/snapshots.py
@@ -0,0 +1,257 @@
+# Copyright 2011 Justin Santa Barbara
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""The volumes snapshots api."""
+
+import webob
+from webob import exc
+
+from cinder.api import common
+from cinder.api.openstack import wsgi
+from cinder.api.v2 import volumes
+from cinder.api import xmlutil
+from cinder import exception
+from cinder import flags
+from cinder.openstack.common import log as logging
+from cinder.openstack.common import strutils
+from cinder import utils
+from cinder import volume
+
+
+LOG = logging.getLogger(__name__)
+
+
+FLAGS = flags.FLAGS
+
+
+def _translate_snapshot_detail_view(context, snapshot):
+    """Maps keys for snapshots details view."""
+
+    d = _translate_snapshot_summary_view(context, snapshot)
+
+    # NOTE(gagupta): No additional data / lookups at the moment
+    return d
+
+
+def _translate_snapshot_summary_view(context, snapshot):
+    """Maps keys for snapshots summary view."""
+    d = {}
+
+    d['id'] = snapshot['id']
+    d['created_at'] = snapshot['created_at']
+    d['name'] = snapshot['display_name']
+    d['description'] = snapshot['display_description']
+    d['volume_id'] = snapshot['volume_id']
+    d['status'] = snapshot['status']
+    d['size'] = snapshot['volume_size']
+
+    if snapshot.get('snapshot_metadata'):
+        metadata = snapshot.get('snapshot_metadata')
+        d['metadata'] = dict((item['key'], item['value']) for item in metadata)
+    # avoid circular ref when vol is a Volume instance
+    elif snapshot.get('metadata') and isinstance(snapshot.get('metadata'),
+                                                 dict):
+        d['metadata'] = snapshot['metadata']
+    else:
+        d['metadata'] = {}
+    return d
+
+
+def make_snapshot(elem):
+    elem.set('id')
+    elem.set('status')
+    elem.set('size')
+    elem.set('created_at')
+    elem.set('name')
+    elem.set('description')
+    elem.set('volume_id')
+    elem.append(common.MetadataTemplate())
+
+
+class SnapshotTemplate(xmlutil.TemplateBuilder):
+    def construct(self):
+        root = xmlutil.TemplateElement('snapshot', selector='snapshot')
+        make_snapshot(root)
+        return xmlutil.MasterTemplate(root, 1)
+
+
+class SnapshotsTemplate(xmlutil.TemplateBuilder):
+    def construct(self):
+        root = xmlutil.TemplateElement('snapshots')
+        elem = xmlutil.SubTemplateElement(root, 'snapshot',
+                                          selector='snapshots')
+        make_snapshot(elem)
+        return xmlutil.MasterTemplate(root, 1)
+
+
+class SnapshotsController(wsgi.Controller):
+    """The Snapshots API controller for the OpenStack API."""
+
+    def __init__(self, ext_mgr=None):
+        self.volume_api = volume.API()
+        self.ext_mgr = ext_mgr
+        super(SnapshotsController, self).__init__()
+
+    @wsgi.serializers(xml=SnapshotTemplate)
+    def show(self, req, id):
+        """Return data about the given snapshot."""
+        context = req.environ['cinder.context']
+
+        try:
+            vol = self.volume_api.get_snapshot(context, id)
+        except exception.NotFound:
+            raise exc.HTTPNotFound()
+
+        return {'snapshot': _translate_snapshot_detail_view(context, vol)}
+
+    def delete(self, req, id):
+        """Delete a snapshot."""
+        context = req.environ['cinder.context']
+
+        LOG.audit(_("Delete snapshot with id: %s"), id, context=context)
+
+        try:
+            snapshot = self.volume_api.get_snapshot(context, id)
+            self.volume_api.delete_snapshot(context, snapshot)
+        except exception.NotFound:
+            raise exc.HTTPNotFound()
+        return webob.Response(status_int=202)
+
+    @wsgi.serializers(xml=SnapshotsTemplate)
+    def index(self, req):
+        """Returns a summary list of snapshots."""
+        return self._items(req, entity_maker=_translate_snapshot_summary_view)
+
+    @wsgi.serializers(xml=SnapshotsTemplate)
+    def detail(self, req):
+        """Returns a detailed list of snapshots."""
+        return self._items(req, entity_maker=_translate_snapshot_detail_view)
+
+    def _items(self, req, entity_maker):
+        """Returns a list of snapshots, transformed through entity_maker."""
+        context = req.environ['cinder.context']
+
+        search_opts = {}
+        search_opts.update(req.GET)
+        allowed_search_options = ('status', 'volume_id', 'name')
+        volumes.remove_invalid_options(context, search_opts,
+                                       allowed_search_options)
+
+        # NOTE(thingee): v2 API allows name instead of display_name
+        if 'name' in search_opts:
+            search_opts['display_name'] = search_opts['name']
+            del search_opts['name']
+
+        snapshots = self.volume_api.get_all_snapshots(context,
+                                                      search_opts=search_opts)
+        limited_list = common.limited(snapshots, req)
+        res = [entity_maker(context, snapshot) for snapshot in limited_list]
+        return {'snapshots': res}
+
+    @wsgi.response(202)
+    @wsgi.serializers(xml=SnapshotTemplate)
+    def create(self, req, body):
+        """Creates a new snapshot."""
+        kwargs = {}
+        context = req.environ['cinder.context']
+
+        if not self.is_valid_body(body, 'snapshot'):
+            raise exc.HTTPBadRequest()
+
+        snapshot = body['snapshot']
+        kwargs['metadata'] = snapshot.get('metadata', None)
+
+        volume_id = snapshot['volume_id']
+        volume = self.volume_api.get(context, volume_id)
+        force = snapshot.get('force', False)
+        msg = _("Create snapshot from volume %s")
+        LOG.audit(msg, volume_id, context=context)
+
+        # NOTE(thingee): v2 API allows name instead of display_name
+        if 'name' in snapshot:
+            snapshot['display_name'] = snapshot.get('name')
+            del snapshot['name']
+
+        if not utils.is_valid_boolstr(force):
+            msg = _("Invalid value '%s' for force. 
") % force + raise exception.InvalidParameterValue(err=msg) + + if strutils.bool_from_string(force): + new_snapshot = self.volume_api.create_snapshot_force( + context, + volume, + snapshot.get('display_name'), + snapshot.get('description'), + **kwargs) + else: + new_snapshot = self.volume_api.create_snapshot( + context, + volume, + snapshot.get('display_name'), + snapshot.get('description'), + **kwargs) + + retval = _translate_snapshot_detail_view(context, new_snapshot) + + return {'snapshot': retval} + + @wsgi.serializers(xml=SnapshotTemplate) + def update(self, req, id, body): + """Update a snapshot.""" + context = req.environ['cinder.context'] + + if not body: + raise exc.HTTPBadRequest() + + if 'snapshot' not in body: + raise exc.HTTPBadRequest() + + snapshot = body['snapshot'] + update_dict = {} + + valid_update_keys = ( + 'name', + 'description', + 'display_description', + ) + + # NOTE(thingee): v2 API allows description instead of + # display_description + if 'description' in snapshot: + snapshot['display_description'] = snapshot['description'] + del snapshot['description'] + + for key in valid_update_keys: + if key in snapshot: + update_dict[key] = snapshot[key] + + # NOTE(thingee): v2 API allows name instead of display_name + if 'name' in update_dict: + update_dict['display_name'] = update_dict['name'] + del update_dict['name'] + + try: + snapshot = self.volume_api.get_snapshot(context, id) + self.volume_api.update_snapshot(context, snapshot, update_dict) + except exception.NotFound: + raise exc.HTTPNotFound() + + snapshot.update(update_dict) + + return {'snapshot': _translate_snapshot_detail_view(context, snapshot)} + + +def create_resource(ext_mgr): + return wsgi.Resource(SnapshotsController(ext_mgr)) diff --git a/cinder/api/v2/types.py b/cinder/api/v2/types.py new file mode 100644 index 0000000000..1513b8dcfa --- /dev/null +++ b/cinder/api/v2/types.py @@ -0,0 +1,80 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 Zadara Storage Inc. +# Copyright (c) 2011 OpenStack LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +"""The volume type & volume types extra specs extension.""" + +from webob import exc + +from cinder.api.openstack import wsgi +from cinder.api.views import types as views_types +from cinder.api import xmlutil +from cinder import exception +from cinder.volume import volume_types + + +def make_voltype(elem): + elem.set('id') + elem.set('name') + extra_specs = xmlutil.make_flat_dict('extra_specs', selector='extra_specs') + elem.append(extra_specs) + + +class VolumeTypeTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = xmlutil.TemplateElement('volume_type', selector='volume_type') + make_voltype(root) + return xmlutil.MasterTemplate(root, 1) + + +class VolumeTypesTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = xmlutil.TemplateElement('volume_types') + elem = xmlutil.SubTemplateElement(root, 'volume_type', + selector='volume_types') + make_voltype(elem) + return xmlutil.MasterTemplate(root, 1) + + +class VolumeTypesController(wsgi.Controller): + """The volume types API controller for the OpenStack API.""" + + _view_builder_class = views_types.ViewBuilder + + @wsgi.serializers(xml=VolumeTypesTemplate) + def index(self, req): + """Returns the list of volume types.""" + context = req.environ['cinder.context'] + vol_types = volume_types.get_all_types(context).values() + return self._view_builder.index(req, vol_types) + + @wsgi.serializers(xml=VolumeTypeTemplate) + def show(self, req, id): + """Return a single volume type item.""" + context = req.environ['cinder.context'] + + try: + vol_type = volume_types.get_volume_type(context, id) + except exception.NotFound: + raise exc.HTTPNotFound() + + # TODO(bcwaldon): remove str cast once we use uuids + vol_type['id'] = str(vol_type['id']) + return self._view_builder.show(req, vol_type) + + +def create_resource(): + return wsgi.Resource(VolumeTypesController()) diff --git a/cinder/api/v2/views/__init__.py b/cinder/api/v2/views/__init__.py new file mode 100644 index 0000000000..cbf4a45060 --- /dev/null +++ b/cinder/api/v2/views/__init__.py @@ -0,0 +1,16 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/cinder/api/v2/views/volumes.py b/cinder/api/v2/views/volumes.py new file mode 100644 index 0000000000..c1deec4663 --- /dev/null +++ b/cinder/api/v2/views/volumes.py @@ -0,0 +1,122 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +from cinder.api import common +from cinder.openstack.common import log as logging + + +LOG = logging.getLogger(__name__) + + +class ViewBuilder(common.ViewBuilder): + """Model a server API response as a python dictionary.""" + + _collection_name = "volumes" + + def __init__(self): + """Initialize view builder.""" + super(ViewBuilder, self).__init__() + + def summary_list(self, request, volumes): + """Show a list of volumes without many details.""" + return self._list_view(self.summary, request, volumes) + + def detail_list(self, request, volumes): + """Detailed view of a list of volumes.""" + return self._list_view(self.detail, request, volumes) + + def summary(self, request, volume): + """Generic, non-detailed view of an volume.""" + return { + 'volume': { + 'id': volume['id'], + 'name': volume['display_name'], + 'links': self._get_links(request, + volume['id']), + }, + } + + def detail(self, request, volume): + """Detailed view of a single volume.""" + return { + 'volume': { + 'id': volume.get('id'), + 'status': volume.get('status'), + 'size': volume.get('size'), + 'availability_zone': volume.get('availability_zone'), + 'created_at': volume.get('created_at'), + 'attachments': self._get_attachments(volume), + 'name': volume.get('display_name'), + 'description': volume.get('display_description'), + 'volume_type': self._get_volume_type(volume), + 'snapshot_id': volume.get('snapshot_id'), + 'source_volid': volume.get('source_volid'), + 'metadata': self._get_volume_metadata(volume), + 'links': self._get_links(request, volume['id']) + } + } + + def _get_attachments(self, volume): + """Retrieves the attachments of the volume object""" + attachments = [] + + if volume['attach_status'] == 'attached': + d = {} + volume_id = volume['id'] + + # note(justinsb): we use the volume id as the id of the attachments + # object + d['id'] = volume_id + + d['volume_id'] = volume_id + d['server_id'] = volume['instance_uuid'] + if volume.get('mountpoint'): + d['device'] = volume['mountpoint'] + attachments.append(d) + + return attachments + + def _get_volume_metadata(self, volume): + """Retrieves the metadata of the volume object""" + if volume.get('volume_metadata'): + metadata = volume.get('volume_metadata') + return dict((item['key'], item['value']) for item in metadata) + # avoid circular ref when vol is a Volume instance + elif volume.get('metadata') and isinstance(volume.get('metadata'), + dict): + return volume['metadata'] + return {} + + def _get_volume_type(self, volume): + """Retrieves the type the volume object is""" + if volume['volume_type_id'] and volume.get('volume_type'): + return volume['volume_type']['name'] + else: + return volume['volume_type_id'] + + def _list_view(self, func, request, volumes): + """Provide a view for a list of volumes.""" + volumes_list = [func(request, volume)['volume'] for volume in volumes] + volumes_links = self._get_collection_links(request, + volumes, + self._collection_name) + volumes_dict = dict(volumes=volumes_list) + + if volumes_links: + volumes_dict['volumes_links'] = volumes_links + + return volumes_dict diff --git a/cinder/api/v2/volumes.py b/cinder/api/v2/volumes.py new file mode 100644 index 0000000000..bf46449d44 --- /dev/null +++ b/cinder/api/v2/volumes.py @@ -0,0 +1,362 @@ +# Copyright 2011 Justin Santa Barbara +# All Rights Reserved. 
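
The view builder above defines the v2 wire format for volumes. A sketch of the detail view for a minimal volume record; the input dict mimics a database row, and a blank webob request stands in for a real API request (link generation comes from `cinder.api.common.ViewBuilder`, so treat the `links` value as approximate)::

    import webob

    from cinder.api.v2.views import volumes as volume_views

    request = webob.Request.blank('/v2/fake_tenant/volumes')
    volume = {
        'id': 'vol-0001', 'status': 'available', 'size': 1,
        'availability_zone': 'nova', 'created_at': None,
        'attach_status': 'detached', 'display_name': 'test-vol',
        'display_description': None, 'volume_type_id': None,
        'snapshot_id': None, 'source_volid': None, 'metadata': {},
    }

    view = volume_views.ViewBuilder().detail(request, volume)
    # view['volume']['name'] == 'test-vol'
    # view['volume']['attachments'] == []  (attach_status is 'detached')
    # view['volume']['metadata'] == {}
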
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""The volumes api.""" + +import webob +from webob import exc + +from cinder.api import common +from cinder.api.openstack import wsgi +from cinder.api.v2.views import volumes as volume_views +from cinder.api import xmlutil +from cinder import exception +from cinder import flags +from cinder.openstack.common import log as logging +from cinder.openstack.common import uuidutils +from cinder import utils +from cinder import volume +from cinder.volume import volume_types + + +LOG = logging.getLogger(__name__) + + +FLAGS = flags.FLAGS + + +def make_attachment(elem): + elem.set('id') + elem.set('server_id') + elem.set('volume_id') + elem.set('device') + + +def make_volume(elem): + elem.set('id') + elem.set('status') + elem.set('size') + elem.set('availability_zone') + elem.set('created_at') + elem.set('name') + elem.set('description') + elem.set('volume_type') + elem.set('snapshot_id') + elem.set('source_volid') + + attachments = xmlutil.SubTemplateElement(elem, 'attachments') + attachment = xmlutil.SubTemplateElement(attachments, 'attachment', + selector='attachments') + make_attachment(attachment) + + # Attach metadata node + elem.append(common.MetadataTemplate()) + + +volume_nsmap = {None: xmlutil.XMLNS_VOLUME_V2, 'atom': xmlutil.XMLNS_ATOM} + + +class VolumeTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = xmlutil.TemplateElement('volume', selector='volume') + make_volume(root) + return xmlutil.MasterTemplate(root, 1, nsmap=volume_nsmap) + + +class VolumesTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = xmlutil.TemplateElement('volumes') + elem = xmlutil.SubTemplateElement(root, 'volume', selector='volumes') + make_volume(elem) + return xmlutil.MasterTemplate(root, 1, nsmap=volume_nsmap) + + +class CommonDeserializer(wsgi.MetadataXMLDeserializer): + """Common deserializer to handle xml-formatted volume requests. + + Handles standard volume attributes as well as the optional metadata + attribute + """ + + metadata_deserializer = common.MetadataXMLDeserializer() + + def _extract_volume(self, node): + """Marshal the volume attribute of a parsed request.""" + volume = {} + volume_node = self.find_first_child_named(node, 'volume') + + attributes = ['name', 'description', 'size', + 'volume_type', 'availability_zone'] + for attr in attributes: + if volume_node.getAttribute(attr): + volume[attr] = volume_node.getAttribute(attr) + + metadata_node = self.find_first_child_named(volume_node, 'metadata') + if metadata_node is not None: + volume['metadata'] = self.extract_metadata(metadata_node) + + return volume + + +class CreateDeserializer(CommonDeserializer): + """Deserializer to handle xml-formatted create volume requests. 
+ + Handles standard volume attributes as well as the optional metadata + attribute + """ + + def default(self, string): + """Deserialize an xml-formatted volume create request.""" + dom = utils.safe_minidom_parse_string(string) + volume = self._extract_volume(dom) + return {'body': {'volume': volume}} + + +class VolumeController(wsgi.Controller): + """The Volumes API controller for the OpenStack API.""" + + _view_builder_class = volume_views.ViewBuilder + + def __init__(self, ext_mgr): + self.volume_api = volume.API() + self.ext_mgr = ext_mgr + super(VolumeController, self).__init__() + + @wsgi.serializers(xml=VolumeTemplate) + def show(self, req, id): + """Return data about the given volume.""" + context = req.environ['cinder.context'] + + try: + vol = self.volume_api.get(context, id) + except exception.NotFound: + raise exc.HTTPNotFound() + + return self._view_builder.detail(req, vol) + + def delete(self, req, id): + """Delete a volume.""" + context = req.environ['cinder.context'] + + LOG.audit(_("Delete volume with id: %s"), id, context=context) + + try: + volume = self.volume_api.get(context, id) + self.volume_api.delete(context, volume) + except exception.NotFound: + raise exc.HTTPNotFound() + return webob.Response(status_int=202) + + @wsgi.serializers(xml=VolumesTemplate) + def index(self, req): + """Returns a summary list of volumes.""" + return self._get_volumes(req, is_detail=False) + + @wsgi.serializers(xml=VolumesTemplate) + def detail(self, req): + """Returns a detailed list of volumes.""" + return self._get_volumes(req, is_detail=True) + + def _get_volumes(self, req, is_detail): + """Returns a list of volumes, transformed through view builder.""" + + context = req.environ['cinder.context'] + + params = req.params.copy() + marker = params.pop('marker', None) + limit = params.pop('limit', None) + sort_key = params.pop('sort_key', 'created_at') + sort_dir = params.pop('sort_dir', 'desc') + params.pop('offset', None) + filters = params + + remove_invalid_options(context, + filters, self._get_volume_filter_options()) + + # NOTE(thingee): v2 API allows name instead of display_name + if 'name' in filters: + filters['display_name'] = filters['name'] + del filters['name'] + + volumes = self.volume_api.get_all(context, marker, limit, sort_key, + sort_dir, filters) + limited_list = common.limited(volumes, req) + + if is_detail: + volumes = self._view_builder.detail_list(req, limited_list) + else: + volumes = self._view_builder.summary_list(req, limited_list) + return volumes + + def _image_uuid_from_href(self, image_href): + # If the image href was generated by nova api, strip image_href + # down to an id. 
try:
+            image_uuid = image_href.split('/').pop()
+        except (TypeError, AttributeError):
+            msg = _("Invalid imageRef provided.")
+            raise exc.HTTPBadRequest(explanation=msg)
+
+        if not uuidutils.is_uuid_like(image_uuid):
+            msg = _("Invalid imageRef provided.")
+            raise exc.HTTPBadRequest(explanation=msg)
+
+        return image_uuid
+
+    @wsgi.response(202)
+    @wsgi.serializers(xml=VolumeTemplate)
+    @wsgi.deserializers(xml=CreateDeserializer)
+    def create(self, req, body):
+        """Creates a new volume."""
+        if not self.is_valid_body(body, 'volume'):
+            raise exc.HTTPBadRequest()
+
+        context = req.environ['cinder.context']
+        volume = body['volume']
+
+        kwargs = {}
+
+        # NOTE(thingee): v2 API allows name instead of display_name
+        if volume.get('name'):
+            volume['display_name'] = volume.get('name')
+            del volume['name']
+
+        # NOTE(thingee): v2 API allows description instead of
+        #                display_description
+        if volume.get('description'):
+            volume['display_description'] = volume.get('description')
+            del volume['description']
+
+        req_volume_type = volume.get('volume_type', None)
+        if req_volume_type:
+            try:
+                kwargs['volume_type'] = volume_types.get_volume_type(
+                    context, req_volume_type)
+            except exception.VolumeTypeNotFound:
+                explanation = 'Volume type not found.'
+                raise exc.HTTPNotFound(explanation=explanation)
+
+        kwargs['metadata'] = volume.get('metadata', None)
+
+        snapshot_id = volume.get('snapshot_id')
+        if snapshot_id is not None:
+            kwargs['snapshot'] = self.volume_api.get_snapshot(context,
+                                                              snapshot_id)
+        else:
+            kwargs['snapshot'] = None
+
+        source_volid = volume.get('source_volid')
+        if source_volid is not None:
+            kwargs['source_volume'] = self.volume_api.get_volume(context,
+                                                                 source_volid)
+        else:
+            kwargs['source_volume'] = None
+
+        size = volume.get('size', None)
+        if size is None and kwargs['snapshot'] is not None:
+            size = kwargs['snapshot']['volume_size']
+        elif size is None and kwargs['source_volume'] is not None:
+            size = kwargs['source_volume']['size']
+
+        LOG.audit(_("Create volume of %s GB"), size, context=context)
+
+        image_href = None
+        image_uuid = None
+        if self.ext_mgr.is_loaded('os-image-create'):
+            image_href = volume.get('imageRef')
+            if image_href:
+                image_uuid = self._image_uuid_from_href(image_href)
+                kwargs['image_id'] = image_uuid
+
+        kwargs['availability_zone'] = volume.get('availability_zone', None)
+
+        new_volume = self.volume_api.create(context,
+                                            size,
+                                            volume.get('display_name'),
+                                            volume.get('display_description'),
+                                            **kwargs)
+
+        # TODO(vish): Instance should be None at db layer instead of
+        #             trying to lazy load, but for now we turn it into
+        #             a dict to avoid an error.
+        retval = self._view_builder.summary(req, dict(new_volume.iteritems()))
+
+        return retval
+
+    def _get_volume_filter_options(self):
+        """Return volume search options allowed by non-admin."""
+        return ('name', 'status')
+
+    @wsgi.serializers(xml=VolumeTemplate)
+    def update(self, req, id, body):
+        """Update a volume."""
+        context = req.environ['cinder.context']
+
+        if not body:
+            raise exc.HTTPBadRequest()
+
+        if 'volume' not in body:
+            raise exc.HTTPBadRequest()
+
+        volume = body['volume']
+        update_dict = {}
+
+        valid_update_keys = (
+            'name',
+            'description',
+            'metadata',
+        )
+
+        for key in valid_update_keys:
+            if key in volume:
+                update_dict[key] = volume[key]
+
+        # NOTE(thingee): v2 API allows name instead of display_name
+        if 'name' in update_dict:
+            update_dict['display_name'] = update_dict['name']
+            del update_dict['name']
+
+        # NOTE(thingee): v2 API allows description instead of
+        #                display_description
+        if 'description' in update_dict:
+            update_dict['display_description'] = update_dict['description']
+            del update_dict['description']
+
+        try:
+            volume = self.volume_api.get(context, id)
+            self.volume_api.update(context, volume, update_dict)
+        except exception.NotFound:
+            raise exc.HTTPNotFound()
+
+        volume.update(update_dict)
+
+        return self._view_builder.detail(req, volume)
+
+
+def create_resource(ext_mgr):
+    return wsgi.Resource(VolumeController(ext_mgr))
+
+
+def remove_invalid_options(context, filters, allowed_search_options):
+    """Remove search options that are not valid for non-admin API/context."""
+    if context.is_admin:
+        # Allow all options
+        return
+    # Otherwise, strip out all unknown options
+    unknown_options = [opt for opt in filters
+                       if opt not in allowed_search_options]
+    bad_options = ", ".join(unknown_options)
+    log_msg = _("Removing options '%s' from query") % bad_options
+    LOG.debug(log_msg)
+    for opt in unknown_options:
+        del filters[opt]
diff --git a/cinder/api/versions.py b/cinder/api/versions.py
new file mode 100644
index 0000000000..30627bd29a
--- /dev/null
+++ b/cinder/api/versions.py
@@ -0,0 +1,282 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
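
One detail of `remove_invalid_options` above that is easy to miss: it mutates the `filters` dict in place rather than returning a new one, and admin contexts bypass the filtering entirely. A sketch, assuming cinder's runtime (which installs the `_()` builtin the module uses) is importable; the context stub carries only the attribute the function reads::

    from cinder.api.v2 import volumes

    class FakeContext(object):
        is_admin = False

    filters = {'name': 'vol1', 'status': 'error', 'bogus': 'x'}
    volumes.remove_invalid_options(FakeContext(), filters,
                                   ('name', 'status'))
    # filters -> {'name': 'vol1', 'status': 'error'}; 'bogus' was dropped.
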
+ +import datetime +from lxml import etree + +from cinder.api.openstack import wsgi +from cinder.api.views import versions as views_versions +from cinder.api import xmlutil +from cinder import flags + +FLAGS = flags.FLAGS + + +_KNOWN_VERSIONS = { + "v2.0": { + "id": "v2.0", + "status": "CURRENT", + "updated": "2012-11-21T11:33:21Z", + "links": [ + { + "rel": "describedby", + "type": "application/pdf", + "href": "http://jorgew.github.com/block-storage-api/" + "content/os-block-storage-1.0.pdf", + }, + { + "rel": "describedby", + "type": "application/vnd.sun.wadl+xml", + #(anthony) FIXME + "href": "http://docs.rackspacecloud.com/" + "servers/api/v1.1/application.wadl", + }, + ], + "media-types": [ + { + "base": "application/xml", + "type": "application/vnd.openstack.volume+xml;version=1", + }, + { + "base": "application/json", + "type": "application/vnd.openstack.volume+json;version=1", + } + ], + }, + "v1.0": { + "id": "v1.0", + "status": "CURRENT", + "updated": "2012-01-04T11:33:21Z", + "links": [ + { + "rel": "describedby", + "type": "application/pdf", + "href": "http://jorgew.github.com/block-storage-api/" + "content/os-block-storage-1.0.pdf", + }, + { + "rel": "describedby", + "type": "application/vnd.sun.wadl+xml", + #(anthony) FIXME + "href": "http://docs.rackspacecloud.com/" + "servers/api/v1.1/application.wadl", + }, + ], + "media-types": [ + { + "base": "application/xml", + "type": "application/vnd.openstack.volume+xml;version=1", + }, + { + "base": "application/json", + "type": "application/vnd.openstack.volume+json;version=1", + } + ], + } + +} + + +def get_supported_versions(): + versions = {} + + if FLAGS.enable_v1_api: + versions['v1.0'] = _KNOWN_VERSIONS['v1.0'] + if FLAGS.enable_v2_api: + versions['v2.0'] = _KNOWN_VERSIONS['v2.0'] + + return versions + + +class MediaTypesTemplateElement(xmlutil.TemplateElement): + def will_render(self, datum): + return 'media-types' in datum + + +def make_version(elem): + elem.set('id') + elem.set('status') + elem.set('updated') + + mts = MediaTypesTemplateElement('media-types') + elem.append(mts) + + mt = xmlutil.SubTemplateElement(mts, 'media-type', selector='media-types') + mt.set('base') + mt.set('type') + + xmlutil.make_links(elem, 'links') + + +version_nsmap = {None: xmlutil.XMLNS_COMMON_V10, 'atom': xmlutil.XMLNS_ATOM} + + +class VersionTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = xmlutil.TemplateElement('version', selector='version') + make_version(root) + return xmlutil.MasterTemplate(root, 1, nsmap=version_nsmap) + + +class VersionsTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = xmlutil.TemplateElement('versions') + elem = xmlutil.SubTemplateElement(root, 'version', selector='versions') + make_version(elem) + return xmlutil.MasterTemplate(root, 1, nsmap=version_nsmap) + + +class ChoicesTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = xmlutil.TemplateElement('choices') + elem = xmlutil.SubTemplateElement(root, 'version', selector='choices') + make_version(elem) + return xmlutil.MasterTemplate(root, 1, nsmap=version_nsmap) + + +class AtomSerializer(wsgi.XMLDictSerializer): + + NSMAP = {None: xmlutil.XMLNS_ATOM} + + def __init__(self, metadata=None, xmlns=None): + self.metadata = metadata or {} + if not xmlns: + self.xmlns = wsgi.XMLNS_ATOM + else: + self.xmlns = xmlns + + def _get_most_recent_update(self, versions): + recent = None + for version in versions: + updated = datetime.datetime.strptime(version['updated'], + '%Y-%m-%dT%H:%M:%SZ') + if not recent: + recent = updated 
+ elif updated > recent: + recent = updated + + return recent.strftime('%Y-%m-%dT%H:%M:%SZ') + + def _get_base_url(self, link_href): + # Make sure no trailing / + link_href = link_href.rstrip('/') + return link_href.rsplit('/', 1)[0] + '/' + + def _create_feed(self, versions, feed_title, feed_id): + feed = etree.Element('feed', nsmap=self.NSMAP) + title = etree.SubElement(feed, 'title') + title.set('type', 'text') + title.text = feed_title + + # Set this updated to the most recently updated version + recent = self._get_most_recent_update(versions) + etree.SubElement(feed, 'updated').text = recent + + etree.SubElement(feed, 'id').text = feed_id + + link = etree.SubElement(feed, 'link') + link.set('rel', 'self') + link.set('href', feed_id) + + author = etree.SubElement(feed, 'author') + etree.SubElement(author, 'name').text = 'Rackspace' + etree.SubElement(author, 'uri').text = 'http://www.rackspace.com/' + + for version in versions: + feed.append(self._create_version_entry(version)) + + return feed + + def _create_version_entry(self, version): + entry = etree.Element('entry') + etree.SubElement(entry, 'id').text = version['links'][0]['href'] + title = etree.SubElement(entry, 'title') + title.set('type', 'text') + title.text = 'Version %s' % version['id'] + etree.SubElement(entry, 'updated').text = version['updated'] + + for link in version['links']: + link_elem = etree.SubElement(entry, 'link') + link_elem.set('rel', link['rel']) + link_elem.set('href', link['href']) + if 'type' in link: + link_elem.set('type', link['type']) + + content = etree.SubElement(entry, 'content') + content.set('type', 'text') + content.text = 'Version %s %s (%s)' % (version['id'], + version['status'], + version['updated']) + return entry + + +class VersionsAtomSerializer(AtomSerializer): + def default(self, data): + versions = data['versions'] + feed_id = self._get_base_url(versions[0]['links'][0]['href']) + feed = self._create_feed(versions, 'Available API Versions', feed_id) + return self._to_xml(feed) + + +class VersionAtomSerializer(AtomSerializer): + def default(self, data): + version = data['version'] + feed_id = version['links'][0]['href'] + feed = self._create_feed([version], 'About This Version', feed_id) + return self._to_xml(feed) + + +class Versions(wsgi.Resource): + + def __init__(self): + super(Versions, self).__init__(None) + + @wsgi.serializers(xml=VersionsTemplate, + atom=VersionsAtomSerializer) + def index(self, req): + """Return all versions.""" + builder = views_versions.get_view_builder(req) + return builder.build_versions(get_supported_versions()) + + @wsgi.serializers(xml=ChoicesTemplate) + @wsgi.response(300) + def multi(self, req): + """Return multiple choices.""" + builder = views_versions.get_view_builder(req) + return builder.build_choices(get_supported_versions(), req) + + def get_action_args(self, request_environment): + """Parse dictionary created by routes library.""" + args = {} + if request_environment['PATH_INFO'] == '/': + args['action'] = 'index' + else: + args['action'] = 'multi' + + return args + + +class VolumeVersionV1(object): + @wsgi.serializers(xml=VersionTemplate, + atom=VersionAtomSerializer) + def show(self, req): + builder = views_versions.get_view_builder(req) + return builder.build_version(_KNOWN_VERSIONS['v1.0']) + + +def create_resource(): + return wsgi.Resource(VolumeVersionV1()) diff --git a/cinder/api/views/__init__.py b/cinder/api/views/__init__.py new file mode 100644 index 0000000000..d65c689a83 --- /dev/null +++ b/cinder/api/views/__init__.py @@ -0,0 +1,16 
@@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/cinder/api/views/backups.py b/cinder/api/views/backups.py new file mode 100644 index 0000000000..446bf30c61 --- /dev/null +++ b/cinder/api/views/backups.py @@ -0,0 +1,90 @@ +# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from cinder.api import common +from cinder.openstack.common import log as logging + + +LOG = logging.getLogger(__name__) + + +class ViewBuilder(common.ViewBuilder): + """Model backup API responses as a python dictionary.""" + + _collection_name = "backups" + + def __init__(self): + """Initialize view builder.""" + super(ViewBuilder, self).__init__() + + def summary_list(self, request, backups): + """Show a list of backups without many details.""" + return self._list_view(self.summary, request, backups) + + def detail_list(self, request, backups): + """Detailed view of a list of backups .""" + return self._list_view(self.detail, request, backups) + + def summary(self, request, backup): + """Generic, non-detailed view of a backup.""" + return { + 'backup': { + 'id': backup['id'], + 'name': backup['display_name'], + 'links': self._get_links(request, + backup['id']), + }, + } + + def restore_summary(self, request, restore): + """Generic, non-detailed view of a restore.""" + return { + 'restore': { + 'backup_id': restore['backup_id'], + 'volume_id': restore['volume_id'], + }, + } + + def detail(self, request, backup): + """Detailed view of a single backup.""" + return { + 'backup': { + 'id': backup.get('id'), + 'status': backup.get('status'), + 'size': backup.get('size'), + 'object_count': backup.get('object_count'), + 'availability_zone': backup.get('availability_zone'), + 'container': backup.get('container'), + 'created_at': backup.get('created_at'), + 'name': backup.get('display_name'), + 'description': backup.get('display_description'), + 'fail_reason': backup.get('fail_reason'), + 'volume_id': backup.get('volume_id'), + 'links': self._get_links(request, backup['id']) + } + } + + def _list_view(self, func, request, backups): + """Provide a view for a list of backups.""" + backups_list = [func(request, backup)['backup'] for backup in backups] + backups_links = self._get_collection_links(request, + backups, + self._collection_name) + backups_dict = dict(backups=backups_list) + + if backups_links: + backups_dict['backups_links'] = 
backups_links + + return backups_dict diff --git a/cinder/api/views/limits.py b/cinder/api/views/limits.py new file mode 100644 index 0000000000..81b1e794ec --- /dev/null +++ b/cinder/api/views/limits.py @@ -0,0 +1,100 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010-2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import datetime + +from cinder.openstack.common import timeutils + + +class ViewBuilder(object): + """OpenStack API base limits view builder.""" + + def build(self, rate_limits, absolute_limits): + rate_limits = self._build_rate_limits(rate_limits) + absolute_limits = self._build_absolute_limits(absolute_limits) + + output = { + "limits": { + "rate": rate_limits, + "absolute": absolute_limits, + }, + } + + return output + + def _build_absolute_limits(self, absolute_limits): + """Builder for absolute limits + + absolute_limits should be given as a dict of limits. + For example: {"ram": 512, "gigabytes": 1024}. + + """ + limit_names = { + "ram": ["maxTotalRAMSize"], + "instances": ["maxTotalInstances"], + "cores": ["maxTotalCores"], + "gigabytes": ["maxTotalVolumeGigabytes"], + "volumes": ["maxTotalVolumes"], + "key_pairs": ["maxTotalKeypairs"], + "floating_ips": ["maxTotalFloatingIps"], + "metadata_items": ["maxServerMeta", "maxImageMeta"], + "injected_files": ["maxPersonality"], + "injected_file_content_bytes": ["maxPersonalitySize"], + } + limits = {} + for name, value in absolute_limits.iteritems(): + if name in limit_names and value is not None: + for name in limit_names[name]: + limits[name] = value + return limits + + def _build_rate_limits(self, rate_limits): + limits = [] + for rate_limit in rate_limits: + _rate_limit_key = None + _rate_limit = self._build_rate_limit(rate_limit) + + # check for existing key + for limit in limits: + if (limit["uri"] == rate_limit["URI"] and + limit["regex"] == rate_limit["regex"]): + _rate_limit_key = limit + break + + # ensure we have a key if we didn't find one + if not _rate_limit_key: + _rate_limit_key = { + "uri": rate_limit["URI"], + "regex": rate_limit["regex"], + "limit": [], + } + limits.append(_rate_limit_key) + + _rate_limit_key["limit"].append(_rate_limit) + + return limits + + def _build_rate_limit(self, rate_limit): + _get_utc = datetime.datetime.utcfromtimestamp + next_avail = _get_utc(rate_limit["resetTime"]) + return { + "verb": rate_limit["verb"], + "value": rate_limit["value"], + "remaining": int(rate_limit["remaining"]), + "unit": rate_limit["unit"], + "next-available": timeutils.isotime(at=next_avail), + } diff --git a/cinder/api/views/share_snapshots.py b/cinder/api/views/share_snapshots.py new file mode 100644 index 0000000000..dc1c8a05c0 --- /dev/null +++ b/cinder/api/views/share_snapshots.py @@ -0,0 +1,74 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 NetApp +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from cinder.api import common +from cinder.openstack.common import log as logging + + +class ViewBuilder(common.ViewBuilder): + """Model a server API response as a python dictionary.""" + + _collection_name = 'share-snapshots' + + def summary_list(self, request, snapshots): + """Show a list of share snapshots without many details.""" + return self._list_view(self.summary, request, snapshots) + + def detail_list(self, request, snapshots): + """Detailed view of a list of share snapshots.""" + return self._list_view(self.detail, request, snapshots) + + def summary(self, request, snapshot): + """Generic, non-detailed view of an share snapshot.""" + return { + 'share-snapshot': { + 'id': snapshot.get('id'), + 'name': snapshot.get('display_name'), + 'links': self._get_links(request, snapshot['id']) + } + } + + def detail(self, request, snapshot): + """Detailed view of a single share snapshot.""" + return { + 'share-snapshot': { + 'id': snapshot.get('id'), + 'share_id': snapshot.get('share_id'), + 'share_size': snapshot.get('share_size'), + 'created_at': snapshot.get('created_at'), + 'status': snapshot.get('status'), + 'name': snapshot.get('display_name'), + 'description': snapshot.get('display_description'), + 'share_proto': snapshot.get('share_proto'), + 'export_location': snapshot.get('export_location'), + 'links': self._get_links(request, snapshot['id']) + } + } + + def _list_view(self, func, request, snapshots): + """Provide a view for a list of share snapshots.""" + snapshots_list = [func(request, snapshot)['share-snapshot'] + for snapshot in snapshots] + snapshots_links = self._get_collection_links(request, + snapshots, + self._collection_name) + snapshots_dict = {self._collection_name: snapshots_list} + + if snapshots_links: + snapshots_dict['share_snapshots_links'] = snapshots_links + + return snapshots_dict diff --git a/cinder/api/views/shares.py b/cinder/api/views/shares.py new file mode 100644 index 0000000000..32fb58a5ac --- /dev/null +++ b/cinder/api/views/shares.py @@ -0,0 +1,74 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
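
Looking back at the limits view builder: `build` regroups the per-rule dictionaries produced by `Limit.display()` under their (URI, regex) pair and renames the absolute quota keys. A sketch with one rate limit and two absolute limits (the timestamp is illustrative)::

    from cinder.api.views import limits as limits_views

    rate = [{"verb": "POST", "URI": "*", "regex": ".*", "value": 10,
             "remaining": 2, "unit": "MINUTE", "resetTime": 1357002000}]
    absolute = {"gigabytes": 1024, "volumes": 10}

    view = limits_views.ViewBuilder().build(rate, absolute)
    # view["limits"]["absolute"] ->
    #     {"maxTotalVolumeGigabytes": 1024, "maxTotalVolumes": 10}
    # view["limits"]["rate"][0]["limit"][0]["next-available"] ->
    #     "2013-01-01T01:00:00Z"  (isotime of resetTime)
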
+ +from cinder.api import common +from cinder.openstack.common import log as logging + + +class ViewBuilder(common.ViewBuilder): + """Model a server API response as a python dictionary.""" + + _collection_name = 'shares' + + def summary_list(self, request, shares): + """Show a list of shares without many details.""" + return self._list_view(self.summary, request, shares) + + def detail_list(self, request, shares): + """Detailed view of a list of shares.""" + return self._list_view(self.detail, request, shares) + + def summary(self, request, share): + """Generic, non-detailed view of an share.""" + return { + 'share': { + 'id': share.get('id'), + 'name': share.get('display_name'), + 'links': self._get_links(request, share['id']) + } + } + + def detail(self, request, share): + """Detailed view of a single share.""" + return { + 'share': { + 'id': share.get('id'), + 'size': share.get('size'), + 'availability_zone': share.get('availability_zone'), + 'created_at': share.get('created_at'), + 'status': share.get('status'), + 'name': share.get('display_name'), + 'description': share.get('display_description'), + 'snapshot_id': share.get('snapshot_id'), + 'share_proto': share.get('share_proto'), + 'export_location': share.get('export_location'), + 'links': self._get_links(request, share['id']) + } + } + + def _list_view(self, func, request, shares): + """Provide a view for a list of shares.""" + shares_list = [func(request, share)['share'] for share in shares] + shares_links = self._get_collection_links(request, + shares, + self._collection_name) + shares_dict = dict(shares=shares_list) + + if shares_links: + shares_dict['shares_links'] = shares_links + + return shares_dict diff --git a/cinder/api/views/types.py b/cinder/api/views/types.py new file mode 100644 index 0000000000..675ec01fe5 --- /dev/null +++ b/cinder/api/views/types.py @@ -0,0 +1,34 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from cinder.api import common + + +class ViewBuilder(common.ViewBuilder): + + def show(self, request, volume_type, brief=False): + """Trim away extraneous volume type attributes.""" + trimmed = dict(id=volume_type.get('id'), + name=volume_type.get('name'), + extra_specs=volume_type.get('extra_specs')) + return trimmed if brief else dict(volume_type=trimmed) + + def index(self, request, volume_types): + """Index over trimmed volume types""" + volume_types_list = [self.show(request, volume_type, True) + for volume_type in volume_types] + return dict(volume_types=volume_types_list) diff --git a/cinder/api/views/versions.py b/cinder/api/views/versions.py new file mode 100644 index 0000000000..371033ded6 --- /dev/null +++ b/cinder/api/views/versions.py @@ -0,0 +1,82 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010-2011 OpenStack LLC. +# All Rights Reserved. 
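
The types view just above is small enough to trace by hand; `brief` controls whether the trimmed dict is wrapped in a `volume_type` envelope, and the request argument is unused by `show`, so `None` suffices in a sketch::

    from cinder.api.views import types as views_types

    vol_type = {'id': '1', 'name': 'ssd', 'extra_specs': {'ratio': '1.5'}}
    builder = views_types.ViewBuilder()

    builder.show(None, vol_type, brief=True)
    # -> {'id': '1', 'name': 'ssd', 'extra_specs': {'ratio': '1.5'}}
    builder.show(None, vol_type)
    # -> {'volume_type': {'id': '1', 'name': 'ssd',
    #                     'extra_specs': {'ratio': '1.5'}}}
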
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import copy +import os + + +def get_view_builder(req): + base_url = req.application_url + return ViewBuilder(base_url) + + +class ViewBuilder(object): + def __init__(self, base_url): + """ + :param base_url: URL of the root WSGI application + """ + self.base_url = base_url + + def build_choices(self, VERSIONS, req): + version_objs = [] + for version in VERSIONS: + version = VERSIONS[version] + version_objs.append({ + "id": version['id'], + "status": version['status'], + "links": [{"rel": "self", + "href": self.generate_href(req.path), }, ], + "media-types": version['media-types'], }) + + return dict(choices=version_objs) + + def build_versions(self, versions): + version_objs = [] + for version in sorted(versions.keys()): + version = versions[version] + version_objs.append({ + "id": version['id'], + "status": version['status'], + "updated": version['updated'], + "links": self._build_links(version), }) + + return dict(versions=version_objs) + + def build_version(self, version): + reval = copy.deepcopy(version) + reval['links'].insert(0, { + "rel": "self", + "href": self.base_url.rstrip('/') + '/', }) + return dict(version=reval) + + def _build_links(self, version_data): + """Generate a container of links that refer to the provided version.""" + href = self.generate_href() + + links = [{'rel': 'self', + 'href': href, }, ] + + return links + + def generate_href(self, path=None): + """Create a URL that refers to a specific version number.""" + version_number = 'v1' + if path: + path = path.strip('/') + return os.path.join(self.base_url, version_number, path) + else: + return os.path.join(self.base_url, version_number) + '/' diff --git a/cinder/api/xmlutil.py b/cinder/api/xmlutil.py new file mode 100644 index 0000000000..93b722e1f5 --- /dev/null +++ b/cinder/api/xmlutil.py @@ -0,0 +1,911 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License.
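The version view builder above pins generated links to the v1 path via os.path.join; a minimal illustration with a hypothetical endpoint (not part of this patch):

    builder = ViewBuilder('http://cinder.example.com/')
    builder.generate_href()             # 'http://cinder.example.com/v1/'
    builder.generate_href('/shares/42') # 'http://cinder.example.com/v1/shares/42'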
+ +import os.path + +from lxml import etree + +from cinder import utils + + +XMLNS_V10 = 'http://docs.rackspacecloud.com/servers/api/v1.0' +XMLNS_V11 = 'http://docs.openstack.org/compute/api/v1.1' +XMLNS_COMMON_V10 = 'http://docs.openstack.org/common/api/v1.0' +XMLNS_ATOM = 'http://www.w3.org/2005/Atom' +XMLNS_VOLUME_V1 = 'http://docs.openstack.org/volume/api/v1' +XMLNS_VOLUME_V2 = ('http://docs.openstack.org/api/openstack-volume/2.0/' + 'content') +XMLNS_SHARE_V1 = '' + + +def validate_schema(xml, schema_name): + if isinstance(xml, str): + xml = etree.fromstring(xml) + base_path = 'cinder/api/schemas/v1.1/' + if schema_name in ('atom', 'atom-link'): + base_path = 'cinder/api/schemas/' + schema_path = os.path.join(utils.cinderdir(), + '%s%s.rng' % (base_path, schema_name)) + schema_doc = etree.parse(schema_path) + relaxng = etree.RelaxNG(schema_doc) + relaxng.assertValid(xml) + + +class Selector(object): + """Selects datum to operate on from an object.""" + + def __init__(self, *chain): + """Initialize the selector. + + Each argument is a subsequent index into the object. + """ + + self.chain = chain + + def __repr__(self): + """Return a representation of the selector.""" + + return "Selector" + repr(self.chain) + + def __call__(self, obj, do_raise=False): + """Select a datum to operate on. + + Selects the relevant datum within the object. + + :param obj: The object from which to select the datum. + :param do_raise: If False (the default), return None if the + indexed datum does not exist. Otherwise, + raise a KeyError. + """ + + # Walk the selector list + for elem in self.chain: + # If it's callable, call it + if callable(elem): + obj = elem(obj) + else: + # Use indexing + try: + obj = obj[elem] + except (KeyError, IndexError): + # No sense going any further + if do_raise: + # Convert to a KeyError, for consistency + raise KeyError(elem) + return None + + # Return the finally-selected object + return obj + + +def get_items(obj): + """Get items in obj.""" + + return list(obj.items()) + + +class EmptyStringSelector(Selector): + """Returns the empty string if Selector would return None.""" + def __call__(self, obj, do_raise=False): + """Returns empty string if the selected value does not exist.""" + + try: + return super(EmptyStringSelector, self).__call__(obj, True) + except KeyError: + return "" + + +class ConstantSelector(object): + """Returns a constant.""" + + def __init__(self, value): + """Initialize the selector. + + :param value: The value to return. + """ + + self.value = value + + def __repr__(self): + """Return a representation of the selector.""" + + return repr(self.value) + + def __call__(self, _obj, _do_raise=False): + """Select a datum to operate on. + + Returns a constant value. Compatible with + Selector.__call__(). + """ + + return self.value + + +class TemplateElement(object): + """Represent an element in the template.""" + + def __init__(self, tag, attrib=None, selector=None, subselector=None, + **extra): + """Initialize an element. + + Initializes an element in the template. Keyword arguments + specify attributes to be set on the element; values must be + callables. See TemplateElement.set() for more information. + + :param tag: The name of the tag to create. + :param attrib: An optional dictionary of element attributes. + :param selector: An optional callable taking an object and + optional boolean do_raise indicator and + returning the object bound to the element.
+ :param subselector: An optional callable taking an object and + optional boolean do_raise indicator and + returning the object bound to the element. + This is used to further refine the datum + object returned by selector in the event + that it is a list of objects. + """ + + # Convert selector into a Selector + if selector is None: + selector = Selector() + elif not callable(selector): + selector = Selector(selector) + + # Convert subselector into a Selector + if subselector is not None and not callable(subselector): + subselector = Selector(subselector) + + self.tag = tag + self.selector = selector + self.subselector = subselector + self.attrib = {} + self._text = None + self._children = [] + self._childmap = {} + + # Run the incoming attributes through set() so that they + # become selectorized + if not attrib: + attrib = {} + attrib.update(extra) + for k, v in attrib.items(): + self.set(k, v) + + def __repr__(self): + """Return a representation of the template element.""" + + return ('<%s.%s %r at %#x>' % + (self.__class__.__module__, self.__class__.__name__, + self.tag, id(self))) + + def __len__(self): + """Return the number of child elements.""" + + return len(self._children) + + def __contains__(self, key): + """Determine whether a child node named by key exists.""" + + return key in self._childmap + + def __getitem__(self, idx): + """Retrieve a child node by index or name.""" + + if isinstance(idx, basestring): + # Allow access by node name + return self._childmap[idx] + else: + return self._children[idx] + + def append(self, elem): + """Append a child to the element.""" + + # Unwrap templates... + elem = elem.unwrap() + + # Avoid duplications + if elem.tag in self._childmap: + raise KeyError(elem.tag) + + self._children.append(elem) + self._childmap[elem.tag] = elem + + def extend(self, elems): + """Append children to the element.""" + + # Pre-evaluate the elements + elemmap = {} + elemlist = [] + for elem in elems: + # Unwrap templates... + elem = elem.unwrap() + + # Avoid duplications + if elem.tag in self._childmap or elem.tag in elemmap: + raise KeyError(elem.tag) + + elemmap[elem.tag] = elem + elemlist.append(elem) + + # Update the children + self._children.extend(elemlist) + self._childmap.update(elemmap) + + def insert(self, idx, elem): + """Insert a child element at the given index.""" + + # Unwrap templates... + elem = elem.unwrap() + + # Avoid duplications + if elem.tag in self._childmap: + raise KeyError(elem.tag) + + self._children.insert(idx, elem) + self._childmap[elem.tag] = elem + + def remove(self, elem): + """Remove a child element.""" + + # Unwrap templates... + elem = elem.unwrap() + + # Check if element exists + if elem.tag not in self._childmap or self._childmap[elem.tag] != elem: + raise ValueError(_('element is not a child')) + + self._children.remove(elem) + del self._childmap[elem.tag] + + def get(self, key): + """Get an attribute. + + Returns a callable which performs datum selection. + + :param key: The name of the attribute to get. + """ + + return self.attrib[key] + + def set(self, key, value=None): + """Set an attribute. + + :param key: The name of the attribute to set. + + :param value: A callable taking an object and optional boolean + do_raise indicator and returning the datum bound + to the attribute. If None, a Selector() will be + constructed from the key. If a string, a + Selector() will be constructed from the string. 
+ """ + + # Convert value to a selector + if value is None: + value = Selector(key) + elif not callable(value): + value = Selector(value) + + self.attrib[key] = value + + def keys(self): + """Return the attribute names.""" + + return self.attrib.keys() + + def items(self): + """Return the attribute names and values.""" + + return self.attrib.items() + + def unwrap(self): + """Unwraps a template to return a template element.""" + + # We are a template element + return self + + def wrap(self): + """Wraps a template element to return a template.""" + + # Wrap in a basic Template + return Template(self) + + def apply(self, elem, obj): + """Apply text and attributes to an etree.Element. + + Applies the text and attribute instructions in the template + element to an etree.Element instance. + + :param elem: An etree.Element instance. + :param obj: The base object associated with this template + element. + """ + + # Start with the text... + if self.text is not None: + elem.text = unicode(self.text(obj)) + + # Now set up all the attributes... + for key, value in self.attrib.items(): + try: + elem.set(key, unicode(value(obj, True))) + except KeyError: + # Attribute has no value, so don't include it + pass + + def _render(self, parent, datum, patches, nsmap): + """Internal rendering. + + Renders the template node into an etree.Element object. + Returns the etree.Element object. + + :param parent: The parent etree.Element instance. + :param datum: The datum associated with this template element. + :param patches: A list of other template elements that must + also be applied. + :param nsmap: An optional namespace dictionary to be + associated with the etree.Element instance. + """ + + # Allocate a node + if callable(self.tag): + tagname = self.tag(datum) + else: + tagname = self.tag + elem = etree.Element(tagname, nsmap=nsmap) + + # If we have a parent, append the node to the parent + if parent is not None: + parent.append(elem) + + # If the datum is None, do nothing else + if datum is None: + return elem + + # Apply this template element to the element + self.apply(elem, datum) + + # Additionally, apply the patches + for patch in patches: + patch.apply(elem, datum) + + # We have fully rendered the element; return it + return elem + + def render(self, parent, obj, patches=[], nsmap=None): + """Render an object. + + Renders an object against this template node. Returns a list + of two-item tuples, where the first item is an etree.Element + instance and the second item is the datum associated with that + instance. + + :param parent: The parent for the etree.Element instances. + :param obj: The object to render this template element + against. + :param patches: A list of other template elements to apply + when rendering this template element. + :param nsmap: An optional namespace dictionary to attach to + the etree.Element instances. 
+ """ + + # First, get the datum we're rendering + data = None if obj is None else self.selector(obj) + + # Check if we should render at all + if not self.will_render(data): + return [] + elif data is None: + return [(self._render(parent, None, patches, nsmap), None)] + + # Make the data into a list if it isn't already + if not isinstance(data, list): + data = [data] + elif parent is None: + raise ValueError(_('root element selecting a list')) + + # Render all the elements + elems = [] + for datum in data: + if self.subselector is not None: + datum = self.subselector(datum) + elems.append((self._render(parent, datum, patches, nsmap), datum)) + + # Return all the elements rendered, as well as the + # corresponding datum for the next step down the tree + return elems + + def will_render(self, datum): + """Hook method. + + An overridable hook method to determine whether this template + element will be rendered at all. By default, returns False + (inhibiting rendering) if the datum is None. + + :param datum: The datum associated with this template element. + """ + + # Don't render if datum is None + return datum is not None + + def _text_get(self): + """Template element text. + + Either None or a callable taking an object and optional + boolean do_raise indicator and returning the datum bound to + the text of the template element. + """ + + return self._text + + def _text_set(self, value): + # Convert value to a selector + if value is not None and not callable(value): + value = Selector(value) + + self._text = value + + def _text_del(self): + self._text = None + + text = property(_text_get, _text_set, _text_del) + + def tree(self): + """Return string representation of the template tree. + + Returns a representation of the template rooted at this + element as a string, suitable for inclusion in debug logs. + """ + + # Build the inner contents of the tag... + contents = [self.tag, '!selector=%r' % self.selector] + + # Add the text... + if self.text is not None: + contents.append('!text=%r' % self.text) + + # Add all the other attributes + for key, value in self.attrib.items(): + contents.append('%s=%r' % (key, value)) + + # If there are no children, return it as a closed tag + if len(self) == 0: + return '<%s/>' % ' '.join([str(i) for i in contents]) + + # OK, recurse to our children + children = [c.tree() for c in self] + + # Return the result + return ('<%s>%s' % + (' '.join(contents), ''.join(children), self.tag)) + + +def SubTemplateElement(parent, tag, attrib=None, selector=None, + subselector=None, **extra): + """Create a template element as a child of another. + + Corresponds to the etree.SubElement interface. Parameters are as + for TemplateElement, with the addition of the parent. + """ + + # Convert attributes + attrib = attrib or {} + attrib.update(extra) + + # Get a TemplateElement + elem = TemplateElement(tag, attrib=attrib, selector=selector, + subselector=subselector) + + # Append the parent safely + if parent is not None: + parent.append(elem) + + return elem + + +class Template(object): + """Represent a template.""" + + def __init__(self, root, nsmap=None): + """Initialize a template. + + :param root: The root element of the template. + :param nsmap: An optional namespace dictionary to be + associated with the root element of the + template. 
+ """ + + self.root = root.unwrap() if root is not None else None + self.nsmap = nsmap or {} + self.serialize_options = dict(encoding='UTF-8', xml_declaration=True) + + def _serialize(self, parent, obj, siblings, nsmap=None): + """Internal serialization. + + Recursive routine to build a tree of etree.Element instances + from an object based on the template. Returns the first + etree.Element instance rendered, or None. + + :param parent: The parent etree.Element instance. Can be + None. + :param obj: The object to render. + :param siblings: The TemplateElement instances against which + to render the object. + :param nsmap: An optional namespace dictionary to be + associated with the etree.Element instance + rendered. + """ + + # First step, render the element + elems = siblings[0].render(parent, obj, siblings[1:], nsmap) + + # Now, recurse to all child elements + seen = set() + for idx, sibling in enumerate(siblings): + for child in sibling: + # Have we handled this child already? + if child.tag in seen: + continue + seen.add(child.tag) + + # Determine the child's siblings + nieces = [child] + for sib in siblings[idx + 1:]: + if child.tag in sib: + nieces.append(sib[child.tag]) + + # Now we recurse for every data element + for elem, datum in elems: + self._serialize(elem, datum, nieces) + + # Return the first element; at the top level, this will be the + # root element + if elems: + return elems[0][0] + + def serialize(self, obj, *args, **kwargs): + """Serialize an object. + + Serializes an object against the template. Returns a string + with the serialized XML. Positional and keyword arguments are + passed to etree.tostring(). + + :param obj: The object to serialize. + """ + + elem = self.make_tree(obj) + if elem is None: + return '' + + for k, v in self.serialize_options.items(): + kwargs.setdefault(k, v) + + # Serialize it into XML + return etree.tostring(elem, *args, **kwargs) + + def make_tree(self, obj): + """Create a tree. + + Serializes an object against the template. Returns an Element + node with appropriate children. + + :param obj: The object to serialize. + """ + + # If the template is empty, return the empty string + if self.root is None: + return None + + # Get the siblings and nsmap of the root element + siblings = self._siblings() + nsmap = self._nsmap() + + # Form the element tree + return self._serialize(None, obj, siblings, nsmap) + + def _siblings(self): + """Hook method for computing root siblings. + + An overridable hook method to return the siblings of the root + element. By default, this is the root element itself. + """ + + return [self.root] + + def _nsmap(self): + """Hook method for computing the namespace dictionary. + + An overridable hook method to return the namespace dictionary. + """ + + return self.nsmap.copy() + + def unwrap(self): + """Unwraps a template to return a template element.""" + + # Return the root element + return self.root + + def wrap(self): + """Wraps a template element to return a template.""" + + # We are a template + return self + + def apply(self, master): + """Hook method for determining slave applicability. + + An overridable hook method used to determine if this template + is applicable as a slave to a given master template. + + :param master: The master template to test. + """ + + return True + + def tree(self): + """Return string representation of the template tree. + + Returns a representation of the template as a string, suitable + for inclusion in debug logs. 
+ """ + + return "%r: %s" % (self, self.root.tree()) + + +class MasterTemplate(Template): + """Represent a master template. + + Master templates are versioned derivatives of templates that + additionally allow slave templates to be attached. Slave + templates allow modification of the serialized result without + directly changing the master. + """ + + def __init__(self, root, version, nsmap=None): + """Initialize a master template. + + :param root: The root element of the template. + :param version: The version number of the template. + :param nsmap: An optional namespace dictionary to be + associated with the root element of the + template. + """ + + super(MasterTemplate, self).__init__(root, nsmap) + self.version = version + self.slaves = [] + + def __repr__(self): + """Return string representation of the template.""" + + return ("<%s.%s object version %s at %#x>" % + (self.__class__.__module__, self.__class__.__name__, + self.version, id(self))) + + def _siblings(self): + """Hook method for computing root siblings. + + An overridable hook method to return the siblings of the root + element. This is the root element plus the root elements of + all the slave templates. + """ + + return [self.root] + [slave.root for slave in self.slaves] + + def _nsmap(self): + """Hook method for computing the namespace dictionary. + + An overridable hook method to return the namespace dictionary. + The namespace dictionary is computed by taking the master + template's namespace dictionary and updating it from all the + slave templates. + """ + + nsmap = self.nsmap.copy() + for slave in self.slaves: + nsmap.update(slave._nsmap()) + return nsmap + + def attach(self, *slaves): + """Attach one or more slave templates. + + Attaches one or more slave templates to the master template. + Slave templates must have a root element with the same tag as + the master template. The slave template's apply() method will + be called to determine if the slave should be applied to this + master; if it returns False, that slave will be skipped. + (This allows filtering of slaves based on the version of the + master template.) + """ + + slave_list = [] + for slave in slaves: + slave = slave.wrap() + + # Make sure we have a tree match + if slave.root.tag != self.root.tag: + slavetag = slave.root.tag + mastertag = self.root.tag + msg = _("Template tree mismatch; adding slave %(slavetag)s " + "to master %(mastertag)s") % locals() + raise ValueError(msg) + + # Make sure slave applies to this template + if not slave.apply(self): + continue + + slave_list.append(slave) + + # Add the slaves + self.slaves.extend(slave_list) + + def copy(self): + """Return a copy of this master template.""" + + # Return a copy of the MasterTemplate + tmp = self.__class__(self.root, self.version, self.nsmap) + tmp.slaves = self.slaves[:] + return tmp + + +class SlaveTemplate(Template): + """Represent a slave template. + + Slave templates are versioned derivatives of templates. Each + slave has a minimum version and optional maximum version of the + master template to which they can be attached. + """ + + def __init__(self, root, min_vers, max_vers=None, nsmap=None): + """Initialize a slave template. + + :param root: The root element of the template. + :param min_vers: The minimum permissible version of the master + template for this slave template to apply. + :param max_vers: An optional upper bound for the master + template version. + :param nsmap: An optional namespace dictionary to be + associated with the root element of the + template. 
+ """ + + super(SlaveTemplate, self).__init__(root, nsmap) + self.min_vers = min_vers + self.max_vers = max_vers + + def __repr__(self): + """Return string representation of the template.""" + + return ("<%s.%s object versions %s-%s at %#x>" % + (self.__class__.__module__, self.__class__.__name__, + self.min_vers, self.max_vers, id(self))) + + def apply(self, master): + """Hook method for determining slave applicability. + + An overridable hook method used to determine if this template + is applicable as a slave to a given master template. This + version requires the master template to have a version number + between min_vers and max_vers. + + :param master: The master template to test. + """ + + # Does the master meet our minimum version requirement? + if master.version < self.min_vers: + return False + + # How about our maximum version requirement? + if self.max_vers is not None and master.version > self.max_vers: + return False + + return True + + +class TemplateBuilder(object): + """Template builder. + + This class exists to allow templates to be lazily built without + having to build them each time they are needed. It must be + subclassed, and the subclass must implement the construct() + method, which must return a Template (or subclass) instance. The + constructor will always return the template returned by + construct(), or, if it has a copy() method, a copy of that + template. + """ + + _tmpl = None + + def __new__(cls, copy=True): + """Construct and return a template. + + :param copy: If True (the default), a copy of the template + will be constructed and returned, if possible. + """ + + # Do we need to construct the template? + if cls._tmpl is None: + tmp = super(TemplateBuilder, cls).__new__(cls) + + # Construct the template + cls._tmpl = tmp.construct() + + # If the template has a copy attribute, return the result of + # calling it + if copy and hasattr(cls._tmpl, 'copy'): + return cls._tmpl.copy() + + # Return the template + return cls._tmpl + + def construct(self): + """Construct a template. + + Called to construct a template instance, which it must return. + Only called once. + """ + + raise NotImplementedError(_("subclasses must implement construct()!")) + + +def make_links(parent, selector=None): + """ + Attach an Atom element to the parent. + """ + + elem = SubTemplateElement(parent, '{%s}link' % XMLNS_ATOM, + selector=selector) + elem.set('rel') + elem.set('type') + elem.set('href') + + # Just for completeness... + return elem + + +def make_flat_dict(name, selector=None, subselector=None, ns=None): + """ + Utility for simple XML templates that traditionally used + XMLDictSerializer with no metadata. Returns a template element + where the top-level element has the given tag name, and where + sub-elements have tag names derived from the object's keys and + text derived from the object's values. This only works for flat + dictionary objects, not dictionaries containing nested lists or + dictionaries. + """ + + # Set up the names we need... 
+ if ns is None: + elemname = name + tagname = Selector(0) + else: + elemname = '{%s}%s' % (ns, name) + tagname = lambda obj, do_raise=False: '{%s}%s' % (ns, obj[0]) + + if selector is None: + selector = name + + # Build the root element + root = TemplateElement(elemname, selector=selector, + subselector=subselector) + + # Build an element to represent all the keys and values + elem = SubTemplateElement(root, tagname, selector=get_items) + elem.text = 1 + + # Return the template + return root diff --git a/cinder/backup/__init__.py b/cinder/backup/__init__.py new file mode 100644 index 0000000000..368e2ffff0 --- /dev/null +++ b/cinder/backup/__init__.py @@ -0,0 +1,23 @@ +# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +# Importing full names to not pollute the namespace and cause possible +# collisions with use of 'from cinder.backup import <foo>' elsewhere. + +import cinder.flags +import cinder.openstack.common.importutils + +API = cinder.openstack.common.importutils.import_class( + cinder.flags.FLAGS.backup_api_class) diff --git a/cinder/backup/api.py b/cinder/backup/api.py new file mode 100644 index 0000000000..1b5d1d49bd --- /dev/null +++ b/cinder/backup/api.py @@ -0,0 +1,171 @@ +# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Handles all requests relating to the volume backups service. +""" + +from eventlet import greenthread + +from cinder.backup import rpcapi as backup_rpcapi +from cinder.db import base +from cinder import exception +from cinder import flags +from cinder.openstack.common import log as logging +import cinder.volume + + +FLAGS = flags.FLAGS + +LOG = logging.getLogger(__name__) + + +class API(base.Base): + """API for interacting with the volume backup manager.""" + + def __init__(self, db_driver=None): + self.backup_rpcapi = backup_rpcapi.BackupAPI() + self.volume_api = cinder.volume.API() + super(API, self).__init__(db_driver) + + def get(self, context, backup_id): + rv = self.db.backup_get(context, backup_id) + return dict(rv.iteritems()) + + def delete(self, context, backup_id): + """ + Make the RPC call to delete a volume backup.
+ """ + backup = self.get(context, backup_id) + if backup['status'] not in ['available', 'error']: + msg = _('Backup status must be available or error') + raise exception.InvalidBackup(reason=msg) + + self.db.backup_update(context, backup_id, {'status': 'deleting'}) + self.backup_rpcapi.delete_backup(context, + backup['host'], + backup['id']) + + # TODO(moorehef): Add support for search_opts, discarded atm + def get_all(self, context, search_opts={}): + if context.is_admin: + backups = self.db.backup_get_all(context) + else: + backups = self.db.backup_get_all_by_project(context, + context.project_id) + + return backups + + def create(self, context, name, description, volume_id, + container, availability_zone=None): + """ + Make the RPC call to create a volume backup. + """ + volume = self.volume_api.get(context, volume_id) + if volume['status'] != "available": + msg = _('Volume to be backed up must be available') + raise exception.InvalidVolume(reason=msg) + self.db.volume_update(context, volume_id, {'status': 'backing-up'}) + + options = {'user_id': context.user_id, + 'project_id': context.project_id, + 'display_name': name, + 'display_description': description, + 'volume_id': volume_id, + 'status': 'creating', + 'container': container, + 'size': volume['size'], + # TODO(DuncanT): This will need de-managling once + # multi-backend lands + 'host': volume['host'], } + + backup = self.db.backup_create(context, options) + + #TODO(DuncanT): In future, when we have a generic local attach, + # this can go via the scheduler, which enables + # better load ballancing and isolation of services + self.backup_rpcapi.create_backup(context, + backup['host'], + backup['id'], + volume_id) + + return backup + + def restore(self, context, backup_id, volume_id=None): + """ + Make the RPC call to restore a volume backup. + """ + backup = self.get(context, backup_id) + if backup['status'] != 'available': + msg = _('Backup status must be available') + raise exception.InvalidBackup(reason=msg) + + size = backup['size'] + if size is None: + msg = _('Backup to be restored has invalid size') + raise exception.InvalidBackup(reason=msg) + + # Create a volume if none specified. 
If a volume is specified, check + # that it is large enough for the backup + if volume_id is None: + name = 'restore_backup_%s' % backup_id + description = 'auto-created_from_restore_from_swift' + + LOG.audit(_("Creating volume of %(size)s GB for restore of " + "backup %(backup_id)s"), locals(), context=context) + volume = self.volume_api.create(context, size, name, description) + volume_id = volume['id'] + + while True: + volume = self.volume_api.get(context, volume_id) + if volume['status'] != 'creating': + break + greenthread.sleep(1) + else: + volume = self.volume_api.get(context, volume_id) + volume_size = volume['size'] + if volume_size < size: + err = _('volume size %(volume_size)d is too small to restore ' + 'backup of size %(size)d.') % locals() + raise exception.InvalidVolume(reason=err) + + if volume['status'] != "available": + msg = _('Volume to be restored to must be available') + raise exception.InvalidVolume(reason=msg) + + LOG.debug('Checking backup size %s against volume size %s', + size, volume['size']) + if size > volume['size']: + msg = _('Volume to be restored to is smaller ' + 'than the backup to be restored') + raise exception.InvalidVolume(reason=msg) + + LOG.audit(_("Overwriting volume %(volume_id)s with restore of " + "backup %(backup_id)s"), locals(), context=context) + + # Setting the status here rather than at the start and unrolling on + # each error condition; the window should be very small + self.db.backup_update(context, backup_id, {'status': 'restoring'}) + self.db.volume_update(context, volume_id, {'status': + 'restoring-backup'}) + self.backup_rpcapi.restore_backup(context, + backup['host'], + backup['id'], + volume_id) + + d = {'backup_id': backup_id, + 'volume_id': volume_id, } + + return d diff --git a/cinder/backup/manager.py b/cinder/backup/manager.py new file mode 100755 index 0000000000..b4a972327b --- /dev/null +++ b/cinder/backup/manager.py @@ -0,0 +1,264 @@ +# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Backup manager manages volume backups. + +Volume backups are full copies of persistent volumes stored in Swift object +storage. They are usable without the original object being available. A +volume backup can be restored to the original volume it was created from, or +to any other available volume at least as large as the original. +Volume backups can be created, restored, deleted and listed. + +**Related Flags** + +:backup_topic: What :mod:`rpc` topic to listen to (default: + `cinder-backup`). +:backup_manager: The module name of a class derived from + :class:`manager.Manager` (default: + :class:`cinder.backup.manager.Manager`).
+ +""" + +from oslo.config import cfg + +from cinder import context +from cinder import exception +from cinder import flags +from cinder import manager +from cinder.openstack.common import excutils +from cinder.openstack.common import importutils +from cinder.openstack.common import log as logging + +LOG = logging.getLogger(__name__) + +backup_manager_opts = [ + cfg.StrOpt('backup_service', + default='cinder.backup.services.swift', + help='Service to use for backups.'), +] + +FLAGS = flags.FLAGS +FLAGS.register_opts(backup_manager_opts) + + +class BackupManager(manager.SchedulerDependentManager): + """Manages backup of block storage devices.""" + + RPC_API_VERSION = '1.0' + + def __init__(self, service_name=None, *args, **kwargs): + self.service = importutils.import_module(FLAGS.backup_service) + self.az = FLAGS.storage_availability_zone + self.volume_manager = importutils.import_object(FLAGS.volume_manager) + self.driver = self.volume_manager.driver + super(BackupManager, self).__init__(service_name='backup', + *args, **kwargs) + self.driver.db = self.db + + def init_host(self): + """Do any initialization that needs to be run if this is a + standalone service.""" + + ctxt = context.get_admin_context() + self.driver.do_setup(ctxt) + self.driver.check_for_setup_error() + + LOG.info(_("Cleaning up incomplete backup operations")) + volumes = self.db.volume_get_all_by_host(ctxt, self.host) + for volume in volumes: + if volume['status'] == 'backing-up': + LOG.info(_('Resetting volume %s to available ' + '(was backing-up)') % volume['id']) + self.volume_manager.detach_volume(ctxt, volume['id']) + if volume['status'] == 'restoring-backup': + LOG.info(_('Resetting volume %s to error_restoring ' + '(was restoring-backup)') % volume['id']) + self.volume_manager.detach_volume(ctxt, volume['id']) + self.db.volume_update(ctxt, volume['id'], + {'status': 'error_restoring'}) + + # TODO(smulcahy) implement full resume of backup and restore + # operations on restart (rather than simply resetting) + backups = self.db.backup_get_all_by_host(ctxt, self.host) + for backup in backups: + if backup['status'] == 'creating': + LOG.info(_('Resetting backup %s to error ' + '(was creating)') % backup['id']) + err = 'incomplete backup reset on manager restart' + self.db.backup_update(ctxt, backup['id'], {'status': 'error', + 'fail_reason': err}) + if backup['status'] == 'restoring': + LOG.info(_('Resetting backup %s to available ' + '(was restoring)') % backup['id']) + self.db.backup_update(ctxt, backup['id'], + {'status': 'available'}) + if backup['status'] == 'deleting': + LOG.info(_('Resuming delete on backup: %s') % backup['id']) + self.delete_backup(ctxt, backup['id']) + + def create_backup(self, context, backup_id): + """ + Create volume backups using configured backup service. 
+ """ + backup = self.db.backup_get(context, backup_id) + volume_id = backup['volume_id'] + volume = self.db.volume_get(context, volume_id) + LOG.info(_('create_backup started, backup: %(backup_id)s for ' + 'volume: %(volume_id)s') % locals()) + self.db.backup_update(context, backup_id, {'host': self.host, + 'service': + FLAGS.backup_service}) + + expected_status = 'backing-up' + actual_status = volume['status'] + if actual_status != expected_status: + err = _('create_backup aborted, expected volume status ' + '%(expected_status)s but got %(actual_status)s') % locals() + self.db.backup_update(context, backup_id, {'status': 'error', + 'fail_reason': err}) + raise exception.InvalidVolume(reason=err) + + expected_status = 'creating' + actual_status = backup['status'] + if actual_status != expected_status: + err = _('create_backup aborted, expected backup status ' + '%(expected_status)s but got %(actual_status)s') % locals() + self.db.volume_update(context, volume_id, {'status': 'available'}) + self.db.backup_update(context, backup_id, {'status': 'error', + 'fail_reason': err}) + raise exception.InvalidBackup(reason=err) + + try: + backup_service = self.service.get_backup_service(context) + self.driver.backup_volume(context, backup, backup_service) + except Exception as err: + with excutils.save_and_reraise_exception(): + self.db.volume_update(context, volume_id, + {'status': 'available'}) + self.db.backup_update(context, backup_id, + {'status': 'error', + 'fail_reason': unicode(err)}) + + self.db.volume_update(context, volume_id, {'status': 'available'}) + self.db.backup_update(context, backup_id, {'status': 'available', + 'size': volume['size'], + 'availability_zone': + self.az}) + LOG.info(_('create_backup finished. backup: %s'), backup_id) + + def restore_backup(self, context, backup_id, volume_id): + """ + Restore volume backups from configured backup service. 
+ """ + LOG.info(_('restore_backup started, restoring backup: %(backup_id)s' + ' to volume: %(volume_id)s') % locals()) + backup = self.db.backup_get(context, backup_id) + volume = self.db.volume_get(context, volume_id) + self.db.backup_update(context, backup_id, {'host': self.host}) + + expected_status = 'restoring-backup' + actual_status = volume['status'] + if actual_status != expected_status: + err = _('restore_backup aborted, expected volume status ' + '%(expected_status)s but got %(actual_status)s') % locals() + self.db.backup_update(context, backup_id, {'status': 'available'}) + raise exception.InvalidVolume(reason=err) + + expected_status = 'restoring' + actual_status = backup['status'] + if actual_status != expected_status: + err = _('restore_backup aborted, expected backup status ' + '%(expected_status)s but got %(actual_status)s') % locals() + self.db.backup_update(context, backup_id, {'status': 'error', + 'fail_reason': err}) + self.db.volume_update(context, volume_id, {'status': 'error'}) + raise exception.InvalidBackup(reason=err) + + if volume['size'] > backup['size']: + LOG.warn('volume: %s, size: %d is larger than backup: %s, ' + 'size: %d, continuing with restore', + volume['id'], volume['size'], + backup['id'], backup['size']) + + backup_service = backup['service'] + configured_service = FLAGS.backup_service + if backup_service != configured_service: + err = _('restore_backup aborted, the backup service currently' + ' configured [%(configured_service)s] is not the' + ' backup service that was used to create this' + ' backup [%(backup_service)s]') % locals() + self.db.backup_update(context, backup_id, {'status': 'available'}) + self.db.volume_update(context, volume_id, {'status': 'error'}) + raise exception.InvalidBackup(reason=err) + + try: + backup_service = self.service.get_backup_service(context) + self.driver.restore_backup(context, backup, volume, + backup_service) + except Exception as err: + with excutils.save_and_reraise_exception(): + self.db.volume_update(context, volume_id, + {'status': 'error_restoring'}) + self.db.backup_update(context, backup_id, + {'status': 'available'}) + + self.db.volume_update(context, volume_id, {'status': 'available'}) + self.db.backup_update(context, backup_id, {'status': 'available'}) + LOG.info(_('restore_backup finished, backup: %(backup_id)s restored' + ' to volume: %(volume_id)s') % locals()) + + def delete_backup(self, context, backup_id): + """ + Delete volume backup from configured backup service. 
+ """ + backup = self.db.backup_get(context, backup_id) + LOG.info(_('delete_backup started, backup: %s'), backup_id) + self.db.backup_update(context, backup_id, {'host': self.host}) + + expected_status = 'deleting' + actual_status = backup['status'] + if actual_status != expected_status: + err = _('delete_backup aborted, expected backup status ' + '%(expected_status)s but got %(actual_status)s') % locals() + self.db.backup_update(context, backup_id, {'status': 'error', + 'fail_reason': err}) + raise exception.InvalidBackup(reason=err) + + backup_service = backup['service'] + if backup_service is not None: + configured_service = FLAGS.backup_service + if backup_service != configured_service: + err = _('delete_backup aborted, the backup service currently' + ' configured [%(configured_service)s] is not the' + ' backup service that was used to create this' + ' backup [%(backup_service)s]') % locals() + self.db.backup_update(context, backup_id, + {'status': 'error'}) + raise exception.InvalidBackup(reason=err) + + try: + backup_service = self.service.get_backup_service(context) + backup_service.delete(backup) + except Exception as err: + with excutils.save_and_reraise_exception(): + self.db.backup_update(context, backup_id, + {'status': 'error', + 'fail_reason': + unicode(err)}) + + context = context.elevated() + self.db.backup_destroy(context, backup_id) + LOG.info(_('delete_backup finished, backup %s deleted'), backup_id) diff --git a/cinder/backup/rpcapi.py b/cinder/backup/rpcapi.py new file mode 100644 index 0000000000..a0b8771bca --- /dev/null +++ b/cinder/backup/rpcapi.py @@ -0,0 +1,73 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Client side of the volume backup RPC API. +""" + +from cinder import flags +from cinder.openstack.common import log as logging +from cinder.openstack.common import rpc +import cinder.openstack.common.rpc.proxy + + +LOG = logging.getLogger(__name__) + +FLAGS = flags.FLAGS + + +class BackupAPI(cinder.openstack.common.rpc.proxy.RpcProxy): + '''Client side of the volume rpc API. + + API version history: + + 1.0 - Initial version. 
+ ''' + + BASE_RPC_API_VERSION = '1.0' + + def __init__(self): + super(BackupAPI, self).__init__( + topic=FLAGS.backup_topic, + default_version=self.BASE_RPC_API_VERSION) + + def create_backup(self, ctxt, host, backup_id, volume_id): + LOG.debug("create_backup in rpcapi backup_id %s", backup_id) + topic = rpc.queue_get_for(ctxt, self.topic, host) + LOG.debug("create queue topic=%s", topic) + self.cast(ctxt, + self.make_msg('create_backup', + backup_id=backup_id), + topic=topic) + + def restore_backup(self, ctxt, host, backup_id, volume_id): + LOG.debug("restore_backup in rpcapi backup_id %s", backup_id) + topic = rpc.queue_get_for(ctxt, self.topic, host) + LOG.debug("restore queue topic=%s", topic) + self.cast(ctxt, + self.make_msg('restore_backup', + backup_id=backup_id, + volume_id=volume_id), + topic=topic) + + def delete_backup(self, ctxt, host, backup_id): + LOG.debug("delete_backup rpcapi backup_id %s", backup_id) + topic = rpc.queue_get_for(ctxt, self.topic, host) + self.cast(ctxt, + self.make_msg('delete_backup', + backup_id=backup_id), + topic=topic) diff --git a/cinder/backup/services/__init__.py b/cinder/backup/services/__init__.py new file mode 100644 index 0000000000..f745a135ae --- /dev/null +++ b/cinder/backup/services/__init__.py @@ -0,0 +1,14 @@ +# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/cinder/backup/services/swift.py b/cinder/backup/services/swift.py new file mode 100644 index 0000000000..43cbd1ff2d --- /dev/null +++ b/cinder/backup/services/swift.py @@ -0,0 +1,384 @@ +# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Implementation of a backup service that uses Swift as the backend + +**Related Flags** + +:backup_swift_url: The URL of the Swift endpoint (default: + http://localhost:8080/v1/AUTH_). +:backup_swift_object_size: The size in bytes of the Swift objects used + for volume backups (default: 52428800). +:backup_swift_retry_attempts: The number of retries to make for Swift + operations (default: 3). +:backup_swift_retry_backoff: The backoff time in seconds between retrying + failed Swift operations (default: 2). +:backup_compression_algorithm: Compression algorithm to use for volume + backups.
Supported options are: + None (to disable), zlib and bz2 (default: zlib) +""" + +import hashlib +import httplib +import json +import os +import socket +import StringIO + +import eventlet +from oslo.config import cfg + +from cinder.db import base +from cinder import exception +from cinder import flags +from cinder.openstack.common import log as logging +from cinder.openstack.common import timeutils +from swiftclient import client as swift + +LOG = logging.getLogger(__name__) + +swiftbackup_service_opts = [ + cfg.StrOpt('backup_swift_url', + default='http://localhost:8080/v1/AUTH_', + help='The URL of the Swift endpoint'), + cfg.StrOpt('backup_swift_container', + default='volumebackups', + help='The default Swift container to use'), + cfg.IntOpt('backup_swift_object_size', + default=52428800, + help='The size in bytes of Swift backup objects'), + cfg.IntOpt('backup_swift_retry_attempts', + default=3, + help='The number of retries to make for Swift operations'), + cfg.IntOpt('backup_swift_retry_backoff', + default=2, + help='The backoff time in seconds between Swift retries'), + cfg.StrOpt('backup_compression_algorithm', + default='zlib', + help='Compression algorithm (None to disable)'), +] + +FLAGS = flags.FLAGS +FLAGS.register_opts(swiftbackup_service_opts) + + +class SwiftBackupService(base.Base): + """Provides backup, restore and delete of backup objects within Swift.""" + + SERVICE_VERSION = '1.0.0' + SERVICE_VERSION_MAPPING = {'1.0.0': '_restore_v1'} + + def _get_compressor(self, algorithm): + try: + if algorithm.lower() in ('none', 'off', 'no'): + return None + elif algorithm.lower() in ('zlib', 'gzip'): + import zlib as compressor + return compressor + elif algorithm.lower() in ('bz2', 'bzip2'): + import bz2 as compressor + return compressor + except ImportError: + pass + + err = _('unsupported compression algorithm: %s') % algorithm + raise ValueError(unicode(err)) + + def __init__(self, context, db_driver=None): + self.context = context + self.swift_url = '%s%s' % (FLAGS.backup_swift_url, + self.context.project_id) + self.az = FLAGS.storage_availability_zone + self.data_block_size_bytes = FLAGS.backup_swift_object_size + self.swift_attempts = FLAGS.backup_swift_retry_attempts + self.swift_backoff = FLAGS.backup_swift_retry_backoff + self.compressor = \ + self._get_compressor(FLAGS.backup_compression_algorithm) + self.conn = swift.Connection(None, None, None, + retries=self.swift_attempts, + preauthurl=self.swift_url, + preauthtoken=self.context.auth_token, + starting_backoff=self.swift_backoff) + super(SwiftBackupService, self).__init__(db_driver) + + def _check_container_exists(self, container): + LOG.debug(_('_check_container_exists: container: %s') % container) + try: + self.conn.head_container(container) + except swift.ClientException as error: + if error.http_status == httplib.NOT_FOUND: + LOG.debug(_('container %s does not exist') % container) + return False + else: + raise + else: + LOG.debug(_('container %s exists') % container) + return True + + def _create_container(self, context, backup): + backup_id = backup['id'] + container = backup['container'] + LOG.debug(_('_create_container started, container: %(container)s, ' + 'backup: %(backup_id)s') % locals()) + if container is None: + container = FLAGS.backup_swift_container + self.db.backup_update(context, backup_id, {'container': container}) + if not self._check_container_exists(container): + self.conn.put_container(container) + return container + + def _generate_swift_object_name_prefix(self, backup): + az = 'az_%s' %
self.az + backup_name = '%s_backup_%s' % (az, backup['id']) + volume = 'volume_%s' % (backup['volume_id']) + timestamp = timeutils.strtime(fmt="%Y%m%d%H%M%S") + prefix = volume + '/' + timestamp + '/' + backup_name + LOG.debug(_('_generate_swift_object_name_prefix: %s') % prefix) + return prefix + + def _generate_object_names(self, backup): + prefix = backup['service_metadata'] + swift_objects = self.conn.get_container(backup['container'], + prefix=prefix, + full_listing=True)[1] + swift_object_names = [] + for swift_object in swift_objects: + swift_object_names.append(swift_object['name']) + LOG.debug(_('generated object list: %s') % swift_object_names) + return swift_object_names + + def _metadata_filename(self, backup): + swift_object_name = backup['service_metadata'] + filename = '%s_metadata' % swift_object_name + return filename + + def _write_metadata(self, backup, volume_id, container, object_list): + filename = self._metadata_filename(backup) + LOG.debug(_('_write_metadata started, container name: %(container)s,' + ' metadata filename: %(filename)s') % locals()) + metadata = {} + metadata['version'] = self.SERVICE_VERSION + metadata['backup_id'] = backup['id'] + metadata['volume_id'] = volume_id + metadata['backup_name'] = backup['display_name'] + metadata['backup_description'] = backup['display_description'] + metadata['created_at'] = str(backup['created_at']) + metadata['objects'] = object_list + metadata_json = json.dumps(metadata, sort_keys=True, indent=2) + reader = StringIO.StringIO(metadata_json) + etag = self.conn.put_object(container, filename, reader) + md5 = hashlib.md5(metadata_json).hexdigest() + if etag != md5: + err = _('error writing metadata file to swift, MD5 of metadata' + ' file in swift [%(etag)s] is not the same as MD5 of ' + 'metadata file sent to swift [%(md5)s]') % locals() + raise exception.InvalidBackup(reason=err) + LOG.debug(_('_write_metadata finished')) + + def _read_metadata(self, backup): + container = backup['container'] + filename = self._metadata_filename(backup) + LOG.debug(_('_read_metadata started, container name: %(container)s, ' + 'metadata filename: %(filename)s') % locals()) + (resp, body) = self.conn.get_object(container, filename) + metadata = json.loads(body) + LOG.debug(_('_read_metadata finished (%s)') % metadata) + return metadata + + def backup(self, backup, volume_file): + """Backup the given volume to swift using the given backup metadata.""" + backup_id = backup['id'] + volume_id = backup['volume_id'] + volume = self.db.volume_get(self.context, volume_id) + + if volume['size'] <= 0: + err = _('volume size %d is invalid.') % volume['size'] + raise exception.InvalidVolume(reason=err) + + try: + container = self._create_container(self.context, backup) + except socket.error as err: + raise exception.SwiftConnectionFailed(reason=str(err)) + + object_prefix = self._generate_swift_object_name_prefix(backup) + backup['service_metadata'] = object_prefix + self.db.backup_update(self.context, backup_id, {'service_metadata': + object_prefix}) + volume_size_bytes = volume['size'] * 1024 * 1024 * 1024 + availability_zone = self.az + LOG.debug(_('starting backup of volume: %(volume_id)s to swift,' + ' volume size: %(volume_size_bytes)d, swift object names' + ' prefix %(object_prefix)s, availability zone:' + ' %(availability_zone)s') % locals()) + object_id = 1 + object_list = [] + while True: + data_block_size_bytes = self.data_block_size_bytes + object_name = '%s-%05d' % (object_prefix, object_id) + obj = {} + obj[object_name] = {} + 
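# Each chunk is tracked in obj as {'offset': ..., 'length': ..., + # 'compression': ..., 'md5': ...}; the per-chunk records accumulate in + # object_list and are written out by _write_metadata() so that restore + # can cross-check the container listing. +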
obj[object_name]['offset'] = volume_file.tell() + data = volume_file.read(data_block_size_bytes) + obj[object_name]['length'] = len(data) + if data == '': + break + LOG.debug(_('reading chunk of data from volume')) + if self.compressor is not None: + algorithm = FLAGS.backup_compression_algorithm.lower() + obj[object_name]['compression'] = algorithm + data_size_bytes = len(data) + data = self.compressor.compress(data) + comp_size_bytes = len(data) + LOG.debug(_('compressed %(data_size_bytes)d bytes of data' + ' to %(comp_size_bytes)d bytes using ' + '%(algorithm)s') % locals()) + else: + LOG.debug(_('not compressing data')) + obj[object_name]['compression'] = 'none' + + reader = StringIO.StringIO(data) + LOG.debug(_('About to put_object')) + try: + etag = self.conn.put_object(container, object_name, reader) + except socket.error as err: + raise exception.SwiftConnectionFailed(reason=str(err)) + LOG.debug(_('swift MD5 for %(object_name)s: %(etag)s') % locals()) + md5 = hashlib.md5(data).hexdigest() + obj[object_name]['md5'] = md5 + LOG.debug(_('backup MD5 for %(object_name)s: %(md5)s') % locals()) + if etag != md5: + err = _('error writing object to swift, MD5 of object in ' + 'swift %(etag)s is not the same as MD5 of object sent ' + 'to swift %(md5)s') % locals() + raise exception.InvalidBackup(reason=err) + object_list.append(obj) + object_id += 1 + LOG.debug(_('Calling eventlet.sleep(0)')) + eventlet.sleep(0) + try: + self._write_metadata(backup, volume_id, container, object_list) + except socket.error as err: + raise exception.SwiftConnectionFailed(reason=str(err)) + self.db.backup_update(self.context, backup_id, {'object_count': + object_id}) + LOG.debug(_('backup %s finished.') % backup_id) + + def _restore_v1(self, backup, volume_id, metadata, volume_file): + """Restore a v1 swift volume backup from swift.""" + backup_id = backup['id'] + LOG.debug(_('v1 swift volume backup restore of %s started'), backup_id) + container = backup['container'] + metadata_objects = metadata['objects'] + metadata_object_names = [] + for metadata_object in metadata_objects: + metadata_object_names.extend(metadata_object.keys()) + LOG.debug(_('metadata_object_names = %s') % metadata_object_names) + prune_list = [self._metadata_filename(backup)] + swift_object_names = [swift_object_name for swift_object_name in + self._generate_object_names(backup) + if swift_object_name not in prune_list] + if sorted(swift_object_names) != sorted(metadata_object_names): + err = _('restore_backup aborted, actual swift object list in ' + 'swift does not match object list stored in metadata') + raise exception.InvalidBackup(reason=err) + + for metadata_object in metadata_objects: + object_name = metadata_object.keys()[0] + LOG.debug(_('restoring object from swift. 
backup: %(backup_id)s, ' + 'container: %(container)s, swift object name: ' + '%(object_name)s, volume: %(volume_id)s') % locals()) + try: + (resp, body) = self.conn.get_object(container, object_name) + except socket.error as err: + raise exception.SwiftConnectionFailed(reason=str(err)) + compression_algorithm = metadata_object[object_name]['compression'] + decompressor = self._get_compressor(compression_algorithm) + if decompressor is not None: + LOG.debug(_('decompressing data using %s algorithm') % + compression_algorithm) + decompressed = decompressor.decompress(body) + volume_file.write(decompressed) + else: + volume_file.write(body) + + # force flush every write to avoid long blocking write on close + volume_file.flush() + os.fsync(volume_file.fileno()) + # Restoring a backup to a volume can take some time. Yield so other + # threads can run, allowing for among other things the service + # status to be updated + eventlet.sleep(0) + LOG.debug(_('v1 swift volume backup restore of %s finished'), + backup_id) + + def restore(self, backup, volume_id, volume_file): + """Restore the given volume backup from swift.""" + backup_id = backup['id'] + container = backup['container'] + object_prefix = backup['service_metadata'] + LOG.debug(_('starting restore of backup %(object_prefix)s from swift' + ' container: %(container)s, to volume %(volume_id)s, ' + 'backup: %(backup_id)s') % locals()) + try: + metadata = self._read_metadata(backup) + except socket.error as err: + raise exception.SwiftConnectionFailed(reason=str(err)) + metadata_version = metadata['version'] + LOG.debug(_('Restoring swift backup version %s'), metadata_version) + try: + restore_func = getattr(self, self.SERVICE_VERSION_MAPPING.get( + metadata_version)) + except TypeError: + err = (_('No support to restore swift backup version %s') + % metadata_version) + raise exception.InvalidBackup(reason=err) + restore_func(backup, volume_id, metadata, volume_file) + LOG.debug(_('restore %(backup_id)s to %(volume_id)s finished.') % + locals()) + + def delete(self, backup): + """Delete the given backup from swift.""" + container = backup['container'] + LOG.debug('delete started, backup: %s, container: %s, prefix: %s', + backup['id'], container, backup['service_metadata']) + + if container is not None: + swift_object_names = [] + try: + swift_object_names = self._generate_object_names(backup) + except Exception: + LOG.warn(_('swift error while listing objects, continuing' + ' with delete')) + + for swift_object_name in swift_object_names: + try: + self.conn.delete_object(container, swift_object_name) + except socket.error as err: + raise exception.SwiftConnectionFailed(reason=str(err)) + except Exception: + LOG.warn(_('swift error while deleting object %s, ' + 'continuing with delete') % swift_object_name) + else: + LOG.debug(_('deleted swift object: %(swift_object_name)s' + ' in container: %(container)s') % locals()) + # Deleting a backup's objects from swift can take some time. + # Yield so other threads can run + eventlet.sleep(0) + + LOG.debug(_('delete %s finished') % backup['id']) + + +def get_backup_service(context): + return SwiftBackupService(context) diff --git a/cinder/brick/__init__.py b/cinder/brick/__init__.py new file mode 100644 index 0000000000..5e8da711fb --- /dev/null +++ b/cinder/brick/__init__.py @@ -0,0 +1,16 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 OpenStack Foundation. +# All Rights Reserved. 
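Both backup() and _write_metadata() above rely on the fact that, for a simple (non-segmented) PUT, Swift returns the MD5 of the stored object as its ETag, so corruption in transit can be detected end to end. A condensed sketch of that pattern, assuming a swiftclient-style connection object (IOError stands in here for the service's InvalidBackup):

    import hashlib
    import StringIO

    def upload_and_verify(conn, container, name, data):
        # put_object returns the ETag computed by Swift for the body
        etag = conn.put_object(container, name, StringIO.StringIO(data))
        md5 = hashlib.md5(data).hexdigest()
        if etag != md5:
            raise IOError('MD5 mismatch for %s: %s != %s'
                          % (name, etag, md5))
        return md5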
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/cinder/brick/iscsi/__init__.py b/cinder/brick/iscsi/__init__.py new file mode 100644 index 0000000000..5e8da711fb --- /dev/null +++ b/cinder/brick/iscsi/__init__.py @@ -0,0 +1,16 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/cinder/brick/iscsi/iscsi.py b/cinder/brick/iscsi/iscsi.py new file mode 100644 index 0000000000..5748697075 --- /dev/null +++ b/cinder/brick/iscsi/iscsi.py @@ -0,0 +1,468 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Helper code for the iSCSI volume driver. + +""" +import os +import re + +from oslo.config import cfg + +from cinder import exception +from cinder import flags +from cinder.openstack.common import log as logging +from cinder import utils +from cinder.volume import utils as volume_utils + +LOG = logging.getLogger(__name__) + +iscsi_helper_opt = [cfg.StrOpt('iscsi_helper', + default='tgtadm', + help='iscsi target user-land tool to use'), + cfg.StrOpt('volumes_dir', + default='$state_path/volumes', + help='Volume configuration file storage ' + 'directory'), + cfg.StrOpt('iet_conf', + default='/etc/iet/ietd.conf', + help='IET configuration file'), + cfg.StrOpt('lio_initiator_iqns', + default='', + help=('Comma-separated list of initiator IQNs ' + 'allowed to connect to the ' + 'iSCSI target.
(From Nova compute nodes.)' + ) + ), + cfg.StrOpt('iscsi_iotype', + default='fileio', + help=('Sets the behavior of the iSCSI target ' + 'to either perform blockio or fileio; ' + 'optionally, auto can be set and Cinder ' + 'will autodetect the type of backing device') + ) + ] + +FLAGS = flags.FLAGS +FLAGS.register_opts(iscsi_helper_opt) +FLAGS.import_opt('volume_name_template', 'cinder.db') + + +class TargetAdmin(object): + """iSCSI target administration. + + Base class for iSCSI target admin helpers. + """ + + def __init__(self, cmd, execute): + self._cmd = cmd + self.set_execute(execute) + + def set_execute(self, execute): + """Set the function to be used to execute commands.""" + self._execute = execute + + def _run(self, *args, **kwargs): + self._execute(self._cmd, *args, run_as_root=True, **kwargs) + + def create_iscsi_target(self, name, tid, lun, path, + chap_auth=None, **kwargs): + """Create an iSCSI target and logical unit.""" + raise NotImplementedError() + + def remove_iscsi_target(self, tid, lun, vol_id, **kwargs): + """Remove an iSCSI target and logical unit.""" + raise NotImplementedError() + + def _new_target(self, name, tid, **kwargs): + """Create a new iSCSI target.""" + raise NotImplementedError() + + def _delete_target(self, tid, **kwargs): + """Delete a target.""" + raise NotImplementedError() + + def show_target(self, tid, iqn=None, **kwargs): + """Query the given target ID.""" + raise NotImplementedError() + + def _new_logicalunit(self, tid, lun, path, **kwargs): + """Create a new LUN on a target using the supplied path.""" + raise NotImplementedError() + + def _delete_logicalunit(self, tid, lun, **kwargs): + """Delete a logical unit from a target.""" + raise NotImplementedError() + + +class TgtAdm(TargetAdmin): + """iSCSI target administration using tgtadm.""" + + def __init__(self, execute=utils.execute): + super(TgtAdm, self).__init__('tgtadm', execute) + + def _get_target(self, iqn): + (out, err) = self._execute('tgt-admin', '--show', run_as_root=True) + lines = out.split('\n') + for line in lines: + if iqn in line: + parsed = line.split() + tid = parsed[1] + return tid[:-1] + + return None + + def create_iscsi_target(self, name, tid, lun, path, + chap_auth=None, **kwargs): + # Note(jdg) tid and lun aren't used by TgtAdm but remain for + # compatibility + + utils.ensure_tree(FLAGS.volumes_dir) + + vol_id = name.split(':')[1] + if chap_auth is None: + volume_conf = """ + <target %s> + backing-store %s + </target> + """ % (name, path) + else: + volume_conf = """ + <target %s> + backing-store %s + %s + </target> + """ % (name, path, chap_auth) + + LOG.info(_('Creating iscsi_target for: %s') % vol_id) + volumes_dir = FLAGS.volumes_dir + volume_path = os.path.join(volumes_dir, vol_id) + + f = open(volume_path, 'w+') + f.write(volume_conf) + f.close() + + old_persist_file = None + old_name = kwargs.get('old_name', None) + if old_name is not None: + old_persist_file = os.path.join(volumes_dir, old_name) + + try: + (out, err) = self._execute('tgt-admin', + '--update', + name, + run_as_root=True) + except exception.ProcessExecutionError, e: + LOG.error(_("Failed to create iscsi target for volume " + "id:%(vol_id)s.") % locals()) + + # Don't forget to remove the persistent file we created + os.unlink(volume_path) + raise exception.ISCSITargetCreateFailed(volume_id=vol_id) + + iqn = '%s%s' % (FLAGS.iscsi_target_prefix, vol_id) + tid = self._get_target(iqn) + if tid is None: + LOG.error(_("Failed to create iscsi target for volume " + "id:%(vol_id)s.
Please ensure your tgtd config file " + "contains 'include %(volumes_dir)s/*'") % locals()) + raise exception.NotFound() + + if old_persist_file is not None and os.path.exists(old_persist_file): + os.unlink(old_persist_file) + + return tid + + def remove_iscsi_target(self, tid, lun, vol_id, **kwargs): + LOG.info(_('Removing iscsi_target for: %s') % vol_id) + vol_uuid_file = FLAGS.volume_name_template % vol_id + volume_path = os.path.join(FLAGS.volumes_dir, vol_uuid_file) + if os.path.isfile(volume_path): + iqn = '%s%s' % (FLAGS.iscsi_target_prefix, + vol_uuid_file) + else: + raise exception.ISCSITargetRemoveFailed(volume_id=vol_id) + try: + # NOTE(vish): --force is a workaround for bug: + # https://bugs.launchpad.net/cinder/+bug/1159948 + self._execute('tgt-admin', + '--force', + '--delete', + iqn, + run_as_root=True) + except exception.ProcessExecutionError, e: + LOG.error(_("Failed to remove iscsi target for volume " + "id:%(vol_id)s.") % locals()) + raise exception.ISCSITargetRemoveFailed(volume_id=vol_id) + + os.unlink(volume_path) + + def show_target(self, tid, iqn=None, **kwargs): + if iqn is None: + raise exception.InvalidParameterValue( + err=_('valid iqn needed for show_target')) + + tid = self._get_target(iqn) + if tid is None: + raise exception.NotFound() + + +class IetAdm(TargetAdmin): + """iSCSI target administration using ietadm.""" + + def __init__(self, execute=utils.execute): + super(IetAdm, self).__init__('ietadm', execute) + + def _iotype(self, path): + if FLAGS.iscsi_iotype == 'auto': + return 'blockio' if volume_utils.is_block(path) else 'fileio' + else: + return FLAGS.iscsi_iotype + + def create_iscsi_target(self, name, tid, lun, path, + chap_auth=None, **kwargs): + + # NOTE (jdg): Address bug: 1175207 + kwargs.pop('old_name', None) + + self._new_target(name, tid, **kwargs) + self._new_logicalunit(tid, lun, path, **kwargs) + if chap_auth is not None: + (type, username, password) = chap_auth.split() + self._new_auth(tid, type, username, password, **kwargs) + + conf_file = FLAGS.iet_conf + if os.path.exists(conf_file): + try: + volume_conf = """ + Target %s + %s + Lun 0 Path=%s,Type=%s + """ % (name, chap_auth, path, self._iotype(path)) + + with utils.temporary_chown(conf_file): + f = open(conf_file, 'a+') + f.write(volume_conf) + f.close() + except exception.ProcessExecutionError, e: + vol_id = name.split(':')[1] + LOG.error(_("Failed to create iscsi target for volume " + "id:%(vol_id)s.") % locals()) + raise exception.ISCSITargetCreateFailed(volume_id=vol_id) + return tid + + def remove_iscsi_target(self, tid, lun, vol_id, **kwargs): + LOG.info(_('Removing iscsi_target for volume: %s') % vol_id) + self._delete_logicalunit(tid, lun, **kwargs) + self._delete_target(tid, **kwargs) + vol_uuid_file = FLAGS.volume_name_template % vol_id + conf_file = FLAGS.iet_conf + if os.path.exists(conf_file): + with utils.temporary_chown(conf_file): + try: + iet_conf_text = open(conf_file, 'r+') + full_txt = iet_conf_text.readlines() + new_iet_conf_txt = [] + count = 0 + for line in full_txt: + if count > 0: + count -= 1 + continue + elif re.search(vol_uuid_file, line): + count = 2 + continue + else: + new_iet_conf_txt.append(line) + + iet_conf_text.seek(0) + iet_conf_text.truncate(0) + iet_conf_text.writelines(new_iet_conf_txt) + finally: + iet_conf_text.close() + + def _new_target(self, name, tid, **kwargs): + self._run('--op', 'new', + '--tid=%s' % tid, + '--params', 'Name=%s' % name, + **kwargs) + + def _delete_target(self, tid, **kwargs): + self._run('--op', 'delete', + '--tid=%s' 
% tid, + **kwargs) + + def show_target(self, tid, iqn=None, **kwargs): + self._run('--op', 'show', + '--tid=%s' % tid, + **kwargs) + + def _new_logicalunit(self, tid, lun, path, **kwargs): + self._run('--op', 'new', + '--tid=%s' % tid, + '--lun=%d' % lun, + '--params', 'Path=%s,Type=%s' % (path, self._iotype(path)), + **kwargs) + + def _delete_logicalunit(self, tid, lun, **kwargs): + self._run('--op', 'delete', + '--tid=%s' % tid, + '--lun=%d' % lun, + **kwargs) + + def _new_auth(self, tid, type, username, password, **kwargs): + self._run('--op', 'new', + '--tid=%s' % tid, + '--user', + '--params=%s=%s,Password=%s' % (type, username, password), + **kwargs) + + +class FakeIscsiHelper(object): + + def __init__(self): + self.tid = 1 + + def set_execute(self, execute): + self._execute = execute + + def create_iscsi_target(self, *args, **kwargs): + self.tid += 1 + return self.tid + + +class LioAdm(TargetAdmin): + """iSCSI target administration for LIO using python-rtslib.""" + def __init__(self, execute=utils.execute): + super(LioAdm, self).__init__('rtstool', execute) + + try: + self._execute('rtstool', 'verify') + except (OSError, exception.ProcessExecutionError): + LOG.error(_('rtstool is not installed correctly')) + raise + + def _get_target(self, iqn): + (out, err) = self._execute('rtstool', + 'get-targets', + run_as_root=True) + lines = out.split('\n') + for line in lines: + if iqn in line: + return line + + return None + + def create_iscsi_target(self, name, tid, lun, path, + chap_auth=None, **kwargs): + # tid and lun are not used + + vol_id = name.split(':')[1] + + LOG.info(_('Creating iscsi_target for volume: %s') % vol_id) + + # rtstool requires chap_auth, but unit tests don't provide it + chap_auth_userid = 'test_id' + chap_auth_password = 'test_pass' + + if chap_auth != None: + (chap_auth_userid, chap_auth_password) = chap_auth.split(' ')[1:] + + extra_args = [] + if FLAGS.lio_initiator_iqns: + extra_args.append(FLAGS.lio_initiator_iqns) + + try: + command_args = ['rtstool', + 'create', + path, + name, + chap_auth_userid, + chap_auth_password] + if extra_args != []: + command_args += extra_args + self._execute(*command_args, run_as_root=True) + except exception.ProcessExecutionError as e: + LOG.error(_("Failed to create iscsi target for volume " + "id:%(vol_id)s.") % locals()) + LOG.error("%s" % str(e)) + + raise exception.ISCSITargetCreateFailed(volume_id=vol_id) + + iqn = '%s%s' % (FLAGS.iscsi_target_prefix, vol_id) + tid = self._get_target(iqn) + if tid is None: + LOG.error(_("Failed to create iscsi target for volume " + "id:%(vol_id)s.") % locals()) + raise exception.NotFound() + + return tid + + def remove_iscsi_target(self, tid, lun, vol_id, **kwargs): + LOG.info(_('Removing iscsi_target: %s') % vol_id) + vol_uuid_name = 'volume-%s' % vol_id + iqn = '%s%s' % (FLAGS.iscsi_target_prefix, vol_uuid_name) + + try: + self._execute('rtstool', + 'delete', + iqn, + run_as_root=True) + except exception.ProcessExecutionError as e: + LOG.error(_("Failed to remove iscsi target for volume " + "id:%(vol_id)s.") % locals()) + LOG.error("%s" % str(e)) + raise exception.ISCSITargetRemoveFailed(volume_id=vol_id) + + def show_target(self, tid, iqn=None, **kwargs): + if iqn is None: + raise exception.InvalidParameterValue( + err=_('valid iqn needed for show_target')) + + tid = self._get_target(iqn) + if tid is None: + raise exception.NotFound() + + def initialize_connection(self, volume, connector): + volume_iqn = volume['provider_location'].split(' ')[1] + + (auth_method, auth_user, auth_pass) = 
\ + volume['provider_auth'].split(' ', 3) + + # Add initiator iqns to target ACL + try: + self._execute('rtstool', 'add-initiator', + volume_iqn, + auth_user, + auth_pass, + connector['initiator'], + run_as_root=True) + except exception.ProcessExecutionError as e: + LOG.error(_("Failed to add initiator iqn %s to target") % + connector['initiator']) + raise exception.ISCSITargetAttachFailed(volume_id=volume['id']) + + +def get_target_admin(): + if FLAGS.iscsi_helper == 'tgtadm': + return TgtAdm() + elif FLAGS.iscsi_helper == 'fake': + return FakeIscsiHelper() + elif FLAGS.iscsi_helper == 'lioadm': + return LioAdm() + else: + return IetAdm() diff --git a/cinder/brick/local_dev/__init__.py b/cinder/brick/local_dev/__init__.py new file mode 100644 index 0000000000..5e8da711fb --- /dev/null +++ b/cinder/brick/local_dev/__init__.py @@ -0,0 +1,16 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/cinder/brick/local_dev/lvm.py b/cinder/brick/local_dev/lvm.py new file mode 100644 index 0000000000..e01104427f --- /dev/null +++ b/cinder/brick/local_dev/lvm.py @@ -0,0 +1,368 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +LVM class for performing LVM operations. +""" + +import math + +from itertools import izip + +from cinder.openstack.common.gettextutils import _ +from cinder.openstack.common import log as logging +from cinder.openstack.common import processutils as putils + +LOG = logging.getLogger(__name__) + + +class VolumeGroupNotFound(Exception): + def __init__(self, vg_name): + message = (_('Unable to find Volume Group: %s') % vg_name) + super(VolumeGroupNotFound, self).__init__(message) + + +class VolumeGroupCreationFailed(Exception): + def __init__(self, vg_name): + message = (_('Failed to create Volume Group: %s') % vg_name) + super(VolumeGroupCreationFailed, self).__init__(message) + + +class LVM(object): + """LVM object to enable various LVM related operations.""" + + def __init__(self, vg_name, create_vg=False, + physical_volumes=None): + """Initialize the LVM object. + + The LVM object is based on an LVM VolumeGroup, one instantiation + for each VolumeGroup you have/use. 
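A minimal usage sketch of the class, under the assumptions of this docstring (device path and LV names hypothetical):

    from cinder.brick.local_dev import lvm

    vg = lvm.LVM('cinder-volumes', create_vg=True,
                 physical_volumes=['/dev/sdb'])
    vg.create_volume('volume-0001', '10')    # a 10G logical volume
    vg.create_lv_snapshot('volume-0001-snap', 'volume-0001')
    vg.delete('volume-0001-snap')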
+ + :param vg_name: Name of existing VG or VG to create + :param create_vg: Indicates the VG doesn't exist + and we want to create it + :param physical_volumes: List of PVs to build VG on + + """ + self.vg_name = vg_name + self.pv_list = [] + self.lv_list = [] + self.vg_size = 0 + self.vg_available_space = 0 + self.vg_lv_count = 0 + self.vg_uuid = None + + if create_vg and physical_volumes is not None: + self.pv_list = physical_volumes + + try: + self._create_vg(physical_volumes) + except putils.ProcessExecutionError as err: + LOG.exception(_('Error creating Volume Group')) + LOG.error(_('Cmd :%s') % err.cmd) + LOG.error(_('StdOut :%s') % err.stdout) + LOG.error(_('StdErr :%s') % err.stderr) + raise VolumeGroupCreationFailed(vg_name=self.vg_name) + + if self._vg_exists() is False: + LOG.error(_('Unable to locate Volume Group %s') % vg_name) + raise VolumeGroupNotFound(vg_name=vg_name) + + def _size_str(self, size_in_g): + if '.00' in size_in_g: + size_in_g = size_in_g.replace('.00', '') + + if int(size_in_g) == 0: + return '100M' + + return '%sG' % size_in_g + + def _vg_exists(self): + """Simple check to see if VG exists. + + :returns: True if vg specified in object exists, else False + + """ + exists = False + cmd = ['vgs', '--noheadings', '-o', 'name'] + (out, err) = putils.execute(*cmd, root_helper='sudo', run_as_root=True) + + if out is not None: + volume_groups = out.split() + if self.vg_name in volume_groups: + exists = True + + return exists + + def _create_vg(self, pv_list): + cmd = ['vgcreate', self.vg_name, ','.join(pv_list)] + putils.execute(*cmd, root_helper='sudo', run_as_root=True) + + def _get_vg_uuid(self): + (out, err) = putils.execute('vgs', '--noheadings', + '-o uuid', self.vg_name) + if out is not None: + return out.split() + else: + return [] + + @staticmethod + def supports_thin_provisioning(): + """Static method to check for thin LVM support on a system. + + :returns: True if supported, False otherwise + + """ + cmd = ['vgs', '--version'] + (out, err) = putils.execute(*cmd, root_helper='sudo', run_as_root=True) + lines = out.split('\n') + + for line in lines: + if 'LVM version' in line: + version_list = line.split() + version = version_list[2] + if '(2)' in version: + version = version.replace('(2)', '') + version_tuple = tuple(map(int, version.split('.'))) + if version_tuple >= (2, 2, 95): + return True + return False + + @staticmethod + def get_all_volumes(vg_name=None): + """Static method to get all LV's on a system. + + :param vg_name: optional, gathers info for only the specified VG + :returns: List of Dictionaries with LV info + + """ + cmd = ['lvs', '--noheadings', '-o', 'vg_name,name,size'] + if vg_name is not None: + cmd += [vg_name] + + (out, err) = putils.execute(*cmd, root_helper='sudo', run_as_root=True) + + lv_list = [] + if out is not None: + volumes = out.split() + for vg, name, size in izip(*[iter(volumes)] * 3): + lv_list.append({"vg": vg, "name": name, "size": size}) + + return lv_list + + def get_volumes(self): + """Get all LV's associated with this instantiation (VG). + + :returns: List of Dictionaries with LV info + + """ + self.lv_list = self.get_all_volumes(self.vg_name) + return self.lv_list + + def get_volume(self, name): + """Get reference object of volume specified by name. + + :returns: dict representation of Logical Volume if exists + + """ + ref_list = self.get_volumes() + for r in ref_list: + if r['name'] == name: + return r + + @staticmethod + def get_all_physical_volumes(vg_name=None): + """Static method to get all PVs on a system. 
+ + :param vg_name: optional, gathers info for only the specified VG + :returns: List of Dictionaries with PV info + + """ + cmd = ['pvs', '--noheadings', + '-o', 'vg_name,name,size,free', + '--separator', ':'] + if vg_name is not None: + cmd += [vg_name] + + (out, err) = putils.execute(*cmd, root_helper='sudo', run_as_root=True) + + pv_list = [] + if out is not None: + pvs = out.split() + for pv in pvs: + fields = pv.split(':') + pv_list.append({'vg': fields[0], + 'name': fields[1], + 'size': fields[2], + 'available': fields[3]}) + + return pv_list + + def get_physical_volumes(self): + """Get all PVs associated with this instantiation (VG). + + :returns: List of Dictionaries with PV info + + """ + self.pv_list = self.get_all_physical_volumes(self.vg_name) + return self.pv_list + + @staticmethod + def get_all_volume_groups(vg_name=None): + """Static method to get all VGs on a system. + + :param vg_name: optional, gathers info for only the specified VG + :returns: List of Dictionaries with VG info + + """ + cmd = ['vgs', '--noheadings', + '-o', 'name,size,free,lv_count,uuid', + '--separator', ':'] + if vg_name is not None: + cmd += [vg_name] + + (out, err) = putils.execute(*cmd, root_helper='sudo', run_as_root=True) + + vg_list = [] + if out is not None: + vgs = out.split() + for vg in vgs: + fields = vg.split(':') + vg_list.append({'name': fields[0], + 'size': fields[1], + 'available': fields[2], + 'lv_count': fields[3], + 'uuid': fields[4]}) + + return vg_list + + def update_volume_group_info(self): + """Update VG info for this instantiation. + + Used to update member fields of object and + provide a dict of info for caller. + + :returns: Dictionaries of VG info + + """ + vg_list = self.get_all_volume_groups(self.vg_name) + + if len(vg_list) != 1: + LOG.error(_('Unable to find VG: %s') % self.vg_name) + raise VolumeGroupNotFound(vg_name=self.vg_name) + + self.vg_size = vg_list[0]['size'] + self.vg_available_space = vg_list[0]['available'] + self.vg_lv_count = vg_list[0]['lv_count'] + self.vg_uuid = vg_list[0]['uuid'] + + return vg_list[0] + + def create_thin_pool(self, name=None, size_str=0): + """Creates a thin provisioning pool for this VG. + + :param name: Name to use for pool, default is "-pool" + :param size_str: Size to allocate for pool, default is entire VG + + """ + + if not self.supports_thin_provisioning(): + LOG.error(_('Requested to setup thin provisioning, ' + 'however current LVM version does not ' + 'support it.')) + return None + + if name is None: + name = '%s-pool' % self.vg_name + + if size_str == 0: + self.update_volume_group_info() + size_str = self.vg_size + + self.create_volume(name, size_str, 'thin') + + def create_volume(self, name, size_str, lv_type='default', mirror_count=0): + """Creates a logical volume on the object's VG. + + :param name: Name to use when creating Logical Volume + :param size_str: Size to use when creating Logical Volume + :param lv_type: Type of Volume (default or thin) + :param mirror_count: Use LVM mirroring with specified count + + """ + size = self._size_str(size_str) + cmd = ['lvcreate', '-n', name, self.vg_name] + if lv_type == 'thin': + cmd += ['-T', '-V', size] + else: + cmd += ['-L', size] + + if mirror_count > 0: + cmd += ['-m', mirror_count, '--nosync'] + terras = int(size[:-1]) / 1024.0 + if terras >= 1.5: + rsize = int(2 ** math.ceil(math.log(terras) / math.log(2))) + # NOTE(vish): Next power of two for region size. 
See: + # http://red.ht/U2BPOD + cmd += ['-R', str(rsize)] + + putils.execute(*cmd, + root_helper='sudo', + run_as_root=True) + + def create_lv_snapshot(self, name, source_lv_name, lv_type='default'): + """Creates a snapshot of a logical volume. + + :param name: Name to assign to new snapshot + :param source_lv_name: Name of Logical Volume to snapshot + :param lv_type: Type of LV (default or thin) + + """ + source_lvref = self.get_volume(source_lv_name) + if source_lvref is None: + LOG.error(_("Unable to find LV: %s") % source_lv_name) + return False + cmd = ['lvcreate', '--name', name, + '--snapshot', '%s/%s' % (self.vg_name, source_lv_name)] + if lv_type != 'thin': + size = source_lvref['size'] + cmd += ['-L', size] + + putils.execute(*cmd, + root_helper='sudo', + run_as_root=True) + + def delete(self, name): + """Delete logical volume or snapshot. + + :param name: Name of LV to delete + + """ + putils.execute('lvremove', + '-f', + '%s/%s' % (self.vg_name, name), + root_helper='sudo', run_as_root=True) + + def revert(self, snapshot_name): + """Revert an LV from snapshot. + + :param snapshot_name: Name of snapshot to revert + + """ + putils.execute('lvconvert', '--merge', + snapshot_name, root_helper='sudo', + run_as_root=True) diff --git a/cinder/common/__init__.py b/cinder/common/__init__.py new file mode 100644 index 0000000000..0a3b98867a --- /dev/null +++ b/cinder/common/__init__.py @@ -0,0 +1,15 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/cinder/common/sqlalchemyutils.py b/cinder/common/sqlalchemyutils.py new file mode 100755 index 0000000000..19b7ca9ea7 --- /dev/null +++ b/cinder/common/sqlalchemyutils.py @@ -0,0 +1,128 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# Copyright 2010-2011 OpenStack LLC. +# Copyright 2012 Justin Santa Barbara +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Implementation of paginate query.""" + +import sqlalchemy + +from cinder import exception +from cinder.openstack.common import log as logging + + +LOG = logging.getLogger(__name__) + + +# copied from glance/db/sqlalchemy/api.py +def paginate_query(query, model, limit, sort_keys, marker=None, + sort_dir=None, sort_dirs=None): + """Returns a query with sorting / pagination criteria added. 
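The mirror region-size computation in create_volume() above rounds the volume's size in terabytes up to the next power of two before passing it to lvcreate as -R. A worked example with a hypothetical 3 TB mirrored volume:

    import math

    size = '3072G'
    terras = int(size[:-1]) / 1024.0    # 3.0
    # log2(3.0) ~= 1.585, ceil gives 2, so the region size is 2**2 = 4
    rsize = int(2 ** math.ceil(math.log(terras) / math.log(2)))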
+ + Pagination works by requiring a unique sort_key, specified by sort_keys. + (If sort_keys is not unique, then we risk looping through values.) + We use the last row in the previous page as the 'marker' for pagination. + So we must return values that follow the passed marker in the order. + With a single-valued sort_key, this would be easy: sort_key > X. + With a compound-valued sort_key (k1, k2, k3), we must do this to repeat + the lexicographical ordering: + (k1 > X1) or (k1 == X1 && k2 > X2) or (k1 == X1 && k2 == X2 && k3 > X3) + + We also have to cope with different sort_directions. + + Typically, the id of the last row is used as the client-facing pagination + marker; the actual marker object must then be fetched from the db and + passed in to us as marker. + + :param query: the query object to which we should add paging/sorting + :param model: the ORM model class + :param limit: maximum number of items to return + :param sort_keys: array of attributes by which results should be sorted + :param marker: the last item of the previous page; we return the next + results after this value. + :param sort_dir: direction in which results should be sorted (asc, desc) + :param sort_dirs: per-column array of sort_dirs, corresponding to sort_keys + + :rtype: sqlalchemy.orm.query.Query + :return: The query with sorting/pagination added. + """ + + if 'id' not in sort_keys: + # TODO(justinsb): If this ever gives a false-positive, check + # the actual primary key, rather than assuming it is 'id' + LOG.warn(_('Id not in sort_keys; is sort_keys unique?')) + + assert(not (sort_dir and sort_dirs)) + + # Default the sort direction to ascending + if sort_dirs is None and sort_dir is None: + sort_dir = 'asc' + + # Ensure a per-column sort direction + if sort_dirs is None: + sort_dirs = [sort_dir for _sort_key in sort_keys] + + assert(len(sort_dirs) == len(sort_keys)) + + # Add sorting + for current_sort_key, current_sort_dir in zip(sort_keys, sort_dirs): + sort_dir_func = { + 'asc': sqlalchemy.asc, + 'desc': sqlalchemy.desc, + }[current_sort_dir] + + try: + sort_key_attr = getattr(model, current_sort_key) + except AttributeError: + raise exception.InvalidInput(reason='Invalid sort key') + query = query.order_by(sort_dir_func(sort_key_attr)) + + # Add pagination + if marker is not None: + marker_values = [] + for sort_key in sort_keys: + v = getattr(marker, sort_key) + marker_values.append(v) + + # Build up an array of sort criteria as in the docstring + criteria_list = [] + for i in xrange(0, len(sort_keys)): + crit_attrs = [] + for j in xrange(0, i): + model_attr = getattr(model, sort_keys[j]) + crit_attrs.append((model_attr == marker_values[j])) + + model_attr = getattr(model, sort_keys[i]) + if sort_dirs[i] == 'desc': + crit_attrs.append((model_attr < marker_values[i])) + elif sort_dirs[i] == 'asc': + crit_attrs.append((model_attr > marker_values[i])) + else: + raise ValueError(_("Unknown sort direction, " + "must be 'desc' or 'asc'")) + + criteria = sqlalchemy.sql.and_(*crit_attrs) + criteria_list.append(criteria) + + f = sqlalchemy.sql.or_(*criteria_list) + query = query.filter(f) + + if limit is not None: + query = query.limit(limit) + + return query diff --git a/cinder/compute/__init__.py b/cinder/compute/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/cinder/compute/aggregate_states.py b/cinder/compute/aggregate_states.py new file mode 100644 index 0000000000..92e1940277 --- /dev/null +++ b/cinder/compute/aggregate_states.py @@ -0,0 +1,44 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4
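The OR-of-ANDs marker filter assembled in paginate_query() is exactly lexicographic ordering on the sort keys, the same rule Python applies when comparing tuples; a toy illustration with ascending direction on both keys:

    rows = [(1, 'a'), (1, 'b'), (2, 'a'), (2, 'b')]
    marker = (1, 'b')                     # last row of the previous page
    next_page = [r for r in rows if r > marker]
    print next_page                       # [(2, 'a'), (2, 'b')]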
+ +# Copyright 2010 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Possible states for host aggregates. + +An aggregate may be 'created', in which case the admin has triggered its +creation, but the underlying hypervisor pool has not actually been set up +yet. An aggregate may be 'changing', meaning that the underlying hypervisor +pool is being set up. An aggregate may be 'active', in which case the underlying +hypervisor pool is up and running. An aggregate may be 'dismissed' when it has +no hosts and it has been deleted. An aggregate may be in 'error' in all other +cases. +A 'created' aggregate becomes 'changing' during the first request of +adding a host. During a 'changing' status no other requests will be accepted; +this is to allow the hypervisor layer to instantiate the underlying pool +without any potential race condition that may occur in master/slave-based +configurations. The aggregate goes into the 'active' state when the underlying +pool has been correctly instantiated. +All other operations (e.g. add/remove hosts) that succeed will keep the +aggregate in the 'active' state. If a number of consecutive requests fail, +an 'active' aggregate goes into an 'error' state. To recover from such a state, +admin intervention is required. Currently an error state is irreversible, +that is, in order to recover from it an aggregate must be deleted. +""" + +CREATED = 'created' +CHANGING = 'changing' +ACTIVE = 'active' +ERROR = 'error' +DISMISSED = 'dismissed' diff --git a/cinder/context.py b/cinder/context.py new file mode 100644 index 0000000000..951496043e --- /dev/null +++ b/cinder/context.py @@ -0,0 +1,155 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""RequestContext: context for requests that persist through all of cinder.""" + +import copy +import uuid + +from cinder.openstack.common import local +from cinder.openstack.common import log as logging +from cinder.openstack.common import timeutils +from cinder import policy + + +LOG = logging.getLogger(__name__) + + +def generate_request_id(): + return 'req-' + str(uuid.uuid4()) + + +class RequestContext(object): + """Security context and request information. + + Represents the user taking a given action within the system.
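One way to summarize the aggregate lifecycle described in cinder/compute/aggregate_states.py above is a transition table; this summary is inferred from the prose and is not part of the module:

    TRANSITIONS = {
        CREATED: [CHANGING],          # first add-host request
        CHANGING: [ACTIVE, ERROR],    # pool setup succeeds or fails
        ACTIVE: [ERROR, DISMISSED],   # repeated failures, or deletion
        ERROR: [DISMISSED],           # recovery currently means deletion
    }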
+ + """ + + def __init__(self, user_id, project_id, is_admin=None, read_deleted="no", + roles=None, remote_address=None, timestamp=None, + request_id=None, auth_token=None, overwrite=True, + quota_class=None, **kwargs): + """ + :param read_deleted: 'no' indicates deleted records are hidden, 'yes' + indicates deleted records are visible, 'only' indicates that + *only* deleted records are visible. + + :param overwrite: Set to False to ensure that the greenthread local + copy of the index is not overwritten. + + :param kwargs: Extra arguments that might be present, but we ignore + because they possibly came in from older rpc messages. + """ + if kwargs: + LOG.warn(_('Arguments dropped when creating context: %s') % + str(kwargs)) + + self.user_id = user_id + self.project_id = project_id + self.roles = roles or [] + self.is_admin = is_admin + if self.is_admin is None: + self.is_admin = policy.check_is_admin(self.roles) + elif self.is_admin and 'admin' not in self.roles: + self.roles.append('admin') + self.read_deleted = read_deleted + self.remote_address = remote_address + if not timestamp: + timestamp = timeutils.utcnow() + if isinstance(timestamp, basestring): + timestamp = timeutils.parse_strtime(timestamp) + self.timestamp = timestamp + if not request_id: + request_id = generate_request_id() + self.request_id = request_id + self.auth_token = auth_token + self.quota_class = quota_class + if overwrite or not hasattr(local.store, 'context'): + self.update_store() + + def _get_read_deleted(self): + return self._read_deleted + + def _set_read_deleted(self, read_deleted): + if read_deleted not in ('no', 'yes', 'only'): + raise ValueError(_("read_deleted can only be one of 'no', " + "'yes' or 'only', not %r") % read_deleted) + self._read_deleted = read_deleted + + def _del_read_deleted(self): + del self._read_deleted + + read_deleted = property(_get_read_deleted, _set_read_deleted, + _del_read_deleted) + + def update_store(self): + local.store.context = self + + def to_dict(self): + return {'user_id': self.user_id, + 'project_id': self.project_id, + 'is_admin': self.is_admin, + 'read_deleted': self.read_deleted, + 'roles': self.roles, + 'remote_address': self.remote_address, + 'timestamp': timeutils.strtime(self.timestamp), + 'request_id': self.request_id, + 'auth_token': self.auth_token, + 'quota_class': self.quota_class, + 'tenant': self.tenant, + 'user': self.user} + + @classmethod + def from_dict(cls, values): + return cls(**values) + + def elevated(self, read_deleted=None, overwrite=False): + """Return a version of this context with admin flag set.""" + context = copy.copy(self) + context.is_admin = True + + if 'admin' not in context.roles: + context.roles.append('admin') + + if read_deleted is not None: + context.read_deleted = read_deleted + + return context + + # NOTE(sirp): the openstack/common version of RequestContext uses + # tenant/user whereas the Cinder version uses project_id/user_id. We need + # this shim in order to use context-aware code from openstack/common, like + # logging, until we make the switch to using openstack/common's version of + # RequestContext. 
+ @property + def tenant(self): + return self.project_id + + @property + def user(self): + return self.user_id + + +def get_admin_context(read_deleted="no"): + return RequestContext(user_id=None, + project_id=None, + is_admin=True, + read_deleted=read_deleted, + overwrite=False) diff --git a/cinder/db/__init__.py b/cinder/db/__init__.py new file mode 100644 index 0000000000..f4eb417ec9 --- /dev/null +++ b/cinder/db/__init__.py @@ -0,0 +1,23 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +DB abstraction for Cinder +""" + +from cinder.db.api import * diff --git a/cinder/db/api.py b/cinder/db/api.py new file mode 100644 index 0000000000..dd546e64b9 --- /dev/null +++ b/cinder/db/api.py @@ -0,0 +1,883 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 X.commerce, a business unit of eBay Inc. +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Defines interface for DB access. + +The underlying driver is loaded as a :class:`LazyPluggable`. + +Functions in this module are imported into the cinder.db namespace. Call these +functions from cinder.db namespace, not the cinder.db.api namespace. + +All functions in this module return objects that implement a dictionary-like +interface. Currently, many of these objects are sqlalchemy objects that +implement a dictionary interface. However, a future goal is to have all of +these objects be simple dictionaries. + + +**Related Flags** + +:db_backend: string to lookup in the list of LazyPluggable backends. + `sqlalchemy` is the only supported backend right now. + +:sql_connection: string specifying the sqlalchemy connection to use, like: + `sqlite:///var/lib/cinder/cinder.sqlite`.
+ +:enable_new_services: when adding a new service to the database, whether it + is put in the pool of available hardware (Default: True) + +""" + +from oslo.config import cfg + +from cinder import exception +from cinder import flags +from cinder import utils + +db_opts = [ + cfg.StrOpt('db_backend', + default='sqlalchemy', + help='The backend to use for db'), + cfg.BoolOpt('enable_new_services', + default=True, + help='Services to be added to the available pool on create'), + cfg.StrOpt('volume_name_template', + default='volume-%s', + help='Template string to be used to generate volume names'), + cfg.StrOpt('share_name_template', + default='share-%s', + help='Template string to be used to generate share names'), + cfg.StrOpt('share_snapshot_name_template', + default='share-snapshot-%s', + help='Template string to be used to generate share snapshot ' + 'names'), + cfg.StrOpt('snapshot_name_template', + default='snapshot-%s', + help='Template string to be used to generate snapshot names'), + cfg.StrOpt('backup_name_template', + default='backup-%s', + help='Template string to be used to generate backup names'), ] + +FLAGS = flags.FLAGS +FLAGS.register_opts(db_opts) + +IMPL = utils.LazyPluggable('db_backend', + sqlalchemy='cinder.db.sqlalchemy.api') + + +class NoMoreTargets(exception.CinderException): + """No more available targets""" + pass + + +################### + + +def service_destroy(context, service_id): + """Destroy the service or raise if it does not exist.""" + return IMPL.service_destroy(context, service_id) + + +def service_get(context, service_id): + """Get a service or raise if it does not exist.""" + return IMPL.service_get(context, service_id) + + +def service_get_by_host_and_topic(context, host, topic): + """Get a service by host it's on and topic it listens to.""" + return IMPL.service_get_by_host_and_topic(context, host, topic) + + +def service_get_all(context, disabled=None): + """Get all services.""" + return IMPL.service_get_all(context, disabled) + + +def service_get_all_by_topic(context, topic): + """Get all services for a given topic.""" + return IMPL.service_get_all_by_topic(context, topic) + + +def service_get_all_by_host(context, host): + """Get all services for a given host.""" + return IMPL.service_get_all_by_host(context, host) + + +def service_get_all_volume_sorted(context): + """Get all volume services sorted by volume count. + + :returns: a list of (Service, volume_count) tuples. + + """ + return IMPL.service_get_all_volume_sorted(context) + + +def service_get_all_share_sorted(context): + """Get all share services sorted by share count. + + :returns: a list of (Service, share_count) tuples. + + """ + return IMPL.service_get_all_share_sorted(context) + + +def service_get_by_args(context, host, binary): + """Get the state of a service by node name and binary.""" + return IMPL.service_get_by_args(context, host, binary) + + +def service_create(context, values): + """Create a service from the values dictionary.""" + return IMPL.service_create(context, values) + + +def service_update(context, service_id, values): + """Set the given properties on a service and update it. + + Raises NotFound if service does not exist.
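cinder.utils.LazyPluggable itself is outside this hunk; as a rough stand-in (an assumption, not the real implementation), the dispatch idea behind IMPL is:

    import importlib

    class LazyBackendSketch(object):
        # Hypothetical stand-in: resolve the backend module on first
        # attribute access, then delegate every lookup to it.
        def __init__(self, pivot, **backends):
            self._backends = backends
            self._pivot = pivot      # name of the flag to consult
            self._impl = None

        def __getattr__(self, key):
            if self._impl is None:
                # the real class reads FLAGS[self._pivot]; 'sqlalchemy'
                # is hard-wired here only for the sketch
                self._impl = importlib.import_module(
                    self._backends['sqlalchemy'])
            return getattr(self._impl, key)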
+ + """ + return IMPL.service_update(context, service_id, values) + + +################### +def migration_update(context, id, values): + """Update a migration instance.""" + return IMPL.migration_update(context, id, values) + + +def migration_create(context, values): + """Create a migration record.""" + return IMPL.migration_create(context, values) + + +def migration_get(context, migration_id): + """Finds a migration by the id.""" + return IMPL.migration_get(context, migration_id) + + +def migration_get_by_instance_and_status(context, instance_uuid, status): + """Finds a migration by the instance uuid its migrating.""" + return IMPL.migration_get_by_instance_and_status(context, + instance_uuid, + status) + + +def migration_get_all_unconfirmed(context, confirm_window): + """Finds all unconfirmed migrations within the confirmation window.""" + return IMPL.migration_get_all_unconfirmed(context, confirm_window) + + +################### + + +def iscsi_target_count_by_host(context, host): + """Return count of export devices.""" + return IMPL.iscsi_target_count_by_host(context, host) + + +def iscsi_target_create_safe(context, values): + """Create an iscsi_target from the values dictionary. + + The device is not returned. If the create violates the unique + constraints because the iscsi_target and host already exist, + no exception is raised. + + """ + return IMPL.iscsi_target_create_safe(context, values) + + +############### + +def volume_allocate_iscsi_target(context, volume_id, host): + """Atomically allocate a free iscsi_target from the pool.""" + return IMPL.volume_allocate_iscsi_target(context, volume_id, host) + + +def volume_attached(context, volume_id, instance_id, mountpoint): + """Ensure that a volume is set as attached.""" + return IMPL.volume_attached(context, volume_id, instance_id, mountpoint) + + +def volume_create(context, values): + """Create a volume from the values dictionary.""" + return IMPL.volume_create(context, values) + + +def volume_data_get_for_host(context, host, session=None): + """Get (volume_count, gigabytes) for project.""" + return IMPL.volume_data_get_for_host(context, + host, + session) + + +def volume_data_get_for_project(context, project_id, session=None): + """Get (volume_count, gigabytes) for project.""" + return IMPL.volume_data_get_for_project(context, + project_id, + session) + + +def volume_destroy(context, volume_id): + """Destroy the volume or raise if it does not exist.""" + return IMPL.volume_destroy(context, volume_id) + + +def volume_detached(context, volume_id): + """Ensure that a volume is set as detached.""" + return IMPL.volume_detached(context, volume_id) + + +def volume_get(context, volume_id): + """Get a volume or raise if it does not exist.""" + return IMPL.volume_get(context, volume_id) + + +def volume_get_all(context, marker, limit, sort_key, sort_dir): + """Get all volumes.""" + return IMPL.volume_get_all(context, marker, limit, sort_key, sort_dir) + + +def volume_get_all_by_host(context, host): + """Get all volumes belonging to a host.""" + return IMPL.volume_get_all_by_host(context, host) + + +def volume_get_all_by_instance_uuid(context, instance_uuid): + """Get all volumes belonging to a instance.""" + return IMPL.volume_get_all_by_instance_uuid(context, instance_uuid) + + +def volume_get_all_by_project(context, project_id, marker, limit, sort_key, + sort_dir): + """Get all volumes belonging to a project.""" + return IMPL.volume_get_all_by_project(context, project_id, marker, limit, + sort_key, sort_dir) + + +def 
volume_get_iscsi_target_num(context, volume_id): + """Get the target num (tid) allocated to the volume.""" + return IMPL.volume_get_iscsi_target_num(context, volume_id) + + +def volume_update(context, volume_id, values): + """Set the given properties on a volume and update it. + + Raises NotFound if volume does not exist. + + """ + return IMPL.volume_update(context, volume_id, values) + + +#################### + + +def snapshot_create(context, values): + """Create a snapshot from the values dictionary.""" + return IMPL.snapshot_create(context, values) + + +def snapshot_destroy(context, snapshot_id): + """Destroy the snapshot or raise if it does not exist.""" + return IMPL.snapshot_destroy(context, snapshot_id) + + +def snapshot_get(context, snapshot_id): + """Get a snapshot or raise if it does not exist.""" + return IMPL.snapshot_get(context, snapshot_id) + + +def snapshot_get_all(context): + """Get all snapshots.""" + return IMPL.snapshot_get_all(context) + + +def snapshot_get_all_by_project(context, project_id): + """Get all snapshots belonging to a project.""" + return IMPL.snapshot_get_all_by_project(context, project_id) + + +def snapshot_get_all_for_volume(context, volume_id): + """Get all snapshots for a volume.""" + return IMPL.snapshot_get_all_for_volume(context, volume_id) + + +def snapshot_update(context, snapshot_id, values): + """Set the given properties on a snapshot and update it. + + Raises NotFound if snapshot does not exist. + + """ + return IMPL.snapshot_update(context, snapshot_id, values) + + +def snapshot_data_get_for_project(context, project_id, session=None): + """Get count and gigabytes used for snapshots for specified project.""" + return IMPL.snapshot_data_get_for_project(context, + project_id, + session) + + +def snapshot_get_active_by_window(context, begin, end=None, project_id=None): + """Get all the snapshots inside the window.
+ + Specifying a project_id will filter for a certain project.""" + return IMPL.snapshot_get_active_by_window(context, begin, end, project_id) + + +#################### + + +def snapshot_metadata_get(context, snapshot_id): + """Get all metadata for a snapshot.""" + return IMPL.snapshot_metadata_get(context, snapshot_id) + + +def snapshot_metadata_delete(context, snapshot_id, key): + """Delete the given metadata item.""" + IMPL.snapshot_metadata_delete(context, snapshot_id, key) + + +def snapshot_metadata_update(context, snapshot_id, metadata, delete): + """Update metadata if it exists, otherwise create it.""" + IMPL.snapshot_metadata_update(context, snapshot_id, metadata, delete) + + +#################### + + +def volume_metadata_get(context, volume_id): + """Get all metadata for a volume.""" + return IMPL.volume_metadata_get(context, volume_id) + + +def volume_metadata_delete(context, volume_id, key): + """Delete the given metadata item.""" + IMPL.volume_metadata_delete(context, volume_id, key) + + +def volume_metadata_update(context, volume_id, metadata, delete): + """Update metadata if it exists, otherwise create it.""" + IMPL.volume_metadata_update(context, volume_id, metadata, delete) + + +################## + + +def volume_type_create(context, values): + """Create a new volume type.""" + return IMPL.volume_type_create(context, values) + + +def volume_type_get_all(context, inactive=False): + """Get all volume types.""" + return IMPL.volume_type_get_all(context, inactive) + + +def volume_type_get(context, id): + """Get volume type by id.""" + return IMPL.volume_type_get(context, id) + + +def volume_type_get_by_name(context, name): + """Get volume type by name.""" + return IMPL.volume_type_get_by_name(context, name) + + +def volume_type_destroy(context, id): + """Delete a volume type.""" + return IMPL.volume_type_destroy(context, id) + + +def volume_get_active_by_window(context, begin, end=None, project_id=None): + """Get all the volumes inside the window. + + Specifying a project_id will filter for a certain project.""" + return IMPL.volume_get_active_by_window(context, begin, end, project_id) + + +#################### + + +def volume_type_extra_specs_get(context, volume_type_id): + """Get all extra specs for a volume type.""" + return IMPL.volume_type_extra_specs_get(context, volume_type_id) + + +def volume_type_extra_specs_delete(context, volume_type_id, key): + """Delete the given extra specs item.""" + IMPL.volume_type_extra_specs_delete(context, volume_type_id, key) + + +def volume_type_extra_specs_update_or_create(context, + volume_type_id, + extra_specs): + """Create or update volume type extra specs. 
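The delete flag in snapshot_metadata_update() and volume_metadata_update() above selects merge-versus-replace behavior; illustratively (semantics as described by the docstrings, not taken from the sqlalchemy backend):

    existing = {'a': '1', 'b': '2'}
    incoming = {'b': '9', 'c': '3'}
    merged = dict(existing, **incoming)   # delete=False: {'a': '1', 'b': '9', 'c': '3'}
    replaced = dict(incoming)             # delete=True: keys absent from
                                          # 'incoming' are dropped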
This adds or modifies the + key/value pairs specified in the extra specs dict argument""" + IMPL.volume_type_extra_specs_update_or_create(context, + volume_type_id, + extra_specs) + + +################### + + +def volume_glance_metadata_create(context, volume_id, key, value): + """Update the Glance metadata for the specified volume.""" + return IMPL.volume_glance_metadata_create(context, + volume_id, + key, + value) + + +def volume_glance_metadata_get(context, volume_id): + """Return the glance metadata for a volume.""" + return IMPL.volume_glance_metadata_get(context, volume_id) + + +def volume_snapshot_glance_metadata_get(context, snapshot_id): + """Return the Glance metadata for the specified snapshot.""" + return IMPL.volume_snapshot_glance_metadata_get(context, snapshot_id) + + +def volume_glance_metadata_copy_to_snapshot(context, snapshot_id, volume_id): + """ + Update the Glance metadata for a snapshot by copying all of the key:value + pairs from the originating volume. This is so that a volume created from + the snapshot will retain the original metadata. + """ + return IMPL.volume_glance_metadata_copy_to_snapshot(context, snapshot_id, + volume_id) + + +def volume_glance_metadata_copy_to_volume(context, volume_id, snapshot_id): + """ + Update the Glance metadata from a volume (created from a snapshot) by + copying all of the key:value pairs from the originating snapshot. This is + so that the Glance metadata from the original volume is retained. + """ + return IMPL.volume_glance_metadata_copy_to_volume(context, volume_id, + snapshot_id) + + +def volume_glance_metadata_delete_by_volume(context, volume_id): + """Delete the glance metadata for a volume.""" + return IMPL.volume_glance_metadata_delete_by_volume(context, volume_id) + + +def volume_glance_metadata_delete_by_snapshot(context, snapshot_id): + """Delete the glance metadata for a snapshot.""" + return IMPL.volume_glance_metadata_delete_by_snapshot(context, snapshot_id) + + +def volume_glance_metadata_copy_from_volume_to_volume(context, + src_volume_id, + volume_id): + """ + Update the Glance metadata for a volume by copying all of the key:value + pairs from the originating volume. This is so that a volume created from + the volume (clone) will retain the original metadata. 
+ """ + return IMPL.volume_glance_metadata_copy_from_volume_to_volume( + context, + src_volume_id, + volume_id) + +################### + + +def sm_backend_conf_create(context, values): + """Create a new SM Backend Config entry.""" + return IMPL.sm_backend_conf_create(context, values) + + +def sm_backend_conf_update(context, sm_backend_conf_id, values): + """Update a SM Backend Config entry.""" + return IMPL.sm_backend_conf_update(context, sm_backend_conf_id, values) + + +def sm_backend_conf_delete(context, sm_backend_conf_id): + """Delete a SM Backend Config.""" + return IMPL.sm_backend_conf_delete(context, sm_backend_conf_id) + + +def sm_backend_conf_get(context, sm_backend_conf_id): + """Get a specific SM Backend Config.""" + return IMPL.sm_backend_conf_get(context, sm_backend_conf_id) + + +def sm_backend_conf_get_by_sr(context, sr_uuid): + """Get a specific SM Backend Config.""" + return IMPL.sm_backend_conf_get_by_sr(context, sr_uuid) + + +def sm_backend_conf_get_all(context): + """Get all SM Backend Configs.""" + return IMPL.sm_backend_conf_get_all(context) + + +#################### + + +def sm_flavor_create(context, values): + """Create a new SM Flavor entry.""" + return IMPL.sm_flavor_create(context, values) + + +def sm_flavor_update(context, sm_flavor_id, values): + """Update a SM Flavor entry.""" + return IMPL.sm_flavor_update(context, values) + + +def sm_flavor_delete(context, sm_flavor_id): + """Delete a SM Flavor.""" + return IMPL.sm_flavor_delete(context, sm_flavor_id) + + +def sm_flavor_get(context, sm_flavor): + """Get a specific SM Flavor.""" + return IMPL.sm_flavor_get(context, sm_flavor) + + +def sm_flavor_get_all(context): + """Get all SM Flavors.""" + return IMPL.sm_flavor_get_all(context) + + +#################### + + +def sm_volume_create(context, values): + """Create a new child Zone entry.""" + return IMPL.sm_volume_create(context, values) + + +def sm_volume_update(context, volume_id, values): + """Update a child Zone entry.""" + return IMPL.sm_volume_update(context, values) + + +def sm_volume_delete(context, volume_id): + """Delete a child Zone.""" + return IMPL.sm_volume_delete(context, volume_id) + + +def sm_volume_get(context, volume_id): + """Get a specific child Zone.""" + return IMPL.sm_volume_get(context, volume_id) + + +def sm_volume_get_all(context): + """Get all child Zones.""" + return IMPL.sm_volume_get_all(context) + +################### + + +def quota_create(context, project_id, resource, limit): + """Create a quota for the given project and resource.""" + return IMPL.quota_create(context, project_id, resource, limit) + + +def quota_get(context, project_id, resource): + """Retrieve a quota or raise if it does not exist.""" + return IMPL.quota_get(context, project_id, resource) + + +def quota_get_all_by_project(context, project_id): + """Retrieve all quotas associated with a given project.""" + return IMPL.quota_get_all_by_project(context, project_id) + + +def quota_update(context, project_id, resource, limit): + """Update a quota or raise if it does not exist.""" + return IMPL.quota_update(context, project_id, resource, limit) + + +def quota_destroy(context, project_id, resource): + """Destroy the quota or raise if it does not exist.""" + return IMPL.quota_destroy(context, project_id, resource) + + +################### + + +def quota_class_create(context, class_name, resource, limit): + """Create a quota class for the given name and resource.""" + return IMPL.quota_class_create(context, class_name, resource, limit) + + +def quota_class_get(context, 
class_name, resource): + """Retrieve a quota class or raise if it does not exist.""" + return IMPL.quota_class_get(context, class_name, resource) + + +def quota_class_get_all_by_name(context, class_name): + """Retrieve all quotas associated with a given quota class.""" + return IMPL.quota_class_get_all_by_name(context, class_name) + + +def quota_class_update(context, class_name, resource, limit): + """Update a quota class or raise if it does not exist.""" + return IMPL.quota_class_update(context, class_name, resource, limit) + + +def quota_class_destroy(context, class_name, resource): + """Destroy the quota class or raise if it does not exist.""" + return IMPL.quota_class_destroy(context, class_name, resource) + + +def quota_class_destroy_all_by_name(context, class_name): + """Destroy all quotas associated with a given quota class.""" + return IMPL.quota_class_destroy_all_by_name(context, class_name) + + +################### + + +def quota_usage_create(context, project_id, resource, in_use, reserved, + until_refresh): + """Create a quota usage for the given project and resource.""" + return IMPL.quota_usage_create(context, project_id, resource, + in_use, reserved, until_refresh) + + +def quota_usage_get(context, project_id, resource): + """Retrieve a quota usage or raise if it does not exist.""" + return IMPL.quota_usage_get(context, project_id, resource) + + +def quota_usage_get_all_by_project(context, project_id): + """Retrieve all usage associated with a given project.""" + return IMPL.quota_usage_get_all_by_project(context, project_id) + + +################### + + +def reservation_create(context, uuid, usage, project_id, resource, delta, + expire): + """Create a reservation for the given project and resource.""" + return IMPL.reservation_create(context, uuid, usage, project_id, + resource, delta, expire) + + +def reservation_get(context, uuid): + """Retrieve a reservation or raise if it does not exist.""" + return IMPL.reservation_get(context, uuid) + + +def reservation_get_all_by_project(context, project_id): + """Retrieve all reservations associated with a given project.""" + return IMPL.reservation_get_all_by_project(context, project_id) + + +def reservation_destroy(context, uuid): + """Destroy the reservation or raise if it does not exist.""" + return IMPL.reservation_destroy(context, uuid) + + +################### + + +def quota_reserve(context, resources, quotas, deltas, expire, + until_refresh, max_age, project_id=None): + """Check quotas and create appropriate reservations.""" + return IMPL.quota_reserve(context, resources, quotas, deltas, expire, + until_refresh, max_age, project_id=project_id) + + +def reservation_commit(context, reservations, project_id=None): + """Commit quota reservations.""" + return IMPL.reservation_commit(context, reservations, + project_id=project_id) + + +def reservation_rollback(context, reservations, project_id=None): + """Roll back quota reservations.""" + return IMPL.reservation_rollback(context, reservations, + project_id=project_id) + + +def quota_destroy_all_by_project(context, project_id): + """Destroy all quotas associated with a given project.""" + return IMPL.quota_destroy_all_by_project(context, project_id) + + +def reservation_expire(context): + """Roll back any expired reservations.""" + return IMPL.reservation_expire(context) + + +################### + + +def backup_get(context, backup_id): + """Get a backup or raise if it does not exist.""" + return IMPL.backup_get(context, backup_id) + + +def backup_get_all(context): + """Get all 
backups.""" + return IMPL.backup_get_all(context) + + +def backup_get_all_by_host(context, host): + """Get all backups belonging to a host.""" + return IMPL.backup_get_all_by_host(context, host) + + +def backup_create(context, values): + """Create a backup from the values dictionary.""" + return IMPL.backup_create(context, values) + + +def backup_get_all_by_project(context, project_id): + """Get all backups belonging to a project.""" + return IMPL.backup_get_all_by_project(context, project_id) + + +def backup_update(context, backup_id, values): + """ + Set the given properties on a backup and update it. + + Raises NotFound if backup does not exist. + """ + return IMPL.backup_update(context, backup_id, values) + + +def backup_destroy(context, backup_id): + """Destroy the backup or raise if it does not exist.""" + return IMPL.backup_destroy(context, backup_id) + + +#################### + + +def share_create(context, values): + """Create new share.""" + return IMPL.share_create(context, values) + + +def share_update(context, share_id, values): + """Update share fields.""" + return IMPL.share_update(context, share_id, values) + + +def share_get(context, share_id): + """Get share by id.""" + return IMPL.share_get(context, share_id) + + +def share_get_all(context): + """Get all shares.""" + return IMPL.share_get_all(context) + + +def share_get_all_by_host(context, host): + """Returns all shares with given host.""" + return IMPL.share_get_all_by_host(context, host) + + +def share_get_all_by_project(context, project_id): + """Returns all shares with given project ID.""" + return IMPL.share_get_all_by_project(context, project_id) + + +def share_delete(context, share_id): + """Delete share.""" + return IMPL.share_delete(context, share_id) + + +################### + + +def share_access_create(context, values): + """Allow access to share.""" + return IMPL.share_access_create(context, values) + + +def share_access_get(context, access_id): + """Allow access to share.""" + return IMPL.share_access_get(context, access_id) + + +def share_access_get_all_for_share(context, share_id): + """Allow access to share.""" + return IMPL.share_access_get_all_for_share(context, share_id) + + +def share_access_delete(context, access_id): + """Deny access to share.""" + return IMPL.share_access_delete(context, access_id) + + +def share_access_update(context, access_id, values): + """Update access record.""" + return IMPL.share_access_update(context, access_id, values) + + +#################### + + +def share_snapshot_create(context, values): + """Create a snapshot from the values dictionary.""" + return IMPL.share_snapshot_create(context, values) + + +def share_snapshot_destroy(context, snapshot_id): + """Destroy the snapshot or raise if it does not exist.""" + return IMPL.share_snapshot_destroy(context, snapshot_id) + + +def share_snapshot_get(context, snapshot_id): + """Get a snapshot or raise if it does not exist.""" + return IMPL.share_snapshot_get(context, snapshot_id) + + +def share_snapshot_get_all(context): + """Get all snapshots.""" + return IMPL.share_snapshot_get_all(context) + + +def share_snapshot_get_all_by_project(context, project_id): + """Get all snapshots belonging to a project.""" + return IMPL.share_snapshot_get_all_by_project(context, project_id) + + +def share_snapshot_get_all_for_share(context, share_id): + """Get all snapshots for a share.""" + return IMPL.share_snapshot_get_all_for_share(context, share_id) + + +def share_snapshot_update(context, snapshot_id, values): + """Set the given properties 
on a snapshot and update it. + + Raises NotFound if snapshot does not exist. + """ + return IMPL.share_snapshot_update(context, snapshot_id, values) + + +def share_snapshot_data_get_for_project(context, project_id, session=None): + """Get count and gigabytes used for snapshots for specified project.""" + return IMPL.share_snapshot_data_get_for_project(context, + project_id, + session=session) + + +#################### diff --git a/cinder/db/base.py b/cinder/db/base.py new file mode 100644 index 0000000000..edfbeb626c --- /dev/null +++ b/cinder/db/base.py @@ -0,0 +1,40 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Base class for classes that need modular database access.""" + +from oslo.config import cfg + +from cinder import flags +from cinder.openstack.common import importutils + +db_driver_opt = cfg.StrOpt('db_driver', + default='cinder.db', + help='driver to use for database access') + +FLAGS = flags.FLAGS +FLAGS.register_opt(db_driver_opt) + + +class Base(object): + """DB driver is injected in the init method.""" + + def __init__(self, db_driver=None): + if not db_driver: + db_driver = FLAGS.db_driver + self.db = importutils.import_module(db_driver) # pylint: disable=C0103 diff --git a/cinder/db/migration.py b/cinder/db/migration.py new file mode 100644 index 0000000000..6079055e50 --- /dev/null +++ b/cinder/db/migration.py @@ -0,0 +1,38 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +"""Database setup and migration commands.""" + +from cinder import utils + + +IMPL = utils.LazyPluggable('db_backend', + sqlalchemy='cinder.db.sqlalchemy.migration') + + +INIT_VERSION = 000 + + +def db_sync(version=None): + """Migrate the database to `version` or the most recent version.""" + return IMPL.db_sync(version=version) + + +def db_version(): + """Display the current database version.""" + return IMPL.db_version() diff --git a/cinder/db/sqlalchemy/__init__.py b/cinder/db/sqlalchemy/__init__.py new file mode 100644 index 0000000000..747015af53 --- /dev/null +++ b/cinder/db/sqlalchemy/__init__.py @@ -0,0 +1,17 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/cinder/db/sqlalchemy/api.py b/cinder/db/sqlalchemy/api.py new file mode 100644 index 0000000000..bc0c6c2bc1 --- /dev/null +++ b/cinder/db/sqlalchemy/api.py @@ -0,0 +1,2243 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 X.commerce, a business unit of eBay Inc. +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +"""Implementation of SQLAlchemy backend.""" + +import datetime +import uuid +import warnings + +from sqlalchemy.exc import IntegrityError +from sqlalchemy import or_ +from sqlalchemy.orm import joinedload +from sqlalchemy.sql.expression import literal_column +from sqlalchemy.sql import func + +from cinder.common import sqlalchemyutils +from cinder import db +from cinder.db.sqlalchemy import models +from cinder.db.sqlalchemy.session import get_session +from cinder import exception +from cinder import flags +from cinder.openstack.common import log as logging +from cinder.openstack.common import timeutils +from cinder.openstack.common import uuidutils + + +FLAGS = flags.FLAGS + +LOG = logging.getLogger(__name__) + + +def is_admin_context(context): + """Indicates if the request context is an administrator.""" + if not context: + warnings.warn(_('Use of empty request context is deprecated'), + DeprecationWarning) + raise Exception('die') + return context.is_admin + + +def is_user_context(context): + """Indicates if the request context is a normal user.""" + if not context: + return False + if context.is_admin: + return False + if not context.user_id or not context.project_id: + return False + return True + + +def authorize_project_context(context, project_id): + """Ensures a request has permission to access the given project.""" + if is_user_context(context): + if not context.project_id: + raise exception.NotAuthorized() + elif context.project_id != project_id: + raise exception.NotAuthorized() + + +def authorize_user_context(context, user_id): + """Ensures a request has permission to access the given user.""" + if is_user_context(context): + if not context.user_id: + raise exception.NotAuthorized() + elif context.user_id != user_id: + raise exception.NotAuthorized() + + +def authorize_quota_class_context(context, class_name): + """Ensures a request has permission to access the given quota class.""" + if is_user_context(context): + if not context.quota_class: + raise exception.NotAuthorized() + elif context.quota_class != class_name: + raise exception.NotAuthorized() + + +def require_admin_context(f): + """Decorator to require admin request context. + + The first argument to the wrapped function must be the context. + + """ + + def wrapper(*args, **kwargs): + if not is_admin_context(args[0]): + raise exception.AdminRequired() + return f(*args, **kwargs) + return wrapper + + +def require_context(f): + """Decorator to require *any* user or admin context. + + This does no authorization for user or project access matching, see + :py:func:`authorize_project_context` and + :py:func:`authorize_user_context`. + + The first argument to the wrapped function must be the context. + + """ + + def wrapper(*args, **kwargs): + if not is_admin_context(args[0]) and not is_user_context(args[0]): + raise exception.NotAuthorized() + return f(*args, **kwargs) + return wrapper + + +def require_volume_exists(f): + """Decorator to require the specified volume to exist. + + Requires the wrapped function to use context and volume_id as + their first two arguments. + """ + + def wrapper(context, volume_id, *args, **kwargs): + db.volume_get(context, volume_id) + return f(context, volume_id, *args, **kwargs) + wrapper.__name__ = f.__name__ + return wrapper + + +def require_snapshot_exists(f): + """Decorator to require the specified snapshot to exist. + + Requires the wrapped function to use context and snapshot_id as + their first two arguments. 
+ """ + + def wrapper(context, snapshot_id, *args, **kwargs): + db.api.snapshot_get(context, snapshot_id) + return f(context, snapshot_id, *args, **kwargs) + wrapper.__name__ = f.__name__ + return wrapper + + +def model_query(context, *args, **kwargs): + """Query helper that accounts for context's `read_deleted` field. + + :param context: context to query under + :param session: if present, the session to use + :param read_deleted: if present, overrides context's read_deleted field. + :param project_only: if present and context is user-type, then restrict + query to match the context's project_id. + """ + session = kwargs.get('session') or get_session() + read_deleted = kwargs.get('read_deleted') or context.read_deleted + project_only = kwargs.get('project_only') + + query = session.query(*args) + + if read_deleted == 'no': + query = query.filter_by(deleted=False) + elif read_deleted == 'yes': + pass # omit the filter to include deleted and active + elif read_deleted == 'only': + query = query.filter_by(deleted=True) + else: + raise Exception( + _("Unrecognized read_deleted value '%s'") % read_deleted) + + if project_only and is_user_context(context): + query = query.filter_by(project_id=context.project_id) + + return query + + +def exact_filter(query, model, filters, legal_keys): + """Applies exact match filtering to a query. + + Returns the updated query. Modifies filters argument to remove + filters consumed. + + :param query: query to apply filters to + :param model: model object the query applies to, for IN-style + filtering + :param filters: dictionary of filters; values that are lists, + tuples, sets, or frozensets cause an 'IN' test to + be performed, while exact matching ('==' operator) + is used for other values + :param legal_keys: list of keys to apply exact filtering to + """ + + filter_dict = {} + + # Walk through all the keys + for key in legal_keys: + # Skip ones we're not filtering on + if key not in filters: + continue + + # OK, filtering on this key; what value do we search for? 
+ value = filters.pop(key) + + if isinstance(value, (list, tuple, set, frozenset)): + # Looking for values in a list; apply to query directly + column_attr = getattr(model, key) + query = query.filter(column_attr.in_(value)) + else: + # OK, simple exact match; save for later + filter_dict[key] = value + + # Apply simple exact matches + if filter_dict: + query = query.filter_by(**filter_dict) + + return query + + +################### + + +@require_admin_context +def service_destroy(context, service_id): + session = get_session() + with session.begin(): + service_ref = service_get(context, service_id, session=session) + service_ref.delete(session=session) + + +@require_admin_context +def service_get(context, service_id, session=None): + result = model_query( + context, + models.Service, + session=session).\ + filter_by(id=service_id).\ + first() + if not result: + raise exception.ServiceNotFound(service_id=service_id) + + return result + + +@require_admin_context +def service_get_all(context, disabled=None): + query = model_query(context, models.Service) + + if disabled is not None: + query = query.filter_by(disabled=disabled) + + return query.all() + + +@require_admin_context +def service_get_all_by_topic(context, topic): + return model_query( + context, models.Service, read_deleted="no").\ + filter_by(disabled=False).\ + filter_by(topic=topic).\ + all() + + +@require_admin_context +def service_get_by_host_and_topic(context, host, topic): + result = model_query( + context, models.Service, read_deleted="no").\ + filter_by(disabled=False).\ + filter_by(host=host).\ + filter_by(topic=topic).\ + first() + if not result: + raise exception.ServiceNotFound(service_id=None) + return result + + +@require_admin_context +def service_get_all_by_host(context, host): + return model_query( + context, models.Service, read_deleted="no").\ + filter_by(host=host).\ + all() + + +@require_admin_context +def _service_get_all_topic_subquery(context, session, topic, subq, label): + sort_value = getattr(subq.c, label) + return model_query(context, models.Service, + func.coalesce(sort_value, 0), + session=session, read_deleted="no").\ + filter_by(topic=topic).\ + filter_by(disabled=False).\ + outerjoin((subq, models.Service.host == subq.c.host)).\ + order_by(sort_value).\ + all() + + +@require_admin_context +def service_get_all_volume_sorted(context): + session = get_session() + with session.begin(): + topic = FLAGS.volume_topic + label = 'volume_gigabytes' + subq = model_query(context, models.Volume.host, + func.sum(models.Volume.size).label(label), + session=session, read_deleted="no").\ + group_by(models.Volume.host).\ + subquery() + return _service_get_all_topic_subquery(context, + session, + topic, + subq, + label) + + +@require_admin_context +def service_get_all_share_sorted(context): + session = get_session() + with session.begin(): + topic = FLAGS.share_topic + label = 'share_gigabytes' + subq = model_query(context, models.Share.host, + func.sum(models.Share.size).label(label), + session=session, read_deleted="no").\ + group_by(models.Share.host).\ + subquery() + return _service_get_all_topic_subquery(context, + session, + topic, + subq, + label) + + +@require_admin_context +def service_get_by_args(context, host, binary): + result = model_query(context, models.Service).\ + filter_by(host=host).\ + filter_by(binary=binary).\ + first() + + if not result: + raise exception.HostBinaryNotFound(host=host, binary=binary) + + return result + + +@require_admin_context +def service_create(context, values): + 
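+    # New services are created disabled unless FLAGS.enable_new_services is
+    # set, so an operator can configure a service before it takes requests.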
service_ref = models.Service() + service_ref.update(values) + if not FLAGS.enable_new_services: + service_ref.disabled = True + service_ref.save() + return service_ref + + +@require_admin_context +def service_update(context, service_id, values): + session = get_session() + with session.begin(): + service_ref = service_get(context, service_id, session=session) + service_ref.update(values) + service_ref.save(session=session) + + +################### + + +def _metadata_refs(metadata_dict, meta_class): + metadata_refs = [] + if metadata_dict: + for k, v in metadata_dict.iteritems(): + metadata_ref = meta_class() + metadata_ref['key'] = k + metadata_ref['value'] = v + metadata_refs.append(metadata_ref) + return metadata_refs + + +def _dict_with_extra_specs(inst_type_query): + """Takes an instance, volume, or instance type query returned + by sqlalchemy and returns it as a dictionary, converting the + extra_specs entry from a list of dicts: + + 'extra_specs' : [{'key': 'k1', 'value': 'v1', ...}, ...] + + to a single dict: + + 'extra_specs' : {'k1': 'v1'} + + """ + inst_type_dict = dict(inst_type_query) + extra_specs = dict([(x['key'], x['value']) + for x in inst_type_query['extra_specs']]) + inst_type_dict['extra_specs'] = extra_specs + return inst_type_dict + + +################### + + +@require_admin_context +def iscsi_target_count_by_host(context, host): + return model_query(context, models.IscsiTarget).\ + filter_by(host=host).\ + count() + + +@require_admin_context +def iscsi_target_create_safe(context, values): + iscsi_target_ref = models.IscsiTarget() + + for (key, value) in values.iteritems(): + iscsi_target_ref[key] = value + try: + iscsi_target_ref.save() + return iscsi_target_ref + except IntegrityError: + return None + + +################### + + +@require_context +def quota_get(context, project_id, resource, session=None): + result = model_query(context, models.Quota, session=session, + read_deleted="no").\ + filter_by(project_id=project_id).\ + filter_by(resource=resource).\ + first() + + if not result: + raise exception.ProjectQuotaNotFound(project_id=project_id) + + return result + + +@require_context +def quota_get_all_by_project(context, project_id): + authorize_project_context(context, project_id) + + rows = model_query(context, models.Quota, read_deleted="no").\ + filter_by(project_id=project_id).\ + all() + + result = {'project_id': project_id} + for row in rows: + result[row.resource] = row.hard_limit + + return result + + +@require_admin_context +def quota_create(context, project_id, resource, limit): + quota_ref = models.Quota() + quota_ref.project_id = project_id + quota_ref.resource = resource + quota_ref.hard_limit = limit + quota_ref.save() + return quota_ref + + +@require_admin_context +def quota_update(context, project_id, resource, limit): + session = get_session() + with session.begin(): + quota_ref = quota_get(context, project_id, resource, session=session) + quota_ref.hard_limit = limit + quota_ref.save(session=session) + + +@require_admin_context +def quota_destroy(context, project_id, resource): + session = get_session() + with session.begin(): + quota_ref = quota_get(context, project_id, resource, session=session) + quota_ref.delete(session=session) + + +################### + + +@require_context +def quota_class_get(context, class_name, resource, session=None): + result = model_query(context, models.QuotaClass, session=session, + read_deleted="no").\ + filter_by(class_name=class_name).\ + filter_by(resource=resource).\ + first() + + if not result: + raise 
exception.QuotaClassNotFound(class_name=class_name) + + return result + + +@require_context +def quota_class_get_all_by_name(context, class_name): + authorize_quota_class_context(context, class_name) + + rows = model_query(context, models.QuotaClass, read_deleted="no").\ + filter_by(class_name=class_name).\ + all() + + result = {'class_name': class_name} + for row in rows: + result[row.resource] = row.hard_limit + + return result + + +@require_admin_context +def quota_class_create(context, class_name, resource, limit): + quota_class_ref = models.QuotaClass() + quota_class_ref.class_name = class_name + quota_class_ref.resource = resource + quota_class_ref.hard_limit = limit + quota_class_ref.save() + return quota_class_ref + + +@require_admin_context +def quota_class_update(context, class_name, resource, limit): + session = get_session() + with session.begin(): + quota_class_ref = quota_class_get(context, class_name, resource, + session=session) + quota_class_ref.hard_limit = limit + quota_class_ref.save(session=session) + + +@require_admin_context +def quota_class_destroy(context, class_name, resource): + session = get_session() + with session.begin(): + quota_class_ref = quota_class_get(context, class_name, resource, + session=session) + quota_class_ref.delete(session=session) + + +@require_admin_context +def quota_class_destroy_all_by_name(context, class_name): + session = get_session() + with session.begin(): + quota_classes = model_query(context, models.QuotaClass, + session=session, read_deleted="no").\ + filter_by(class_name=class_name).\ + all() + + for quota_class_ref in quota_classes: + quota_class_ref.delete(session=session) + + +################### + + +@require_context +def quota_usage_get(context, project_id, resource, session=None): + result = model_query(context, models.QuotaUsage, session=session, + read_deleted="no").\ + filter_by(project_id=project_id).\ + filter_by(resource=resource).\ + first() + + if not result: + raise exception.QuotaUsageNotFound(project_id=project_id) + + return result + + +@require_context +def quota_usage_get_all_by_project(context, project_id): + authorize_project_context(context, project_id) + + rows = model_query(context, models.QuotaUsage, read_deleted="no").\ + filter_by(project_id=project_id).\ + all() + + result = {'project_id': project_id} + for row in rows: + result[row.resource] = dict(in_use=row.in_use, reserved=row.reserved) + + return result + + +@require_admin_context +def quota_usage_create(context, project_id, resource, in_use, reserved, + until_refresh, session=None): + quota_usage_ref = models.QuotaUsage() + quota_usage_ref.project_id = project_id + quota_usage_ref.resource = resource + quota_usage_ref.in_use = in_use + quota_usage_ref.reserved = reserved + quota_usage_ref.until_refresh = until_refresh + quota_usage_ref.save(session=session) + + return quota_usage_ref + + +################### + + +@require_context +def reservation_get(context, uuid, session=None): + result = model_query(context, models.Reservation, session=session, + read_deleted="no").\ + filter_by(uuid=uuid).first() + + if not result: + raise exception.ReservationNotFound(uuid=uuid) + + return result + + +@require_context +def reservation_get_all_by_project(context, project_id): + authorize_project_context(context, project_id) + + rows = model_query(context, models.Reservation, read_deleted="no").\ + filter_by(project_id=project_id).all() + + result = {'project_id': project_id} + for row in rows: + result.setdefault(row.resource, {}) + 
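+        # Shape the result as {resource: {reservation uuid: delta}}.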
result[row.resource][row.uuid] = row.delta + + return result + + +@require_admin_context +def reservation_create(context, uuid, usage, project_id, resource, delta, + expire, session=None): + reservation_ref = models.Reservation() + reservation_ref.uuid = uuid + reservation_ref.usage_id = usage['id'] + reservation_ref.project_id = project_id + reservation_ref.resource = resource + reservation_ref.delta = delta + reservation_ref.expire = expire + reservation_ref.save(session=session) + return reservation_ref + + +@require_admin_context +def reservation_destroy(context, uuid): + session = get_session() + with session.begin(): + reservation_ref = reservation_get(context, uuid, session=session) + reservation_ref.delete(session=session) + + +################### + + +# NOTE(johannes): The quota code uses SQL locking to ensure races don't +# cause under or over counting of resources. To avoid deadlocks, this +# code always acquires the lock on quota_usages before acquiring the lock +# on reservations. + +def _get_quota_usages(context, session, project_id): + # Broken out for testability + rows = model_query(context, models.QuotaUsage, + read_deleted="no", + session=session).\ + filter_by(project_id=project_id).\ + with_lockmode('update').\ + all() + return dict((row.resource, row) for row in rows) + + +@require_context +def quota_reserve(context, resources, quotas, deltas, expire, + until_refresh, max_age, project_id=None): + elevated = context.elevated() + session = get_session() + with session.begin(): + if project_id is None: + project_id = context.project_id + + # Get the current usages + usages = _get_quota_usages(context, session, project_id) + + # Handle usage refresh + work = set(deltas.keys()) + while work: + resource = work.pop() + + # Do we need to refresh the usage? + refresh = False + if resource not in usages: + usages[resource] = quota_usage_create(elevated, + project_id, + resource, + 0, 0, + until_refresh or None, + session=session) + refresh = True + elif usages[resource].in_use < 0: + # Negative in_use count indicates a desync, so try to + # heal from that... + refresh = True + elif usages[resource].until_refresh is not None: + usages[resource].until_refresh -= 1 + if usages[resource].until_refresh <= 0: + refresh = True + elif max_age and (timeutils.utcnow() - + usages[resource].updated_at).seconds >= max_age: + refresh = True + + # OK, refresh the usage + if refresh: + # Grab the sync routine + sync = resources[resource].sync + + updates = sync(elevated, project_id, session) + for res, in_use in updates.items(): + # Make sure we have a destination for the usage! + if res not in usages: + usages[res] = quota_usage_create(elevated, + project_id, + res, + 0, 0, + until_refresh or None, + session=session) + + # Update the usage + usages[res].in_use = in_use + usages[res].until_refresh = until_refresh or None + + # Because more than one resource may be refreshed + # by the call to the sync routine, and we don't + # want to double-sync, we make sure all refreshed + # resources are dropped from the work set. + work.discard(res) + + # NOTE(Vek): We make the assumption that the sync + # routine actually refreshes the + # resources that it is the sync routine + # for. We don't check, because this is + # a best-effort mechanism. + + # Check for deltas that would go negative + unders = [resource for resource, delta in deltas.items() + if delta < 0 and + delta + usages[resource].in_use < 0] + + # Now, let's check the quotas + # NOTE(Vek): We're only concerned about positive increments. 
+ # If a project has gone over quota, we want them to + # be able to reduce their usage without any + # problems. + overs = [resource for resource, delta in deltas.items() + if quotas[resource] >= 0 and delta >= 0 and + quotas[resource] < delta + usages[resource].total] + + # NOTE(Vek): The quota check needs to be in the transaction, + # but the transaction doesn't fail just because + # we're over quota, so the OverQuota raise is + # outside the transaction. If we did the raise + # here, our usage updates would be discarded, but + # they're not invalidated by being over-quota. + + # Create the reservations + if not overs: + reservations = [] + for resource, delta in deltas.items(): + reservation = reservation_create(elevated, + str(uuid.uuid4()), + usages[resource], + project_id, + resource, delta, expire, + session=session) + reservations.append(reservation.uuid) + + # Also update the reserved quantity + # NOTE(Vek): Again, we are only concerned here about + # positive increments. Here, though, we're + # worried about the following scenario: + # + # 1) User initiates resize down. + # 2) User allocates a new instance. + # 3) Resize down fails or is reverted. + # 4) User is now over quota. + # + # To prevent this, we only update the + # reserved value if the delta is positive. + if delta > 0: + usages[resource].reserved += delta + + # Apply updates to the usages table + for usage_ref in usages.values(): + usage_ref.save(session=session) + + if unders: + LOG.warning(_("Change will make usage less than 0 for the following " + "resources: %(unders)s") % locals()) + if overs: + usages = dict((k, dict(in_use=v['in_use'], reserved=v['reserved'])) + for k, v in usages.items()) + raise exception.OverQuota(overs=sorted(overs), quotas=quotas, + usages=usages) + + return reservations + + +def _quota_reservations(session, context, reservations): + """Return the relevant reservations.""" + + # Get the listed reservations + return model_query(context, models.Reservation, + read_deleted="no", + session=session).\ + filter(models.Reservation.uuid.in_(reservations)).\ + with_lockmode('update').\ + all() + + +@require_context +def reservation_commit(context, reservations, project_id=None): + session = get_session() + with session.begin(): + usages = _get_quota_usages(context, session, project_id) + + for reservation in _quota_reservations(session, context, reservations): + usage = usages[reservation.resource] + if reservation.delta >= 0: + usage.reserved -= reservation.delta + usage.in_use += reservation.delta + + reservation.delete(session=session) + + for usage in usages.values(): + usage.save(session=session) + + +@require_context +def reservation_rollback(context, reservations, project_id=None): + session = get_session() + with session.begin(): + usages = _get_quota_usages(context, session, project_id) + + for reservation in _quota_reservations(session, context, reservations): + usage = usages[reservation.resource] + if reservation.delta >= 0: + usage.reserved -= reservation.delta + + reservation.delete(session=session) + + for usage in usages.values(): + usage.save(session=session) + + +@require_admin_context +def quota_destroy_all_by_project(context, project_id): + session = get_session() + with session.begin(): + quotas = model_query(context, models.Quota, session=session, + read_deleted="no").\ + filter_by(project_id=project_id).\ + all() + + for quota_ref in quotas: + quota_ref.delete(session=session) + + quota_usages = model_query(context, models.QuotaUsage, + session=session, read_deleted="no").\ 
+ filter_by(project_id=project_id).\ + all() + + for quota_usage_ref in quota_usages: + quota_usage_ref.delete(session=session) + + reservations = model_query(context, models.Reservation, + session=session, read_deleted="no").\ + filter_by(project_id=project_id).\ + all() + + for reservation_ref in reservations: + reservation_ref.delete(session=session) + + +@require_admin_context +def reservation_expire(context): + session = get_session() + with session.begin(): + current_time = timeutils.utcnow() + results = model_query(context, models.Reservation, session=session, + read_deleted="no").\ + filter(models.Reservation.expire < current_time).\ + all() + + if results: + for reservation in results: + if reservation.delta >= 0: + reservation.usage.reserved -= reservation.delta + reservation.usage.save(session=session) + + reservation.delete(session=session) + + +################### + + +@require_admin_context +def volume_allocate_iscsi_target(context, volume_id, host): + session = get_session() + with session.begin(): + iscsi_target_ref = model_query(context, models.IscsiTarget, + session=session, read_deleted="no").\ + filter_by(volume=None).\ + filter_by(host=host).\ + with_lockmode('update').\ + first() + + # NOTE(vish): if with_lockmode isn't supported, as in sqlite, + # then this has concurrency issues + if not iscsi_target_ref: + raise db.NoMoreTargets() + + iscsi_target_ref.volume_id = volume_id + session.add(iscsi_target_ref) + + return iscsi_target_ref.target_num + + +@require_admin_context +def volume_attached(context, volume_id, instance_uuid, mountpoint): + if not uuidutils.is_uuid_like(instance_uuid): + raise exception.InvalidUUID(uuid=instance_uuid) + + session = get_session() + with session.begin(): + volume_ref = volume_get(context, volume_id, session=session) + volume_ref['status'] = 'in-use' + volume_ref['mountpoint'] = mountpoint + volume_ref['attach_status'] = 'attached' + volume_ref['instance_uuid'] = instance_uuid + volume_ref.save(session=session) + + +@require_context +def volume_create(context, values): + values['volume_metadata'] = _metadata_refs(values.get('metadata'), + models.VolumeMetadata) + volume_ref = models.Volume() + if not values.get('id'): + values['id'] = str(uuid.uuid4()) + volume_ref.update(values) + + session = get_session() + with session.begin(): + volume_ref.save(session=session) + + return volume_get(context, values['id'], session=session) + + +@require_admin_context +def volume_data_get_for_host(context, host, session=None): + result = model_query(context, + func.count(models.Volume.id), + func.sum(models.Volume.size), + read_deleted="no", + session=session).\ + filter_by(host=host).\ + first() + + # NOTE(vish): convert None to 0 + return (result[0] or 0, result[1] or 0) + + +@require_admin_context +def volume_data_get_for_project(context, project_id, session=None): + result = model_query(context, + func.count(models.Volume.id), + func.sum(models.Volume.size), + read_deleted="no", + session=session).\ + filter_by(project_id=project_id).\ + first() + + # NOTE(vish): convert None to 0 + return (result[0] or 0, result[1] or 0) + + +@require_admin_context +def volume_destroy(context, volume_id): + session = get_session() + with session.begin(): + session.query(models.Volume).\ + filter_by(id=volume_id).\ + update({'status': 'deleted', + 'deleted': True, + 'deleted_at': timeutils.utcnow(), + 'updated_at': literal_column('updated_at')}) + session.query(models.IscsiTarget).\ + filter_by(volume_id=volume_id).\ + update({'volume_id': None}) + 
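+        # Soft-delete the volume's metadata rows to match the soft delete of
+        # the volume itself above.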
session.query(models.VolumeMetadata).\ + filter_by(volume_id=volume_id).\ + update({'deleted': True, + 'deleted_at': timeutils.utcnow(), + 'updated_at': literal_column('updated_at')}) + + +@require_admin_context +def volume_detached(context, volume_id): + session = get_session() + with session.begin(): + volume_ref = volume_get(context, volume_id, session=session) + volume_ref['status'] = 'available' + volume_ref['mountpoint'] = None + volume_ref['attach_status'] = 'detached' + volume_ref['instance_uuid'] = None + volume_ref.save(session=session) + + +@require_context +def _volume_get_query(context, session=None, project_only=False): + return model_query(context, models.Volume, session=session, + project_only=project_only).\ + options(joinedload('volume_metadata')).\ + options(joinedload('volume_type')) + + +@require_context +def volume_get(context, volume_id, session=None): + result = _volume_get_query(context, session=session, project_only=True).\ + filter_by(id=volume_id).\ + first() + + if not result: + raise exception.VolumeNotFound(volume_id=volume_id) + + return result + + +@require_admin_context +def volume_get_all(context, marker, limit, sort_key, sort_dir): + query = _volume_get_query(context) + + marker_volume = None + if marker is not None: + marker_volume = volume_get(context, marker) + + query = sqlalchemyutils.paginate_query(query, models.Volume, limit, + [sort_key, 'created_at', 'id'], + marker=marker_volume, + sort_dir=sort_dir) + + return query.all() + + +@require_admin_context +def volume_get_all_by_host(context, host): + return _volume_get_query(context).filter_by(host=host).all() + + +@require_admin_context +def volume_get_all_by_instance_uuid(context, instance_uuid): + result = model_query(context, models.Volume, read_deleted="no").\ + options(joinedload('volume_metadata')).\ + options(joinedload('volume_type')).\ + filter_by(instance_uuid=instance_uuid).\ + all() + + if not result: + return [] + + return result + + +@require_context +def volume_get_all_by_project(context, project_id, marker, limit, sort_key, + sort_dir): + authorize_project_context(context, project_id) + query = _volume_get_query(context).filter_by(project_id=project_id) + + marker_volume = None + if marker is not None: + marker_volume = volume_get(context, marker) + + query = sqlalchemyutils.paginate_query(query, models.Volume, limit, + [sort_key, 'created_at', 'id'], + marker=marker_volume, + sort_dir=sort_dir) + + return query.all() + + +@require_admin_context +def volume_get_iscsi_target_num(context, volume_id): + result = model_query(context, models.IscsiTarget, read_deleted="yes").\ + filter_by(volume_id=volume_id).\ + first() + + if not result: + raise exception.ISCSITargetNotFoundForVolume(volume_id=volume_id) + + return result.target_num + + +@require_context +def volume_update(context, volume_id, values): + session = get_session() + metadata = values.get('metadata') + if metadata is not None: + volume_metadata_update(context, + volume_id, + values.pop('metadata'), + delete=True) + with session.begin(): + volume_ref = volume_get(context, volume_id, session=session) + volume_ref.update(values) + volume_ref.save(session=session) + return volume_ref + + +#################### + +def _volume_metadata_get_query(context, volume_id, session=None): + return model_query(context, models.VolumeMetadata, + session=session, read_deleted="no").\ + filter_by(volume_id=volume_id) + + +@require_context +@require_volume_exists +def volume_metadata_get(context, volume_id): + rows = 
_volume_metadata_get_query(context, volume_id).all() + result = {} + for row in rows: + result[row['key']] = row['value'] + + return result + + +@require_context +@require_volume_exists +def volume_metadata_delete(context, volume_id, key): + _volume_metadata_get_query(context, volume_id).\ + filter_by(key=key).\ + update({'deleted': True, + 'deleted_at': timeutils.utcnow(), + 'updated_at': literal_column('updated_at')}) + + +@require_context +@require_volume_exists +def volume_metadata_get_item(context, volume_id, key, session=None): + result = _volume_metadata_get_query(context, volume_id, session=session).\ + filter_by(key=key).\ + first() + + if not result: + raise exception.VolumeMetadataNotFound(metadata_key=key, + volume_id=volume_id) + return result + + +@require_context +@require_volume_exists +def volume_metadata_update(context, volume_id, metadata, delete): + session = get_session() + + # Set existing metadata to deleted if delete argument is True + if delete: + original_metadata = volume_metadata_get(context, volume_id) + for meta_key, meta_value in original_metadata.iteritems(): + if meta_key not in metadata: + meta_ref = volume_metadata_get_item(context, volume_id, + meta_key, session) + meta_ref.update({'deleted': True}) + meta_ref.save(session=session) + + meta_ref = None + + # Now update all existing items with new values, or create new meta objects + for meta_key, meta_value in metadata.items(): + + # update the value whether it exists or not + item = {"value": meta_value} + + try: + meta_ref = volume_metadata_get_item(context, volume_id, + meta_key, session) + except exception.VolumeMetadataNotFound as e: + meta_ref = models.VolumeMetadata() + item.update({"key": meta_key, "volume_id": volume_id}) + + meta_ref.update(item) + meta_ref.save(session=session) + + return metadata + + +################### + + +@require_context +def snapshot_create(context, values): + values['snapshot_metadata'] = _metadata_refs(values.get('metadata'), + models.SnapshotMetadata) + snapshot_ref = models.Snapshot() + if not values.get('id'): + values['id'] = str(uuid.uuid4()) + snapshot_ref.update(values) + + session = get_session() + with session.begin(): + snapshot_ref.save(session=session) + + return snapshot_get(context, values['id'], session=session) + + +@require_admin_context +def snapshot_destroy(context, snapshot_id): + session = get_session() + with session.begin(): + session.query(models.Snapshot).\ + filter_by(id=snapshot_id).\ + update({'status': 'deleted', + 'deleted': True, + 'deleted_at': timeutils.utcnow(), + 'updated_at': literal_column('updated_at')}) + + +@require_context +def snapshot_get(context, snapshot_id, session=None): + result = model_query(context, models.Snapshot, session=session, + project_only=True).\ + filter_by(id=snapshot_id).\ + first() + + if not result: + raise exception.SnapshotNotFound(snapshot_id=snapshot_id) + + return result + + +@require_admin_context +def snapshot_get_all(context): + return model_query(context, models.Snapshot).all() + + +@require_context +def snapshot_get_all_for_volume(context, volume_id): + return model_query(context, models.Snapshot, read_deleted='no', + project_only=True).\ + filter_by(volume_id=volume_id).all() + + +@require_context +def snapshot_get_all_by_project(context, project_id): + authorize_project_context(context, project_id) + return model_query(context, models.Snapshot).\ + filter_by(project_id=project_id).\ + all() + + +@require_context +def snapshot_data_get_for_project(context, project_id, session=None): + 
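+    # Returns a (snapshot count, total gigabytes) tuple for the project,
+    # with None results coerced to 0.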
authorize_project_context(context, project_id) + result = model_query(context, + func.count(models.Snapshot.id), + func.sum(models.Snapshot.volume_size), + read_deleted="no", + session=session).\ + filter_by(project_id=project_id).\ + first() + + # NOTE(vish): convert None to 0 + return (result[0] or 0, result[1] or 0) + + +@require_context +def snapshot_get_active_by_window(context, begin, end=None, project_id=None): + """Return snapshots that were active during window.""" + session = get_session() + query = session.query(models.Snapshot) + + query = query.filter(or_(models.Snapshot.deleted_at == None, + models.Snapshot.deleted_at > begin)) + if end: + query = query.filter(models.Snapshot.created_at < end) + if project_id: + query = query.filter_by(project_id=project_id) + + return query.all() + + +@require_context +def snapshot_update(context, snapshot_id, values): + session = get_session() + with session.begin(): + snapshot_ref = snapshot_get(context, snapshot_id, session=session) + snapshot_ref.update(values) + snapshot_ref.save(session=session) + +#################### + + +def _snapshot_metadata_get_query(context, snapshot_id, session=None): + return model_query(context, models.SnapshotMetadata, + session=session, read_deleted="no").\ + filter_by(snapshot_id=snapshot_id) + + +@require_context +@require_snapshot_exists +def snapshot_metadata_get(context, snapshot_id): + rows = _snapshot_metadata_get_query(context, snapshot_id).all() + result = {} + for row in rows: + result[row['key']] = row['value'] + + return result + + +@require_context +@require_snapshot_exists +def snapshot_metadata_delete(context, snapshot_id, key): + _snapshot_metadata_get_query(context, snapshot_id).\ + filter_by(key=key).\ + update({'deleted': True, + 'deleted_at': timeutils.utcnow(), + 'updated_at': literal_column('updated_at')}) + + +@require_context +@require_snapshot_exists +def snapshot_metadata_get_item(context, snapshot_id, key, session=None): + result = _snapshot_metadata_get_query(context, + snapshot_id, + session=session).\ + filter_by(key=key).\ + first() + + if not result: + raise exception.SnapshotMetadataNotFound(metadata_key=key, + snapshot_id=snapshot_id) + return result + + +@require_context +@require_snapshot_exists +def snapshot_metadata_update(context, snapshot_id, metadata, delete): + session = get_session() + + # Set existing metadata to deleted if delete argument is True + if delete: + original_metadata = snapshot_metadata_get(context, snapshot_id) + for meta_key, meta_value in original_metadata.iteritems(): + if meta_key not in metadata: + meta_ref = snapshot_metadata_get_item(context, snapshot_id, + meta_key, session) + meta_ref.update({'deleted': True}) + meta_ref.save(session=session) + + meta_ref = None + + # Now update all existing items with new values, or create new meta objects + for meta_key, meta_value in metadata.items(): + + # update the value whether it exists or not + item = {"value": meta_value} + + try: + meta_ref = snapshot_metadata_get_item(context, snapshot_id, + meta_key, session) + except exception.SnapshotMetadataNotFound as e: + meta_ref = models.SnapshotMetadata() + item.update({"key": meta_key, "snapshot_id": snapshot_id}) + + meta_ref.update(item) + meta_ref.save(session=session) + + return metadata + +################### + + +@require_admin_context +def migration_create(context, values): + migration = models.Migration() + migration.update(values) + migration.save() + return migration + + +@require_admin_context +def migration_update(context, id, values): + 
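+    # Update the migration record in a single transaction and return the
+    # refreshed model.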
session = get_session() + with session.begin(): + migration = migration_get(context, id, session=session) + migration.update(values) + migration.save(session=session) + return migration + + +@require_admin_context +def migration_get(context, id, session=None): + result = model_query(context, models.Migration, session=session, + read_deleted="yes").\ + filter_by(id=id).\ + first() + + if not result: + raise exception.MigrationNotFound(migration_id=id) + + return result + + +@require_admin_context +def migration_get_by_instance_and_status(context, instance_uuid, status): + result = model_query(context, models.Migration, read_deleted="yes").\ + filter_by(instance_uuid=instance_uuid).\ + filter_by(status=status).\ + first() + + if not result: + raise exception.MigrationNotFoundByStatus(instance_id=instance_uuid, + status=status) + + return result + + +@require_admin_context +def migration_get_all_unconfirmed(context, confirm_window, session=None): + confirm_window = timeutils.utcnow() - datetime.timedelta( + seconds=confirm_window) + + return model_query(context, models.Migration, session=session, + read_deleted="yes").\ + filter(models.Migration.updated_at <= confirm_window).\ + filter_by(status="finished").\ + all() + + +################## + + +@require_admin_context +def volume_type_create(context, values): + """Create a new volume type. In order to pass in extra specs, + the values dict should contain an 'extra_specs' key/value pair: + + {'extra_specs' : {'k1': 'v1', 'k2': 'v2', ...}} + + """ + if not values.get('id'): + values['id'] = str(uuid.uuid4()) + + session = get_session() + with session.begin(): + try: + volume_type_get_by_name(context, values['name'], session) + raise exception.VolumeTypeExists(id=values['name']) + except exception.VolumeTypeNotFoundByName: + pass + try: + volume_type_get(context, values['id'], session) + raise exception.VolumeTypeExists(id=values['id']) + except exception.VolumeTypeNotFound: + pass + try: + values['extra_specs'] = _metadata_refs(values.get('extra_specs'), + models.VolumeTypeExtraSpecs) + volume_type_ref = models.VolumeTypes() + volume_type_ref.update(values) + volume_type_ref.save() + except Exception, e: + raise exception.DBError(e) + return volume_type_ref + + +@require_context +def volume_type_get_all(context, inactive=False, filters=None): + """ + Returns a dict describing all volume_types with name as key. 
+ """ + filters = filters or {} + + read_deleted = "yes" if inactive else "no" + rows = model_query(context, models.VolumeTypes, + read_deleted=read_deleted).\ + options(joinedload('extra_specs')).\ + order_by("name").\ + all() + + # TODO(sirp): this patern of converting rows to a result with extra_specs + # is repeated quite a bit, might be worth creating a method for it + result = {} + for row in rows: + result[row['name']] = _dict_with_extra_specs(row) + + return result + + +@require_context +def volume_type_get(context, id, session=None): + """Returns a dict describing specific volume_type""" + result = model_query(context, models.VolumeTypes, session=session).\ + options(joinedload('extra_specs')).\ + filter_by(id=id).\ + first() + + if not result: + raise exception.VolumeTypeNotFound(volume_type_id=id) + + return _dict_with_extra_specs(result) + + +@require_context +def volume_type_get_by_name(context, name, session=None): + """Returns a dict describing specific volume_type""" + result = model_query(context, models.VolumeTypes, session=session).\ + options(joinedload('extra_specs')).\ + filter_by(name=name).\ + first() + + if not result: + raise exception.VolumeTypeNotFoundByName(volume_type_name=name) + else: + return _dict_with_extra_specs(result) + + +@require_admin_context +def volume_type_destroy(context, id): + volume_type_get(context, id) + + session = get_session() + with session.begin(): + session.query(models.VolumeTypes).\ + filter_by(id=id).\ + update({'deleted': True, + 'deleted_at': timeutils.utcnow(), + 'updated_at': literal_column('updated_at')}) + session.query(models.VolumeTypeExtraSpecs).\ + filter_by(volume_type_id=id).\ + update({'deleted': True, + 'deleted_at': timeutils.utcnow(), + 'updated_at': literal_column('updated_at')}) + + +@require_context +def volume_get_active_by_window(context, + begin, + end=None, + project_id=None): + """Return volumes that were active during window.""" + session = get_session() + query = session.query(models.Volume) + + query = query.filter(or_(models.Volume.deleted_at == None, + models.Volume.deleted_at > begin)) + if end: + query = query.filter(models.Volume.created_at < end) + if project_id: + query = query.filter_by(project_id=project_id) + + return query.all() + + +#################### + + +def _volume_type_extra_specs_query(context, volume_type_id, session=None): + return model_query(context, models.VolumeTypeExtraSpecs, session=session, + read_deleted="no").\ + filter_by(volume_type_id=volume_type_id) + + +@require_context +def volume_type_extra_specs_get(context, volume_type_id): + rows = _volume_type_extra_specs_query(context, volume_type_id).\ + all() + + result = {} + for row in rows: + result[row['key']] = row['value'] + + return result + + +@require_context +def volume_type_extra_specs_delete(context, volume_type_id, key): + _volume_type_extra_specs_query(context, volume_type_id).\ + filter_by(key=key).\ + update({'deleted': True, + 'deleted_at': timeutils.utcnow(), + 'updated_at': literal_column('updated_at')}) + + +@require_context +def volume_type_extra_specs_get_item(context, volume_type_id, key, + session=None): + result = _volume_type_extra_specs_query( + context, volume_type_id, session=session).\ + filter_by(key=key).\ + first() + + if not result: + raise exception.VolumeTypeExtraSpecsNotFound( + extra_specs_key=key, + volume_type_id=volume_type_id) + + return result + + +@require_context +def volume_type_extra_specs_update_or_create(context, volume_type_id, + specs): + session = get_session() + spec_ref = 
None + for key, value in specs.iteritems(): + try: + spec_ref = volume_type_extra_specs_get_item( + context, volume_type_id, key, session) + except exception.VolumeTypeExtraSpecsNotFound, e: + spec_ref = models.VolumeTypeExtraSpecs() + spec_ref.update({"key": key, "value": value, + "volume_type_id": volume_type_id, + "deleted": False}) + spec_ref.save(session=session) + return specs + + +#################### + + +@require_context +@require_volume_exists +def volume_glance_metadata_get(context, volume_id, session=None): + """Return the Glance metadata for the specified volume.""" + if not session: + session = get_session() + + return session.query(models.VolumeGlanceMetadata).\ + filter_by(volume_id=volume_id).\ + filter_by(deleted=False).all() + + +@require_context +@require_snapshot_exists +def volume_snapshot_glance_metadata_get(context, snapshot_id, session=None): + """Return the Glance metadata for the specified snapshot.""" + if not session: + session = get_session() + + return session.query(models.VolumeGlanceMetadata).\ + filter_by(snapshot_id=snapshot_id).\ + filter_by(deleted=False).all() + + +@require_context +@require_volume_exists +def volume_glance_metadata_create(context, volume_id, key, value, + session=None): + """ + Update the Glance metadata for a volume by adding a new key:value pair. + This API does not support changing the value of a key once it has been + created. + """ + if session is None: + session = get_session() + + with session.begin(): + rows = session.query(models.VolumeGlanceMetadata).\ + filter_by(volume_id=volume_id).\ + filter_by(key=key).\ + filter_by(deleted=False).all() + + if len(rows) > 0: + raise exception.GlanceMetadataExists(key=key, + volume_id=volume_id) + + vol_glance_metadata = models.VolumeGlanceMetadata() + vol_glance_metadata.volume_id = volume_id + vol_glance_metadata.key = key + vol_glance_metadata.value = value + + vol_glance_metadata.save(session=session) + + return + + +@require_context +@require_snapshot_exists +def volume_glance_metadata_copy_to_snapshot(context, snapshot_id, volume_id, + session=None): + """ + Update the Glance metadata for a snapshot by copying all of the key:value + pairs from the originating volume. This is so that a volume created from + the snapshot will retain the original metadata. + """ + if session is None: + session = get_session() + + metadata = volume_glance_metadata_get(context, volume_id, session=session) + with session.begin(): + for meta in metadata: + vol_glance_metadata = models.VolumeGlanceMetadata() + vol_glance_metadata.snapshot_id = snapshot_id + vol_glance_metadata.key = meta['key'] + vol_glance_metadata.value = meta['value'] + + vol_glance_metadata.save(session=session) + + +@require_context +@require_volume_exists +def volume_glance_metadata_copy_from_volume_to_volume(context, + src_volume_id, + volume_id, + session=None): + """ + Update the Glance metadata for a volume by copying all of the key:value + pairs from the originating volume. This is so that a volume created from + the volume (clone) will retain the original metadata. 
+ """ + if session is None: + session = get_session() + + metadata = volume_glance_metadata_get(context, + src_volume_id, + session=session) + with session.begin(): + for meta in metadata: + vol_glance_metadata = models.VolumeGlanceMetadata() + vol_glance_metadata.volume_id = volume_id + vol_glance_metadata.key = meta['key'] + vol_glance_metadata.value = meta['value'] + + vol_glance_metadata.save(session=session) + + +@require_context +@require_volume_exists +def volume_glance_metadata_copy_to_volume(context, volume_id, snapshot_id, + session=None): + """ + Update the Glance metadata from a volume (created from a snapshot) by + copying all of the key:value pairs from the originating snapshot. This is + so that the Glance metadata from the original volume is retained. + """ + if session is None: + session = get_session() + + metadata = volume_snapshot_glance_metadata_get(context, snapshot_id, + session=session) + with session.begin(): + for meta in metadata: + vol_glance_metadata = models.VolumeGlanceMetadata() + vol_glance_metadata.volume_id = volume_id + vol_glance_metadata.key = meta['key'] + vol_glance_metadata.value = meta['value'] + + vol_glance_metadata.save(session=session) + + +@require_context +def volume_glance_metadata_delete_by_volume(context, volume_id): + session = get_session() + session.query(models.VolumeGlanceMetadata).\ + filter_by(volume_id=volume_id).\ + filter_by(deleted=False).\ + update({'deleted': True, + 'deleted_at': timeutils.utcnow(), + 'updated_at': literal_column('updated_at')}) + + +@require_context +def volume_glance_metadata_delete_by_snapshot(context, snapshot_id): + session = get_session() + session.query(models.VolumeGlanceMetadata).\ + filter_by(snapshot_id=snapshot_id).\ + filter_by(deleted=False).\ + update({'deleted': True, + 'deleted_at': timeutils.utcnow(), + 'updated_at': literal_column('updated_at')}) + + +#################### + + +@require_admin_context +def sm_backend_conf_create(context, values): + backend_conf = models.SMBackendConf() + backend_conf.update(values) + backend_conf.save() + return backend_conf + + +@require_admin_context +def sm_backend_conf_update(context, sm_backend_id, values): + session = get_session() + with session.begin(): + backend_conf = model_query(context, models.SMBackendConf, + session=session, + read_deleted="yes").\ + filter_by(id=sm_backend_id).\ + first() + + if not backend_conf: + raise exception.NotFound( + _("No backend config with id %(sm_backend_id)s") % locals()) + + backend_conf.update(values) + backend_conf.save(session=session) + return backend_conf + + +@require_admin_context +def sm_backend_conf_delete(context, sm_backend_id): + # FIXME(sirp): for consistency, shouldn't this just mark as deleted with + # `purge` actually deleting the record? 
+ session = get_session() + with session.begin(): + model_query(context, models.SMBackendConf, session=session, + read_deleted="yes").\ + filter_by(id=sm_backend_id).\ + delete() + + +@require_admin_context +def sm_backend_conf_get(context, sm_backend_id): + result = model_query(context, models.SMBackendConf, read_deleted="yes").\ + filter_by(id=sm_backend_id).\ + first() + + if not result: + raise exception.NotFound(_("No backend config with id " + "%(sm_backend_id)s") % locals()) + + return result + + +@require_admin_context +def sm_backend_conf_get_by_sr(context, sr_uuid): + return model_query(context, models.SMBackendConf, read_deleted="yes").\ + filter_by(sr_uuid=sr_uuid).\ + first() + + +@require_admin_context +def sm_backend_conf_get_all(context): + return model_query(context, models.SMBackendConf, read_deleted="yes").\ + all() + + +#################### + + +def _sm_flavor_get_query(context, sm_flavor_label, session=None): + return model_query(context, models.SMFlavors, session=session, + read_deleted="yes").\ + filter_by(label=sm_flavor_label) + + +@require_admin_context +def sm_flavor_create(context, values): + sm_flavor = models.SMFlavors() + sm_flavor.update(values) + sm_flavor.save() + return sm_flavor + + +@require_admin_context +def sm_flavor_update(context, sm_flavor_label, values): + sm_flavor = sm_flavor_get(context, sm_flavor_label) + sm_flavor.update(values) + sm_flavor.save() + return sm_flavor + + +@require_admin_context +def sm_flavor_delete(context, sm_flavor_label): + session = get_session() + with session.begin(): + _sm_flavor_get_query(context, sm_flavor_label).delete() + + +@require_admin_context +def sm_flavor_get(context, sm_flavor_label): + result = _sm_flavor_get_query(context, sm_flavor_label).first() + + if not result: + raise exception.NotFound( + _("No sm_flavor called %(sm_flavor)s") % locals()) + + return result + + +@require_admin_context +def sm_flavor_get_all(context): + return model_query(context, models.SMFlavors, read_deleted="yes").all() + + +############################### + + +def _sm_volume_get_query(context, volume_id, session=None): + return model_query(context, models.SMVolume, session=session, + read_deleted="yes").\ + filter_by(id=volume_id) + + +def sm_volume_create(context, values): + sm_volume = models.SMVolume() + sm_volume.update(values) + sm_volume.save() + return sm_volume + + +def sm_volume_update(context, volume_id, values): + sm_volume = sm_volume_get(context, volume_id) + sm_volume.update(values) + sm_volume.save() + return sm_volume + + +def sm_volume_delete(context, volume_id): + session = get_session() + with session.begin(): + _sm_volume_get_query(context, volume_id, session=session).delete() + + +def sm_volume_get(context, volume_id): + result = _sm_volume_get_query(context, volume_id).first() + + if not result: + raise exception.NotFound( + _("No sm_volume with id %(volume_id)s") % locals()) + + return result + + +def sm_volume_get_all(context): + return model_query(context, models.SMVolume, read_deleted="yes").all() + + +############################### + + +@require_context +def backup_get(context, backup_id, session=None): + result = model_query(context, models.Backup, + session=session, project_only=True).\ + filter_by(id=backup_id).\ + first() + + if not result: + raise exception.BackupNotFound(backup_id=backup_id) + + return result + + +@require_admin_context +def backup_get_all(context): + return model_query(context, models.Backup).all() + + +@require_admin_context +def backup_get_all_by_host(context, host): + 
return model_query(context, models.Backup).filter_by(host=host).all() + + +@require_context +def backup_get_all_by_project(context, project_id): + authorize_project_context(context, project_id) + + return model_query(context, models.Backup).\ + filter_by(project_id=project_id).all() + + +@require_context +def backup_create(context, values): + backup = models.Backup() + if not values.get('id'): + values['id'] = str(uuid.uuid4()) + backup.update(values) + backup.save() + return backup + + +@require_context +def backup_update(context, backup_id, values): + session = get_session() + with session.begin(): + backup = model_query(context, models.Backup, + session=session, read_deleted="yes").\ + filter_by(id=backup_id).first() + + if not backup: + raise exception.BackupNotFound( + _("No backup with id %(backup_id)s") % locals()) + + backup.update(values) + backup.save(session=session) + return backup + + +@require_admin_context +def backup_destroy(context, backup_id): + session = get_session() + with session.begin(): + session.query(models.Backup).\ + filter_by(id=backup_id).\ + update({'status': 'deleted', + 'deleted': True, + 'deleted_at': timeutils.utcnow(), + 'updated_at': literal_column('updated_at')}) + + +################ + + +def _share_get_query(context, session=None): + if session is None: + session = get_session() + return model_query(context, models.Share, session=session) + + +@require_context +def share_create(context, values): + share_ref = models.Share() + if not values.get('id'): + values['id'] = str(uuid.uuid4()) + share_ref.update(values) + session = get_session() + with session.begin(): + share_ref.save(session=session) + + return share_ref + + +@require_context +def share_update(context, share_id, values): + session = get_session() + with session.begin(): + share_ref = share_get(context, share_id, session=session) + share_ref.update(values) + share_ref.save(session=session) + return share_ref + + +@require_context +def share_get(context, share_id, session=None): + result = _share_get_query(context, session).filter_by(id=share_id).first() + if result is None: + raise exception.NotFound() + return result + + +@require_admin_context +def share_get_all(context): + return _share_get_query(context).all() + + +@require_admin_context +def share_get_all_by_host(context, host): + query = _share_get_query(context) + return query.filter_by(host=host).all() + + +@require_context +def share_get_all_by_project(context, project_id): + """Returns list of shares with given project ID.""" + return _share_get_query(context).filter_by(project_id=project_id).all() + + +@require_context +def share_delete(context, share_id): + session = get_session() + share_ref = share_get(context, share_id, session) + share_ref.update({'deleted': True, + 'deleted_at': timeutils.utcnow(), + 'updated_at': literal_column('updated_at'), + 'status': 'deleted'}) + share_ref.save(session) + + +################### + + +def _share_access_get_query(context, session, values): + """ + Get access record. + """ + query = model_query(context, models.ShareAccessMapping, session=session) + return query.filter_by(**values) + + +@require_context +def share_access_create(context, values): + session = get_session() + with session.begin(): + access_ref = models.ShareAccessMapping() + if not values.get('id'): + values['id'] = str(uuid.uuid4()) + access_ref.update(values) + access_ref.save(session=session) + return access_ref + + +@require_context +def share_access_get(context, access_id): + """ + Get access record. 
+ """ + session = get_session() + access = _share_access_get_query(context, session, + {'id': access_id}).first() + if access: + return access + else: + raise exception.NotFound() + + +@require_context +def share_access_get_all_for_share(context, share_id): + session = get_session() + return _share_access_get_query(context, session, + {'share_id': share_id}).all() + + +@require_context +def share_access_delete(context, access_id): + session = get_session() + with session.begin(): + session.query(models.ShareAccessMapping).\ + filter_by(id=access_id).\ + update({'deleted': True, + 'deleted_at': timeutils.utcnow(), + 'updated_at': literal_column('updated_at'), + 'state': models.ShareAccessMapping.STATE_DELETED}) + + +@require_context +def share_access_update(context, access_id, values): + session = get_session() + with session.begin(): + access = _share_access_get_query(context, session, {'id': access_id}) + access = access.one() + access.update(values) + access.save(session=session) + return access + + +################### + + +@require_context +def share_snapshot_create(context, values): + snapshot_ref = models.ShareSnapshot() + if not values.get('id'): + values['id'] = str(uuid.uuid4()) + snapshot_ref.update(values) + + session = get_session() + with session.begin(): + snapshot_ref.save(session=session) + + return share_snapshot_get(context, values['id'], session=session) + + +@require_admin_context +def share_snapshot_destroy(context, snapshot_id): + session = get_session() + with session.begin(): + session.query(models.ShareSnapshot).\ + filter_by(id=snapshot_id).\ + update({'status': 'deleted', + 'deleted': True, + 'deleted_at': timeutils.utcnow(), + 'updated_at': literal_column('updated_at')}) + + +@require_context +def share_snapshot_get(context, snapshot_id, session=None): + result = model_query(context, models.ShareSnapshot, session=session, + project_only=True).\ + filter_by(id=snapshot_id).\ + first() + + if not result: + raise exception.ShareSnapshotNotFound(snapshot_id=snapshot_id) + + return result + + +@require_admin_context +def share_snapshot_get_all(context): + return model_query(context, models.ShareSnapshot).all() + + +@require_context +def share_snapshot_get_all_by_project(context, project_id): + authorize_project_context(context, project_id) + return model_query(context, models.ShareSnapshot).\ + filter_by(project_id=project_id).\ + all() + + +@require_context +def share_snapshot_get_all_for_share(context, share_id): + return model_query(context, models.ShareSnapshot, read_deleted='no', + project_only=True).\ + filter_by(share_id=share_id).all() + + +@require_context +def share_snapshot_data_get_for_project(context, project_id, session=None): + authorize_project_context(context, project_id) + result = model_query(context, + func.count(models.ShareSnapshot.id), + func.sum(models.ShareSnapshot.share_size), + read_deleted="no", + session=session).\ + filter_by(project_id=project_id).\ + first() + + # NOTE(vish): convert None to 0 + return (result[0] or 0, result[1] or 0) + + +@require_context +def share_snapshot_update(context, snapshot_id, values): + session = get_session() + with session.begin(): + snapshot_ref = share_snapshot_get(context, snapshot_id, + session=session) + snapshot_ref.update(values) + snapshot_ref.save(session=session) diff --git a/cinder/db/sqlalchemy/migrate_repo/README b/cinder/db/sqlalchemy/migrate_repo/README new file mode 100644 index 0000000000..6218f8cac4 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/README @@ -0,0 +1,4 @@ +This is a 
database migration repository. + +More information at +http://code.google.com/p/sqlalchemy-migrate/ diff --git a/cinder/db/sqlalchemy/migrate_repo/__init__.py b/cinder/db/sqlalchemy/migrate_repo/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/cinder/db/sqlalchemy/migrate_repo/manage.py b/cinder/db/sqlalchemy/migrate_repo/manage.py new file mode 100644 index 0000000000..09e340f44f --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/manage.py @@ -0,0 +1,4 @@ +#!/usr/bin/env python +from migrate.versioning.shell import main +if __name__ == '__main__': + main(debug='False', repository='.') diff --git a/cinder/db/sqlalchemy/migrate_repo/migrate.cfg b/cinder/db/sqlalchemy/migrate_repo/migrate.cfg new file mode 100644 index 0000000000..10c685c0e5 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/migrate.cfg @@ -0,0 +1,20 @@ +[db_settings] +# Used to identify which repository this database is versioned under. +# You can use the name of your project. +repository_id=cinder + +# The name of the database table used to track the schema version. +# This name shouldn't already be used by your project. +# If this is changed once a database is under version control, you'll need to +# change the table name in each database too. +version_table=migrate_version + +# When committing a change script, Migrate will attempt to generate the +# sql for all supported databases; normally, if one of them fails - probably +# because you don't have that database installed - it is ignored and the +# commit continues, perhaps ending successfully. +# Databases in this list MUST compile successfully during a commit, or the +# entire commit will fail. List the databases your application will actually +# be using to ensure your updates to that database work properly. +# This must be a list; example: ['postgres','sqlite'] +required_dbs=[] diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py b/cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py new file mode 100644 index 0000000000..e46faf4b13 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py @@ -0,0 +1,272 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 OpenStack LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
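
The manage.py shim and migrate.cfg above wire sqlalchemy-migrate's versioning shell to this repository. For orientation, the same repository can be driven through the library API directly; a minimal sketch, where the database URL and repository path are illustrative and not taken from this patch:

    from migrate.versioning import api as versioning_api

    url = 'sqlite:///cinder.sqlite'                 # assumed database URL
    repo = 'cinder/db/sqlalchemy/migrate_repo'      # the repo defined above

    versioning_api.version_control(url, repo)       # start tracking at version 0
    versioning_api.upgrade(url, repo)               # apply 001..latest in order
    print versioning_api.db_version(url, repo)      # read back migrate_version

The version scripts that follow (001_cinder_init.py and onward) are what this API discovers and runs.
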
+ +from sqlalchemy import Boolean, Column, DateTime, ForeignKey +from sqlalchemy import Integer, MetaData, String, Table + +from cinder import flags +from cinder.openstack.common import log as logging + +FLAGS = flags.FLAGS + +LOG = logging.getLogger(__name__) + + +def upgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + migrations = Table( + 'migrations', meta, + Column('created_at', DateTime), + Column('updated_at', DateTime), + Column('deleted_at', DateTime), + Column('deleted', Boolean), + Column('id', Integer, primary_key=True, nullable=False), + Column('source_compute', String(length=255)), + Column('dest_compute', String(length=255)), + Column('dest_host', String(length=255)), + Column('status', String(length=255)), + Column('instance_uuid', String(length=255)), + Column('old_instance_type_id', Integer), + Column('new_instance_type_id', Integer), + mysql_engine='InnoDB' + ) + + services = Table( + 'services', meta, + Column('created_at', DateTime), + Column('updated_at', DateTime), + Column('deleted_at', DateTime), + Column('deleted', Boolean), + Column('id', Integer, primary_key=True, nullable=False), + Column('host', String(length=255)), + Column('binary', String(length=255)), + Column('topic', String(length=255)), + Column('report_count', Integer, nullable=False), + Column('disabled', Boolean), + Column('availability_zone', String(length=255)), + mysql_engine='InnoDB' + ) + + sm_flavors = Table( + 'sm_flavors', meta, + Column('created_at', DateTime), + Column('updated_at', DateTime), + Column('deleted_at', DateTime), + Column('deleted', Boolean), + Column('id', Integer, primary_key=True, nullable=False), + Column('label', String(length=255)), + Column('description', String(length=255)), + mysql_engine='InnoDB' + ) + + sm_backend_config = Table( + 'sm_backend_config', meta, + Column('created_at', DateTime), + Column('updated_at', DateTime), + Column('deleted_at', DateTime), + Column('deleted', Boolean), + Column('id', Integer, primary_key=True, nullable=False), + Column('flavor_id', Integer, ForeignKey('sm_flavors.id'), + nullable=False), + Column('sr_uuid', String(length=255)), + Column('sr_type', String(length=255)), + Column('config_params', String(length=2047)), + mysql_engine='InnoDB' + ) + + sm_volume = Table( + 'sm_volume', meta, + Column('created_at', DateTime), + Column('updated_at', DateTime), + Column('deleted_at', DateTime), + Column('deleted', Boolean), + Column('id', String(length=36), + ForeignKey('volumes.id'), + primary_key=True, + nullable=False), + Column('backend_id', Integer, ForeignKey('sm_backend_config.id'), + nullable=False), + Column('vdi_uuid', String(length=255)), + mysql_engine='InnoDB' + ) + + snapshots = Table( + 'snapshots', meta, + Column('created_at', DateTime), + Column('updated_at', DateTime), + Column('deleted_at', DateTime), + Column('deleted', Boolean), + Column('id', String(length=36), primary_key=True, nullable=False), + Column('volume_id', String(length=36), nullable=False), + Column('user_id', String(length=255)), + Column('project_id', String(length=255)), + Column('status', String(length=255)), + Column('progress', String(length=255)), + Column('volume_size', Integer), + Column('scheduled_at', DateTime), + Column('display_name', String(length=255)), + Column('display_description', String(length=255)), + mysql_engine='InnoDB' + ) + + volume_types = Table( + 'volume_types', meta, + Column('created_at', DateTime), + Column('updated_at', DateTime), + Column('deleted_at', DateTime), + Column('deleted', Boolean), + 
Column('id', Integer, primary_key=True, nullable=False), + Column('name', String(length=255)), + mysql_engine='InnoDB' + ) + + volume_metadata = Table( + 'volume_metadata', meta, + Column('created_at', DateTime), + Column('updated_at', DateTime), + Column('deleted_at', DateTime), + Column('deleted', Boolean), + Column('id', Integer, primary_key=True, nullable=False), + Column('volume_id', String(length=36), ForeignKey('volumes.id'), + nullable=False), + Column('key', String(length=255)), + Column('value', String(length=255)), + mysql_engine='InnoDB' + ) + + volume_type_extra_specs = Table( + 'volume_type_extra_specs', meta, + Column('created_at', DateTime), + Column('updated_at', DateTime), + Column('deleted_at', DateTime), + Column('deleted', Boolean), + Column('id', Integer, primary_key=True, nullable=False), + Column('volume_type_id', Integer, ForeignKey('volume_types.id'), + nullable=False), + Column('key', String(length=255)), + Column('value', String(length=255)), + mysql_engine='InnoDB' + ) + + volumes = Table( + 'volumes', meta, + Column('created_at', DateTime), + Column('updated_at', DateTime), + Column('deleted_at', DateTime), + Column('deleted', Boolean), + Column('id', String(length=36), primary_key=True, nullable=False), + Column('ec2_id', String(length=255)), + Column('user_id', String(length=255)), + Column('project_id', String(length=255)), + Column('host', String(length=255)), + Column('size', Integer), + Column('availability_zone', String(length=255)), + Column('instance_uuid', String(length=36)), + Column('mountpoint', String(length=255)), + Column('attach_time', String(length=255)), + Column('status', String(length=255)), + Column('attach_status', String(length=255)), + Column('scheduled_at', DateTime), + Column('launched_at', DateTime), + Column('terminated_at', DateTime), + Column('display_name', String(length=255)), + Column('display_description', String(length=255)), + Column('provider_location', String(length=256)), + Column('provider_auth', String(length=256)), + Column('snapshot_id', String(length=36)), + Column('volume_type_id', Integer), + mysql_engine='InnoDB' + ) + + quotas = Table( + 'quotas', meta, + Column('id', Integer, primary_key=True, nullable=False), + Column('created_at', DateTime), + Column('updated_at', DateTime), + Column('deleted_at', DateTime), + Column('deleted', Boolean), + Column('project_id', String(length=255)), + Column('resource', String(length=255), nullable=False), + Column('hard_limit', Integer), + mysql_engine='InnoDB' + ) + + iscsi_targets = Table( + 'iscsi_targets', meta, + Column('created_at', DateTime), + Column('updated_at', DateTime), + Column('deleted_at', DateTime), + Column('deleted', Boolean), + Column('id', Integer, primary_key=True, nullable=False), + Column('target_num', Integer), + Column('host', String(length=255)), + Column('volume_id', String(length=36), ForeignKey('volumes.id'), + nullable=True), + mysql_engine='InnoDB' + ) + + # create all tables + # Take care on create order for those with FK dependencies + tables = [sm_flavors, + sm_backend_config, + snapshots, + volume_types, + volumes, + iscsi_targets, + migrations, + quotas, + services, + sm_volume, + volume_metadata, + volume_type_extra_specs] + + for table in tables: + try: + table.create() + except Exception: + LOG.info(repr(table)) + LOG.exception(_('Exception while creating table.')) + raise + + if migrate_engine.name == "mysql": + tables = ["sm_flavors", + "sm_backend_config", + "snapshots", + "volume_types", + "volumes", + "iscsi_targets", + 
"migrate_version", + "migrations", + "quotas", + "services", + "sm_volume", + "volume_metadata", + "volume_type_extra_specs"] + + sql = "SET foreign_key_checks = 0;" + for table in tables: + sql += "ALTER TABLE %s CONVERT TO CHARACTER SET utf8;" % table + sql += "SET foreign_key_checks = 1;" + sql += "ALTER DATABASE %s DEFAULT CHARACTER SET utf8;" \ + % migrate_engine.url.database + sql += "ALTER TABLE %s Engine=InnoDB;" % table + migrate_engine.execute(sql) + + +def downgrade(migrate_engine): + LOG.exception(_('Downgrade from initial Cinder install is unsupported.')) diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py b/cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py new file mode 100644 index 0000000000..fe911c0d40 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py @@ -0,0 +1,150 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 OpenStack LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import Boolean, Column, DateTime +from sqlalchemy import MetaData, Integer, String, Table, ForeignKey + +from cinder.openstack.common import log as logging + +LOG = logging.getLogger(__name__) + + +def upgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + # New table + quota_classes = Table('quota_classes', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, + name=None)), + Column('id', Integer(), primary_key=True), + Column('class_name', + String(length=255, + convert_unicode=True, + unicode_error=None, + _warn_on_bytestring=False), + index=True), + Column('resource', + String(length=255, + convert_unicode=True, + unicode_error=None, + _warn_on_bytestring=False)), + Column('hard_limit', Integer(), nullable=True), + mysql_engine='InnoDB', + mysql_charset='utf8', + ) + + try: + quota_classes.create() + except Exception: + LOG.error(_("Table |%s| not created!"), repr(quota_classes)) + raise + + quota_usages = Table('quota_usages', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, + name=None)), + Column('id', Integer(), primary_key=True), + Column('project_id', + String(length=255, convert_unicode=True, + unicode_error=None, + _warn_on_bytestring=False), + index=True), + Column('resource', + String(length=255, convert_unicode=True, + unicode_error=None, + _warn_on_bytestring=False)), + Column('in_use', Integer(), nullable=False), + Column('reserved', Integer(), nullable=False), + Column('until_refresh', Integer(), nullable=True), + mysql_engine='InnoDB', + mysql_charset='utf8', + ) + + try: + quota_usages.create() + except Exception: + LOG.error(_("Table |%s| not created!"), repr(quota_usages)) + raise + + reservations = Table('reservations', meta, + Column('created_at', 
+                                DateTime(timezone=False)),
+                         Column('updated_at', DateTime(timezone=False)),
+                         Column('deleted_at', DateTime(timezone=False)),
+                         Column('deleted', Boolean(create_constraint=True,
+                                                   name=None)),
+                         Column('id', Integer(), primary_key=True),
+                         Column('uuid',
+                                String(length=36,
+                                       convert_unicode=True,
+                                       unicode_error=None,
+                                       _warn_on_bytestring=False),
+                                nullable=False),
+                         Column('usage_id',
+                                Integer(),
+                                ForeignKey('quota_usages.id'),
+                                nullable=False),
+                         Column('project_id',
+                                String(length=255, convert_unicode=True,
+                                       unicode_error=None,
+                                       _warn_on_bytestring=False),
+                                index=True),
+                         Column('resource',
+                                String(length=255, convert_unicode=True,
+                                       unicode_error=None,
+                                       _warn_on_bytestring=False)),
+                         Column('delta', Integer(), nullable=False),
+                         Column('expire', DateTime(timezone=False)),
+                         mysql_engine='InnoDB',
+                         mysql_charset='utf8',
+                         )
+
+    try:
+        reservations.create()
+    except Exception:
+        LOG.error(_("Table |%s| not created!"), repr(reservations))
+        raise
+
+
+def downgrade(migrate_engine):
+    meta = MetaData()
+    meta.bind = migrate_engine
+
+    quota_classes = Table('quota_classes', meta, autoload=True)
+    try:
+        quota_classes.drop()
+    except Exception:
+        LOG.error(_("quota_classes table not dropped"))
+        raise
+
+    quota_usages = Table('quota_usages', meta, autoload=True)
+    try:
+        quota_usages.drop()
+    except Exception:
+        LOG.error(_("quota_usages table not dropped"))
+        raise
+
+    reservations = Table('reservations', meta, autoload=True)
+    try:
+        reservations.drop()
+    except Exception:
+        LOG.error(_("reservations table not dropped"))
+        raise
diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py b/cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py
new file mode 100644
index 0000000000..7d6a3b1d46
--- /dev/null
+++ b/cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py
@@ -0,0 +1,78 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import Column, DateTime, Text, Boolean
+from sqlalchemy import MetaData, Integer, String, Table, ForeignKey
+
+from cinder.openstack.common import log as logging
+
+LOG = logging.getLogger(__name__)
+
+
+def upgrade(migrate_engine):
+    meta = MetaData()
+    meta.bind = migrate_engine
+
+    # These are not the actual table definitions; they are declared here
+    # only so that the ForeignKey and column creation below can succeed.
+    #
+    volumes = Table('volumes',
+                    meta,
+                    Column('id', Integer(),
+                           primary_key=True, nullable=False),
+                    mysql_engine='InnoDB')
+    snapshots = Table('snapshots',
+                      meta,
+                      Column('id', Integer(),
+                             primary_key=True, nullable=False),
+                      mysql_engine='InnoDB')
+    # Create new table
+    volume_glance_metadata = Table(
+        'volume_glance_metadata',
+        meta,
+        Column('created_at', DateTime(timezone=False)),
+        Column('updated_at', DateTime(timezone=False)),
+        Column('deleted_at', DateTime(timezone=False)),
+        Column('deleted', Boolean(create_constraint=True, name=None)),
+        Column('id', Integer(), primary_key=True, nullable=False),
+        Column('volume_id', String(length=36), ForeignKey('volumes.id')),
+        Column('snapshot_id', String(length=36),
+               ForeignKey('snapshots.id')),
+        Column('key', String(255)),
+        Column('value', Text),
+        mysql_engine='InnoDB'
+    )
+
+    try:
+        volume_glance_metadata.create()
+    except Exception:
+        LOG.exception(_("Exception while creating table "
+                        "'volume_glance_metadata'"))
+        meta.drop_all(tables=[volume_glance_metadata])
+        raise
+
+
+def downgrade(migrate_engine):
+    meta = MetaData()
+    meta.bind = migrate_engine
+
+    volume_glance_metadata = Table('volume_glance_metadata',
+                                   meta, autoload=True)
+    try:
+        volume_glance_metadata.drop()
+    except Exception:
+        LOG.error(_("volume_glance_metadata table not dropped"))
+        raise
diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/004_volume_type_to_uuid.py b/cinder/db/sqlalchemy/migrate_repo/versions/004_volume_type_to_uuid.py
new file mode 100644
index 0000000000..202079b0be
--- /dev/null
+++ b/cinder/db/sqlalchemy/migrate_repo/versions/004_volume_type_to_uuid.py
@@ -0,0 +1,155 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
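
Migration 003 above gives volume_glance_metadata both a volume_id and a snapshot_id column, which is what lets the copy helpers earlier in this patch carry image metadata across the volume, snapshot, and new-volume chain. A hedged usage sketch, assuming the cinder.db façade re-exports these functions in the usual way and that the IDs reference existing rows:

    from cinder import context
    from cinder import db

    ctxt = context.get_admin_context()

    # 'vol-1' was created from a Glance image; record one metadata pair.
    db.volume_glance_metadata_create(ctxt, 'vol-1', 'image_name', 'fedora-17')

    # Snapshot the volume, then build a new volume from that snapshot;
    # each step copies the key/value pairs forward.
    db.volume_glance_metadata_copy_to_snapshot(ctxt, 'snap-1', 'vol-1')
    db.volume_glance_metadata_copy_to_volume(ctxt, 'vol-2', 'snap-1')
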
+ +import uuid + +from cinder.openstack.common import log as logging +from migrate import ForeignKeyConstraint +from sqlalchemy import Integer, MetaData, String, Table + +LOG = logging.getLogger(__name__) + + +def upgrade(migrate_engine): + """Convert volume_type_id to UUID.""" + meta = MetaData() + meta.bind = migrate_engine + + volumes = Table('volumes', meta, autoload=True) + volume_types = Table('volume_types', meta, autoload=True) + extra_specs = Table('volume_type_extra_specs', meta, autoload=True) + + fkey_remove_list = [volumes.c.volume_type_id, + volume_types.c.id, + extra_specs.c.volume_type_id] + + for column in fkey_remove_list: + fkeys = list(column.foreign_keys) + if fkeys: + fkey_name = fkeys[0].constraint.name + fkey = ForeignKeyConstraint(columns=[column], + refcolumns=[volume_types.c.id], + name=fkey_name) + + try: + fkey.drop() + except Exception: + if migrate_engine.url.get_dialect().name.startswith('sqlite'): + pass + else: + raise + + volumes.c.volume_type_id.alter(String(36)) + volume_types.c.id.alter(String(36)) + extra_specs.c.volume_type_id.alter(String(36)) + + vtype_list = list(volume_types.select().execute()) + for t in vtype_list: + new_id = str(uuid.uuid4()) + + volumes.update().\ + where(volumes.c.volume_type_id == t['id']).\ + values(volume_type_id=new_id).execute() + + extra_specs.update().\ + where(extra_specs.c.volume_type_id == t['id']).\ + values(volume_type_id=new_id).execute() + + volume_types.update().\ + where(volume_types.c.id == t['id']).\ + values(id=new_id).execute() + + for column in fkey_remove_list: + fkeys = list(column.foreign_keys) + if fkeys: + fkey_name = fkeys[0].constraint.name + fkey = ForeignKeyConstraint(columns=[column], + refcolumns=[volume_types.c.id], + name=fkey_name) + try: + fkey.create() + LOG.info('Created foreign key %s' % fkey_name) + except Exception: + if migrate_engine.url.get_dialect().name.startswith('sqlite'): + pass + else: + raise + + +def downgrade(migrate_engine): + """Convert volume_type from UUID back to int.""" + meta = MetaData() + meta.bind = migrate_engine + + volumes = Table('volumes', meta, autoload=True) + volume_types = Table('volume_types', meta, autoload=True) + extra_specs = Table('volume_type_extra_specs', meta, autoload=True) + + fkey_remove_list = [volumes.c.volume_type_id, + volume_types.c.id, + extra_specs.c.volume_type_id] + + for column in fkey_remove_list: + fkeys = list(column.foreign_keys) + if fkeys: + fkey_name = fkeys[0].constraint.name + fkey = ForeignKeyConstraint(columns=[column], + refcolumns=[volume_types.c.id], + name=fkey_name) + + try: + fkey.drop() + except Exception: + if migrate_engine.url.get_dialect().name.startswith('sqlite'): + pass + else: + raise + + vtype_list = list(volume_types.select().execute()) + new_id = 1 + + for t in vtype_list: + volumes.update().\ + where(volumes.c.volume_type_id == t['id']).\ + values(volume_type_id=new_id).execute() + + extra_specs.update().\ + where(extra_specs.c.volume_type_id == t['id']).\ + values(volume_type_id=new_id).execute() + + volume_types.update().\ + where(volume_types.c.id == t['id']).\ + values(id=new_id).execute() + + new_id += 1 + + volumes.c.volume_type_id.alter(Integer) + volume_types.c.id.alter(Integer) + extra_specs.c.volume_type_id.alter(Integer) + + for column in fkey_remove_list: + fkeys = list(column.foreign_keys) + if fkeys: + fkey_name = fkeys[0].constraint.name + fkey = ForeignKeyConstraint(columns=[column], + refcolumns=[volume_types.c.id], + name=fkey_name) + try: + fkey.create() + LOG.info('Created foreign 
key %s' % fkey_name) + except Exception: + if migrate_engine.url.get_dialect().name.startswith('sqlite'): + pass + else: + raise diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/005_add_source_volume_column.py b/cinder/db/sqlalchemy/migrate_repo/versions/005_add_source_volume_column.py new file mode 100644 index 0000000000..d20cda953c --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/005_add_source_volume_column.py @@ -0,0 +1,41 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +from cinder.openstack.common import log as logging +from sqlalchemy import Column +from sqlalchemy import MetaData, String, Table + +LOG = logging.getLogger(__name__) + + +def upgrade(migrate_engine): + """Add source volume id column to volumes.""" + meta = MetaData() + meta.bind = migrate_engine + + volumes = Table('volumes', meta, autoload=True) + source_volid = Column('source_volid', String(36)) + volumes.create_column(source_volid) + volumes.update().values(source_volid=None).execute() + + +def downgrade(migrate_engine): + """Remove source volume id column to volumes.""" + meta = MetaData() + meta.bind = migrate_engine + + volumes = Table('volumes', meta, autoload=True) + source_volid = Column('source_volid', String(36)) + volumes.drop_column(source_volid) diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/005_sqlite_downgrade.sql b/cinder/db/sqlalchemy/migrate_repo/versions/005_sqlite_downgrade.sql new file mode 100644 index 0000000000..c34f31752f --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/005_sqlite_downgrade.sql @@ -0,0 +1,124 @@ +BEGIN TRANSACTION; + +CREATE TEMPORARY TABLE volumes_backup ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id VARCHAR(36) NOT NULL, + ec2_id VARCHAR(255), + user_id VARCHAR(255), + project_id VARCHAR(255), + host VARCHAR(255), + size INTEGER, + availability_zone VARCHAR(255), + instance_uuid VARCHAR(36), + mountpoint VARCHAR(255), + attach_time VARCHAR(255), + status VARCHAR(255), + attach_status VARCHAR(255), + scheduled_at DATETIME, + launched_at DATETIME, + terminated_at DATETIME, + display_name VARCHAR(255), + display_description VARCHAR(255), + provider_location VARCHAR(256), + provider_auth VARCHAR(256), + snapshot_id VARCHAR(36), + volume_type_id VARCHAR(36), + source_volid VARCHAR(36), + PRIMARY KEY (id), + CHECK (deleted IN (0, 1)) +); + +INSERT INTO volumes_backup + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + ec2_id, + user_id, + project_id, + host, + size, + availability_zone, + instance_uuid, + mountpoint, + attach_time, + status, + attach_status, + scheduled_at, + launched_at, + terminated_at, + display_name, + display_description, + provider_location, + provider_auth, + snapshot_id, + volume_type_id, + source_volid + FROM volumes; + +DROP TABLE volumes; + +CREATE TABLE volumes ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id VARCHAR(36) NOT NULL, + ec2_id VARCHAR(255), 
+ user_id VARCHAR(255), + project_id VARCHAR(255), + host VARCHAR(255), + size INTEGER, + availability_zone VARCHAR(255), + instance_uuid VARCHAR(36), + mountpoint VARCHAR(255), + attach_time VARCHAR(255), + status VARCHAR(255), + attach_status VARCHAR(255), + scheduled_at DATETIME, + launched_at DATETIME, + terminated_at DATETIME, + display_name VARCHAR(255), + display_description VARCHAR(255), + provider_location VARCHAR(256), + provider_auth VARCHAR(256), + snapshot_id VARCHAR(36), + volume_type_id VARCHAR(36), + PRIMARY KEY (id), + CHECK (deleted IN (0, 1)) +); + +INSERT INTO volumes + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + ec2_id, + user_id, + project_id, + host, + size, + availability_zone, + instance_uuid, + mountpoint, + attach_time, + status, + attach_status, + scheduled_at, + launched_at, + terminated_at, + display_name, + display_description, + provider_location, + provider_auth, + snapshot_id, + volume_type_id + FROM volumes_backup; + +DROP TABLE volumes_backup; +COMMIT; diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/006_snapshots_add_provider_location.py b/cinder/db/sqlalchemy/migrate_repo/versions/006_snapshots_add_provider_location.py new file mode 100644 index 0000000000..ddd86d3228 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/006_snapshots_add_provider_location.py @@ -0,0 +1,36 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +from sqlalchemy import Column +from sqlalchemy import MetaData, String, Table + + +def upgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + snapshots = Table('snapshots', meta, autoload=True) + provider_location = Column('provider_location', String(255)) + snapshots.create_column(provider_location) + snapshots.update().values(provider_location=None).execute() + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + snapshots = Table('snapshots', meta, autoload=True) + provider_location = snapshots.columns.provider_location + snapshots.drop_column(provider_location) diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/007_add_volume_snapshot_fk.py b/cinder/db/sqlalchemy/migrate_repo/versions/007_add_volume_snapshot_fk.py new file mode 100644 index 0000000000..90cd67cd3b --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/007_add_volume_snapshot_fk.py @@ -0,0 +1,41 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
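
The 005_sqlite_downgrade.sql script above (and 007's below) exists because SQLite cannot ALTER TABLE to drop a column or a foreign key; sqlalchemy-migrate runs a dialect-specific NNN_sqlite_downgrade.sql in place of the Python downgrade when one is present. The rebuild idiom those scripts use can be sketched generically as follows; names are illustrative, and note the real scripts re-declare the full column list so types and constraints survive, whereas CREATE TABLE ... AS SELECT does not preserve them:

    def sqlite_rebuild_without(engine, table, keep_cols):
        """Shed columns/constraints by copying TABLE through a backup."""
        cols = ', '.join(keep_cols)
        for stmt in ("CREATE TEMPORARY TABLE %s_backup AS SELECT %s FROM %s"
                     % (table, cols, table),
                     "DROP TABLE %s" % table,
                     "CREATE TABLE %s AS SELECT %s FROM %s_backup"
                     % (table, cols, table),
                     "DROP TABLE %s_backup" % table):
            engine.execute(stmt)
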
+ + +from sqlalchemy import MetaData, Table +from migrate.changeset.constraint import ForeignKeyConstraint + + +def upgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + snapshots = Table('snapshots', meta, autoload=True) + volumes = Table('volumes', meta, autoload=True) + + ForeignKeyConstraint( + columns=[snapshots.c.volume_id], + refcolumns=[volumes.c.id]).create() + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + snapshots = Table('snapshots', meta, autoload=True) + volumes = Table('volumes', meta, autoload=True) + + ForeignKeyConstraint( + columns=[snapshots.c.volume_id], + refcolumns=[volumes.c.id]).drop() diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/007_sqlite_downgrade.sql b/cinder/db/sqlalchemy/migrate_repo/versions/007_sqlite_downgrade.sql new file mode 100644 index 0000000000..d2fe9b6930 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/007_sqlite_downgrade.sql @@ -0,0 +1,32 @@ +-- As sqlite does not support the DROP FOREIGN KEY, we need to create +-- the table, and move all the data to it. + +BEGIN TRANSACTION; + +CREATE TABLE snapshots_v6 ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id VARCHAR(36) NOT NULL, + volume_id VARCHAR(36) NOT NULL, + user_id VARCHAR(255), + project_id VARCHAR(255), + status VARCHAR(255), + progress VARCHAR(255), + volume_size INTEGER, + scheduled_at DATETIME, + display_name VARCHAR(255), + display_description VARCHAR(255), + provider_location VARCHAR(255), + PRIMARY KEY (id), + CHECK (deleted IN (0, 1)) +); + +INSERT INTO snapshots_v6 SELECT * FROM snapshots; + +DROP TABLE snapshots; + +ALTER TABLE snapshots_v6 RENAME TO snapshots; + +COMMIT; diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py b/cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py new file mode 100644 index 0000000000..5dfbed0f63 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py @@ -0,0 +1,95 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
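
Migration 007 above is the add-a-constraint pattern in miniature: migrate's ForeignKeyConstraint wraps ALTER TABLE ADD/DROP CONSTRAINT over tables reflected from the live database. Pulled out as a standalone sketch (the engine URL is assumed, not from this patch):

    from migrate.changeset.constraint import ForeignKeyConstraint
    from sqlalchemy import MetaData, Table, create_engine

    engine = create_engine('mysql://cinder@localhost/cinder')  # illustrative
    meta = MetaData(bind=engine)
    snapshots = Table('snapshots', meta, autoload=True)
    volumes = Table('volumes', meta, autoload=True)

    fkey = ForeignKeyConstraint(columns=[snapshots.c.volume_id],
                                refcolumns=[volumes.c.id])
    fkey.create()   # emits ADD CONSTRAINT; fkey.drop() reverses it
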
+ +from sqlalchemy import Boolean, Column, DateTime +from sqlalchemy import MetaData, Integer, String, Table + +from cinder.openstack.common import log as logging + +LOG = logging.getLogger(__name__) + + +def upgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + # New table + backups = Table( + 'backups', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', String(36), primary_key=True, nullable=False), + Column('volume_id', String(36), nullable=False), + Column('user_id', String(length=255, convert_unicode=False, + unicode_error=None, + _warn_on_bytestring=False)), + Column('project_id', String(length=255, convert_unicode=False, + unicode_error=None, + _warn_on_bytestring=False)), + Column('host', String(length=255, convert_unicode=False, + unicode_error=None, + _warn_on_bytestring=False)), + Column('availability_zone', String(length=255, + convert_unicode=False, + unicode_error=None, + _warn_on_bytestring=False)), + Column('display_name', String(length=255, convert_unicode=False, + unicode_error=None, + _warn_on_bytestring=False)), + Column('display_description', String(length=255, + convert_unicode=False, + unicode_error=None, + _warn_on_bytestring=False)), + Column('container', String(length=255, convert_unicode=False, + unicode_error=None, + _warn_on_bytestring=False)), + Column('status', String(length=255, convert_unicode=False, + unicode_error=None, + _warn_on_bytestring=False)), + Column('fail_reason', String(length=255, convert_unicode=False, + unicode_error=None, + _warn_on_bytestring=False)), + Column('service_metadata', String(length=255, convert_unicode=False, + unicode_error=None, + _warn_on_bytestring=False)), + Column('service', String(length=255, convert_unicode=False, + unicode_error=None, + _warn_on_bytestring=False)), + Column('size', Integer()), + Column('object_count', Integer()), + mysql_engine='InnoDB' + ) + + try: + backups.create() + except Exception: + LOG.error(_("Table |%s| not created!"), repr(backups)) + raise + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + backups = Table('backups', meta, autoload=True) + try: + backups.drop() + except Exception: + LOG.error(_("backups table not dropped")) + raise diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py b/cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py new file mode 100644 index 0000000000..3ccd79a232 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py @@ -0,0 +1,60 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
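
A note on the verbose String(...) declarations in 002 and 008: unicode_error=None, _warn_on_bytestring=False, and (in 008) convert_unicode=False are simply the SQLAlchemy 0.7-era defaults written out; 002 differs only in opting into convert_unicode=True. Under those defaults the two declarations below should describe the same column:

    from sqlalchemy import Column, String

    # Long form, as written in migration 008:
    Column('host', String(length=255, convert_unicode=False,
                          unicode_error=None, _warn_on_bytestring=False))

    # Equivalent under SQLAlchemy 0.7 defaults:
    Column('host', String(255))
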
+
+from sqlalchemy import Boolean, Column, DateTime
+from sqlalchemy import Integer, MetaData, String, Table, ForeignKey
+
+from cinder.openstack.common import log as logging
+
+LOG = logging.getLogger(__name__)
+
+
+def upgrade(migrate_engine):
+    meta = MetaData()
+    meta.bind = migrate_engine
+
+    snapshots = Table('snapshots', meta, autoload=True)
+
+    # New table
+    snapshot_metadata = Table(
+        'snapshot_metadata', meta,
+        Column('created_at', DateTime),
+        Column('updated_at', DateTime),
+        Column('deleted_at', DateTime),
+        Column('deleted', Boolean),
+        Column('id', Integer, primary_key=True, nullable=False),
+        Column('snapshot_id', String(length=36), ForeignKey('snapshots.id'),
+               nullable=False),
+        Column('key', String(length=255)),
+        Column('value', String(length=255)),
+        mysql_engine='InnoDB'
+    )
+
+    try:
+        snapshot_metadata.create()
+    except Exception:
+        LOG.error(_("Table |%s| not created!"), repr(snapshot_metadata))
+        raise
+
+
+def downgrade(migrate_engine):
+    meta = MetaData()
+    meta.bind = migrate_engine
+    snapshot_metadata = Table('snapshot_metadata',
+                              meta,
+                              autoload=True)
+    try:
+        snapshot_metadata.drop()
+    except Exception:
+        LOG.error(_("snapshot_metadata table not dropped"))
+        raise
diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/010_add_share_tables.py b/cinder/db/sqlalchemy/migrate_repo/versions/010_add_share_tables.py
new file mode 100644
index 0000000000..11914c58f4
--- /dev/null
+++ b/cinder/db/sqlalchemy/migrate_repo/versions/010_add_share_tables.py
@@ -0,0 +1,79 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2013 NetApp
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
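
The shares and share_access_map tables that 010 creates below back the share_* and share_access_* DB API functions earlier in this patch. A usage sketch, assuming the cinder.db façade re-exports those functions; the values dicts are illustrative:

    from cinder import context
    from cinder import db

    ctxt = context.get_admin_context()
    share = db.share_create(ctxt, {'project_id': 'demo',
                                   'size': 1,
                                   'share_proto': 'NFS',
                                   'status': 'creating'})
    db.share_access_create(ctxt, {'share_id': share['id'],
                                  'access_type': 'ip',
                                  'access_to': '10.0.0.0/24',
                                  'state': 'new'})
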
+ +from sqlalchemy import MetaData, Table, String, DateTime, Boolean +from sqlalchemy import Integer, Column, ForeignKey +from cinder.openstack.common import log as logging + +LOG = logging.getLogger(__name__) + + +def upgrade(migrate_engine): + """Create shares and share_access_map tables.""" + meta = MetaData() + meta.bind = migrate_engine + + shares = Table('shares', meta, + Column('created_at', DateTime), + Column('updated_at', DateTime), + Column('deleted_at', DateTime), + Column('deleted', Boolean), + Column('id', String(length=36), + primary_key=True, nullable=False), + Column('user_id', String(length=255)), + Column('project_id', String(length=255)), + Column('host', String(length=255)), + Column('size', Integer), + Column('availability_zone', + String(length=255)), + Column('status', String(length=255)), + Column('scheduled_at', DateTime), + Column('launched_at', DateTime), + Column('terminated_at', DateTime), + Column('display_name', String(length=255)), + Column('display_description', + String(length=255)), + Column('snapshot_id', String(length=36)), + Column('share_proto', String(255)), + Column('export_location', String(255)), + mysql_engine='InnoDB') + + access_map = Table('share_access_map', meta, + Column('created_at', DateTime), + Column('updated_at', DateTime), + Column('deleted_at', DateTime), + Column('deleted', Boolean), + Column('id', String(length=36), + primary_key=True, nullable=False), + Column('share_id', String(36), ForeignKey('shares.id'), + nullable=False), + Column('access_type', String(255)), + Column('access_to', String(255)), + Column('state', String(255)), + mysql_engine='InnoDB') + + shares.create() + access_map.create() + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + shares = Table('shares', meta, autoload=True) + access_map = Table('share_access_map', meta, autoload=True) + access_map.drop() + shares.drop() diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/011_add_share_snapshot_table.py b/cinder/db/sqlalchemy/migrate_repo/versions/011_add_share_snapshot_table.py new file mode 100644 index 0000000000..7eaff3c5c9 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/011_add_share_snapshot_table.py @@ -0,0 +1,69 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 NetApp +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
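
share_snapshot_data_get_for_project, defined earlier in this patch, pairs func.count with func.sum so quota accounting gets both figures from one SELECT against the share_snapshots table that 011 creates below. Reduced to its core (context checks and session plumbing omitted):

    from sqlalchemy import func

    from cinder.db.sqlalchemy import models
    from cinder.db.sqlalchemy.session import get_session

    def _snapshot_usage(project_id):
        session = get_session()
        row = session.query(func.count(models.ShareSnapshot.id),
                            func.sum(models.ShareSnapshot.share_size)).\
            filter(models.ShareSnapshot.project_id == project_id).\
            first()
        # A project with no snapshots yields (0, None); coalesce to zeros.
        return (row[0] or 0, row[1] or 0)
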
+
+from sqlalchemy.schema import Column, ForeignKey, MetaData, Table
+from sqlalchemy.types import Boolean, DateTime, Integer, String
+
+from cinder.openstack.common import log as logging
+
+
+LOG = logging.getLogger(__name__)
+
+
+def upgrade(migrate_engine):
+    """Create share_snapshots table."""
+    meta = MetaData()
+    meta.bind = migrate_engine
+
+    shares = Table('shares', meta, autoload=True)
+    share_snapshots = Table(
+        'share_snapshots', meta,
+        Column('created_at', DateTime),
+        Column('updated_at', DateTime),
+        Column('deleted_at', DateTime),
+        Column('deleted', Boolean),
+        Column('id', String(length=36), primary_key=True, nullable=False),
+        Column('user_id', String(length=255)),
+        Column('project_id', String(length=255)),
+        Column('share_id', String(36), ForeignKey('shares.id'),
+               nullable=False),
+        Column('status', String(length=255)),
+        Column('progress', String(length=255)),
+        Column('display_name', String(length=255)),
+        Column('display_description', String(length=255)),
+        Column('share_size', Integer),
+        Column('share_proto', String(length=255)),
+        Column('export_location', String(255)),
+        mysql_engine='InnoDB')
+
+    try:
+        share_snapshots.create()
+    except Exception:
+        LOG.error(_("Table %r not created!"), share_snapshots)
+        raise
+
+
+def downgrade(migrate_engine):
+    meta = MetaData()
+    meta.bind = migrate_engine
+
+    share_snapshots = Table('share_snapshots', meta, autoload=True)
+    try:
+        share_snapshots.drop()
+    except Exception:
+        LOG.error(_("share_snapshots table not dropped"))
+        raise
diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/__init__.py b/cinder/db/sqlalchemy/migrate_repo/versions/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/cinder/db/sqlalchemy/migration.py b/cinder/db/sqlalchemy/migration.py
new file mode 100644
index 0000000000..5365d9cdcd
--- /dev/null
+++ b/cinder/db/sqlalchemy/migration.py
@@ -0,0 +1,118 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
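
migration.py below is the programmatic face of this repository: db_sync() compares the requested version with db_version() to choose between upgrade and downgrade, and db_version() bootstraps version control on a database that is not yet tracked. Typical entry points, using only the functions defined below (the cinder-manage wiring that reaches them is conventional and not shown in this excerpt):

    from cinder.db.sqlalchemy import migration

    migration.db_sync()             # walk the schema up to the newest version
    migration.db_sync(version=4)    # or up/down to an explicit version
    print migration.db_version()    # current version from migrate_version
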
+ +import distutils.version as dist_version +import os + +from cinder.db import migration +from cinder.db.sqlalchemy.session import get_engine +from cinder import exception +from cinder import flags +from cinder.openstack.common import log as logging + + +import migrate +from migrate.versioning import util as migrate_util +import sqlalchemy + + +LOG = logging.getLogger(__name__) + + +@migrate_util.decorator +def patched_with_engine(f, *a, **kw): + url = a[0] + engine = migrate_util.construct_engine(url, **kw) + + try: + kw['engine'] = engine + return f(*a, **kw) + finally: + if isinstance(engine, migrate_util.Engine) and engine is not url: + migrate_util.log.debug('Disposing SQLAlchemy engine %s', engine) + engine.dispose() + + +# TODO(jkoelker) When migrate 0.7.3 is released and cinder depends +# on that version or higher, this can be removed +MIN_PKG_VERSION = dist_version.StrictVersion('0.7.3') +if (not hasattr(migrate, '__version__') or + dist_version.StrictVersion(migrate.__version__) < MIN_PKG_VERSION): + migrate_util.with_engine = patched_with_engine + + +# NOTE(jkoelker) Delay importing migrate until we are patched +from migrate import exceptions as versioning_exceptions +from migrate.versioning import api as versioning_api +from migrate.versioning.repository import Repository + +FLAGS = flags.FLAGS + +_REPOSITORY = None + + +def db_sync(version=None): + if version is not None: + try: + version = int(version) + except ValueError: + raise exception.Error(_("version should be an integer")) + + current_version = db_version() + repository = _find_migrate_repo() + if version is None or version > current_version: + return versioning_api.upgrade(get_engine(), repository, version) + else: + return versioning_api.downgrade(get_engine(), repository, + version) + + +def db_version(): + repository = _find_migrate_repo() + try: + return versioning_api.db_version(get_engine(), repository) + except versioning_exceptions.DatabaseNotControlledError: + # If we aren't version controlled we may already have the database + # in the state from before we started version control, check for that + # and set up version_control appropriately + meta = sqlalchemy.MetaData() + engine = get_engine() + meta.reflect(bind=engine) + tables = meta.tables + if len(tables) == 0: + db_version_control(migration.INIT_VERSION) + return versioning_api.db_version(get_engine(), repository) + else: + raise exception.Error(_("Upgrade DB using Essex release first.")) + + +def db_version_control(version=None): + repository = _find_migrate_repo() + versioning_api.version_control(get_engine(), repository, version) + return version + + +def _find_migrate_repo(): + """Get the path for the migrate repository.""" + global _REPOSITORY + path = os.path.join(os.path.abspath(os.path.dirname(__file__)), + 'migrate_repo') + assert os.path.exists(path) + if _REPOSITORY is None: + _REPOSITORY = Repository(path) + return _REPOSITORY diff --git a/cinder/db/sqlalchemy/models.py b/cinder/db/sqlalchemy/models.py new file mode 100644 index 0000000000..9798347902 --- /dev/null +++ b/cinder/db/sqlalchemy/models.py @@ -0,0 +1,529 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 X.commerce, a business unit of eBay Inc. +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# Copyright 2011 Piston Cloud Computing, Inc. +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +SQLAlchemy models for cinder data. +""" + +from sqlalchemy import Column, Integer, String, Text, schema +from sqlalchemy.exc import IntegrityError +from sqlalchemy.ext.declarative import declarative_base +from sqlalchemy import ForeignKey, DateTime, Boolean, Enum +from sqlalchemy.orm import relationship, backref, object_mapper + +from cinder.db.sqlalchemy.session import get_session + +from cinder import exception +from cinder import flags +from cinder.openstack.common import timeutils + + +FLAGS = flags.FLAGS +BASE = declarative_base() + + +class CinderBase(object): + """Base class for Cinder Models.""" + __table_args__ = {'mysql_engine': 'InnoDB'} + __table_initialized__ = False + created_at = Column(DateTime, default=timeutils.utcnow) + updated_at = Column(DateTime, onupdate=timeutils.utcnow) + deleted_at = Column(DateTime) + deleted = Column(Boolean, default=False) + metadata = None + + def save(self, session=None): + """Save this object.""" + if not session: + session = get_session() + session.add(self) + try: + session.flush() + except IntegrityError, e: + if str(e).endswith('is not unique'): + raise exception.Duplicate(str(e)) + else: + raise + + def delete(self, session=None): + """Delete this object.""" + self.deleted = True + self.deleted_at = timeutils.utcnow() + self.save(session=session) + + def __setitem__(self, key, value): + setattr(self, key, value) + + def __getitem__(self, key): + return getattr(self, key) + + def get(self, key, default=None): + return getattr(self, key, default) + + def __iter__(self): + self._i = iter(object_mapper(self).columns) + return self + + def next(self): + n = self._i.next().name + return n, getattr(self, n) + + def update(self, values): + """Make the model object behave like a dict.""" + for k, v in values.iteritems(): + setattr(self, k, v) + + def iteritems(self): + """Make the model object behave like a dict. 
+ + Includes attributes from joins.""" + local = dict(self) + joined = dict([(k, v) for k, v in self.__dict__.iteritems() + if not k[0] == '_']) + local.update(joined) + return local.iteritems() + + +class Service(BASE, CinderBase): + """Represents a running service on a host.""" + + __tablename__ = 'services' + id = Column(Integer, primary_key=True) + host = Column(String(255)) # , ForeignKey('hosts.id')) + binary = Column(String(255)) + topic = Column(String(255)) + report_count = Column(Integer, nullable=False, default=0) + disabled = Column(Boolean, default=False) + availability_zone = Column(String(255), default='cinder') + + +class CinderNode(BASE, CinderBase): + """Represents a running cinder service on a host.""" + + __tablename__ = 'cinder_nodes' + id = Column(Integer, primary_key=True) + service_id = Column(Integer, ForeignKey('services.id'), nullable=True) + + +class Volume(BASE, CinderBase): + """Represents a block storage device that can be attached to a vm.""" + __tablename__ = 'volumes' + id = Column(String(36), primary_key=True) + + @property + def name(self): + return FLAGS.volume_name_template % self.id + + ec2_id = Column(Integer) + user_id = Column(String(255)) + project_id = Column(String(255)) + + snapshot_id = Column(String(36)) + + host = Column(String(255)) # , ForeignKey('hosts.id')) + size = Column(Integer) + availability_zone = Column(String(255)) # TODO(vish): foreign key? + instance_uuid = Column(String(36)) + mountpoint = Column(String(255)) + attach_time = Column(String(255)) # TODO(vish): datetime + status = Column(String(255)) # TODO(vish): enum? + attach_status = Column(String(255)) # TODO(vish): enum + + scheduled_at = Column(DateTime) + launched_at = Column(DateTime) + terminated_at = Column(DateTime) + + display_name = Column(String(255)) + display_description = Column(String(255)) + + provider_location = Column(String(255)) + provider_auth = Column(String(255)) + + volume_type_id = Column(String(36)) + source_volid = Column(String(36)) + + +class VolumeMetadata(BASE, CinderBase): + """Represents a metadata key/value pair for a volume.""" + __tablename__ = 'volume_metadata' + id = Column(Integer, primary_key=True) + key = Column(String(255)) + value = Column(String(255)) + volume_id = Column(String(36), ForeignKey('volumes.id'), nullable=False) + volume = relationship(Volume, backref="volume_metadata", + foreign_keys=volume_id, + primaryjoin='and_(' + 'VolumeMetadata.volume_id == Volume.id,' + 'VolumeMetadata.deleted == False)') + + +class VolumeTypes(BASE, CinderBase): + """Represent possible volume_types of volumes offered.""" + __tablename__ = "volume_types" + id = Column(String(36), primary_key=True) + name = Column(String(255)) + + volumes = relationship(Volume, + backref=backref('volume_type', uselist=False), + foreign_keys=id, + primaryjoin='and_(' + 'Volume.volume_type_id == VolumeTypes.id, ' + 'VolumeTypes.deleted == False)') + + +class VolumeTypeExtraSpecs(BASE, CinderBase): + """Represents additional specs as key/value pairs for a volume_type.""" + __tablename__ = 'volume_type_extra_specs' + id = Column(Integer, primary_key=True) + key = Column(String(255)) + value = Column(String(255)) + volume_type_id = Column(String(36), + ForeignKey('volume_types.id'), + nullable=False) + volume_type = relationship( + VolumeTypes, + backref="extra_specs", + foreign_keys=volume_type_id, + primaryjoin='and_(' + 'VolumeTypeExtraSpecs.volume_type_id == VolumeTypes.id,' + 'VolumeTypeExtraSpecs.deleted == False)' + ) + + +class VolumeGlanceMetadata(BASE, 
CinderBase):
+    """Glance metadata for a bootable volume."""
+    __tablename__ = 'volume_glance_metadata'
+    id = Column(Integer, primary_key=True, nullable=False)
+    volume_id = Column(String(36), ForeignKey('volumes.id'))
+    snapshot_id = Column(String(36), ForeignKey('snapshots.id'))
+    key = Column(String(255))
+    value = Column(Text)
+    volume = relationship(Volume, backref="volume_glance_metadata",
+                          foreign_keys=volume_id,
+                          primaryjoin='and_('
+                          'VolumeGlanceMetadata.volume_id == Volume.id,'
+                          'VolumeGlanceMetadata.deleted == False)')
+
+
+class Quota(BASE, CinderBase):
+    """Represents a single quota override for a project.
+
+    If there is no row for a given project id and resource, then the
+    default for the quota class is used. If there is no row for a
+    given quota class and resource, then the default for the
+    deployment is used. If the row is present but the hard limit is
+    Null, then the resource is unlimited.
+    """
+
+    __tablename__ = 'quotas'
+    id = Column(Integer, primary_key=True)
+
+    project_id = Column(String(255), index=True)
+
+    resource = Column(String(255))
+    hard_limit = Column(Integer, nullable=True)
+
+
+class QuotaClass(BASE, CinderBase):
+    """Represents a single quota override for a quota class.
+
+    If there is no row for a given quota class and resource, then the
+    default for the deployment is used. If the row is present but the
+    hard limit is Null, then the resource is unlimited.
+    """
+
+    __tablename__ = 'quota_classes'
+    id = Column(Integer, primary_key=True)
+
+    class_name = Column(String(255), index=True)
+
+    resource = Column(String(255))
+    hard_limit = Column(Integer, nullable=True)
+
+
+class QuotaUsage(BASE, CinderBase):
+    """Represents the current usage for a given resource."""
+
+    __tablename__ = 'quota_usages'
+    id = Column(Integer, primary_key=True)
+
+    project_id = Column(String(255), index=True)
+    resource = Column(String(255))
+
+    in_use = Column(Integer)
+    reserved = Column(Integer)
+
+    @property
+    def total(self):
+        return self.in_use + self.reserved
+
+    until_refresh = Column(Integer, nullable=True)
+
+
+class Reservation(BASE, CinderBase):
+    """Represents a resource reservation for quotas."""
+
+    __tablename__ = 'reservations'
+    id = Column(Integer, primary_key=True)
+    uuid = Column(String(36), nullable=False)
+
+    usage_id = Column(Integer, ForeignKey('quota_usages.id'), nullable=False)
+
+    project_id = Column(String(255), index=True)
+    resource = Column(String(255))
+
+    delta = Column(Integer)
+    expire = Column(DateTime, nullable=False)
+
+
+class Snapshot(BASE, CinderBase):
+    """Represents a snapshot of a volume."""
+    __tablename__ = 'snapshots'
+    id = Column(String(36), primary_key=True)
+
+    @property
+    def name(self):
+        return FLAGS.snapshot_name_template % self.id
+
+    @property
+    def volume_name(self):
+        return FLAGS.volume_name_template % self.volume_id
+
+    user_id = Column(String(255))
+    project_id = Column(String(255))
+
+    volume_id = Column(String(36))
+    status = Column(String(255))
+    progress = Column(String(255))
+    volume_size = Column(Integer)
+
+    display_name = Column(String(255))
+    display_description = Column(String(255))
+
+    provider_location = Column(String(255))
+
+    volume = relationship(Volume, backref="snapshots",
+                          foreign_keys=volume_id,
+                          primaryjoin='and_('
+                          'Snapshot.volume_id == Volume.id,'
+                          'Snapshot.deleted == False)')
+
+
+class SnapshotMetadata(BASE, CinderBase):
+    """Represents a metadata key/value pair for a snapshot."""
+    __tablename__ = 'snapshot_metadata'
+    id = Column(Integer, primary_key=True)
+    key = Column(String(255))
+    value = Column(String(255))
+    snapshot_id = Column(String(36),
+                         ForeignKey('snapshots.id'),
+                         nullable=False)
+    snapshot = relationship(Snapshot, backref="snapshot_metadata",
+                            foreign_keys=snapshot_id,
+                            primaryjoin='and_('
+                            'SnapshotMetadata.snapshot_id == Snapshot.id,'
+                            'SnapshotMetadata.deleted == False)')
+
+
+class IscsiTarget(BASE, CinderBase):
+    """Represents an iscsi target for a given host."""
+    __tablename__ = 'iscsi_targets'
+    __table_args__ = (schema.UniqueConstraint("target_num", "host"),
+                      {'mysql_engine': 'InnoDB'})
+    id = Column(Integer, primary_key=True)
+    target_num = Column(Integer)
+    host = Column(String(255))
+    volume_id = Column(String(36), ForeignKey('volumes.id'), nullable=True)
+    volume = relationship(Volume,
+                          backref=backref('iscsi_target', uselist=False),
+                          foreign_keys=volume_id,
+                          primaryjoin='and_(IscsiTarget.volume_id==Volume.id,'
+                          'IscsiTarget.deleted==False)')
+
+
+class Migration(BASE, CinderBase):
+    """Represents a running host-to-host migration."""
+    __tablename__ = 'migrations'
+    id = Column(Integer, primary_key=True, nullable=False)
+    # NOTE(tr3buchet): the ____compute variables are instance['host']
+    source_compute = Column(String(255))
+    dest_compute = Column(String(255))
+    # NOTE(tr3buchet): dest_host, btw, is an ip address
+    dest_host = Column(String(255))
+    old_instance_type_id = Column(Integer())
+    new_instance_type_id = Column(Integer())
+    instance_uuid = Column(String(255),
+                           ForeignKey('instances.uuid'),
+                           nullable=True)
+    #TODO(_cerberus_): enum
+    status = Column(String(255))
+
+
+class SMFlavors(BASE, CinderBase):
+    """Represents a flavor for SM volumes."""
+    __tablename__ = 'sm_flavors'
+    id = Column(Integer(), primary_key=True)
+    label = Column(String(255))
+    description = Column(String(255))
+
+
+class SMBackendConf(BASE, CinderBase):
+    """Represents the connection to the backend for SM."""
+    __tablename__ = 'sm_backend_config'
+    id = Column(Integer(), primary_key=True)
+    flavor_id = Column(Integer, ForeignKey('sm_flavors.id'), nullable=False)
+    sr_uuid = Column(String(255))
+    sr_type = Column(String(255))
+    config_params = Column(String(2047))
+
+
+class SMVolume(BASE, CinderBase):
+    __tablename__ = 'sm_volume'
+    id = Column(String(36), ForeignKey(Volume.id), primary_key=True)
+    backend_id = Column(Integer, ForeignKey('sm_backend_config.id'),
+                        nullable=False)
+    vdi_uuid = Column(String(255))
+
+
+class Backup(BASE, CinderBase):
+    """Represents a backup of a volume to Swift."""
+    __tablename__ = 'backups'
+    id = Column(String(36), primary_key=True)
+
+    @property
+    def name(self):
+        return FLAGS.backup_name_template % self.id
+
+    user_id = Column(String(255), nullable=False)
+    project_id = Column(String(255), nullable=False)
+
+    volume_id = Column(String(36), nullable=False)
+    host = Column(String(255))
+    availability_zone = Column(String(255))
+    display_name = Column(String(255))
+    display_description = Column(String(255))
+    container = Column(String(255))
+    status = Column(String(255))
+    fail_reason = Column(String(255))
+    service_metadata = Column(String(255))
+    service = Column(String(255))
+    size = Column(Integer)
+    object_count = Column(Integer)
+
+
+class Share(BASE, CinderBase):
+    """Represents an NFS or CIFS share."""
+    __tablename__ = 'shares'
+
+    @property
+    def name(self):
+        return FLAGS.share_name_template % self.id
+
+    id = Column(String(36), primary_key=True)
+    user_id = Column(String(255))
+    project_id = Column(String(255))
+    host = Column(String(255))
+    size = Column(Integer)
+    availability_zone = Column(String(255))
+    status = Column(String(255))
+    scheduled_at = Column(DateTime)
+    launched_at = Column(DateTime)
+    terminated_at = Column(DateTime)
+    display_name = Column(String(255))
+    display_description = Column(String(255))
+    snapshot_id = Column(String(36))
+    share_proto = Column(String(255))
+    export_location = Column(String(255))
+
+
+class ShareAccessMapping(BASE, CinderBase):
+    """Represents access rules for an NFS or CIFS share."""
+    STATE_NEW = 'new'
+    STATE_ACTIVE = 'active'
+    STATE_DELETING = 'deleting'
+    STATE_DELETED = 'deleted'
+    STATE_ERROR = 'error'
+
+    __tablename__ = 'share_access_map'
+    id = Column(String(36), primary_key=True)
+    share_id = Column(String(36), ForeignKey('shares.id'))
+    access_type = Column(String(255))
+    access_to = Column(String(255))
+    state = Column(Enum(STATE_NEW, STATE_ACTIVE,
+                        STATE_DELETING, STATE_DELETED, STATE_ERROR),
+                   default=STATE_NEW)
+
+
+class ShareSnapshot(BASE, CinderBase):
+    """Represents a snapshot of a share."""
+    __tablename__ = 'share_snapshots'
+
+    @property
+    def name(self):
+        return FLAGS.share_snapshot_name_template % self.id
+
+    @property
+    def share_name(self):
+        return FLAGS.share_name_template % self.share_id
+
+    id = Column(String(36), primary_key=True)
+    user_id = Column(String(255))
+    project_id = Column(String(255))
+    share_id = Column(String(36))
+    status = Column(String(255))
+    progress = Column(String(255))
+    display_name = Column(String(255))
+    display_description = Column(String(255))
+    share_size = Column(Integer)
+    share_proto = Column(String(255))
+    export_location = Column(String(255))
+    share = relationship(Share, backref="snapshots",
+                         foreign_keys=share_id,
+                         primaryjoin='and_('
+                         'ShareSnapshot.share_id == Share.id,'
+                         'ShareSnapshot.deleted == False)')
+
+
+def register_models():
+    """Register Models and create metadata.
+
+    Called from cinder.db.sqlalchemy.__init__ as part of loading the driver,
+    it will never need to be called explicitly elsewhere unless the
+    connection is lost and needs to be reestablished.
+    """
+    from sqlalchemy import create_engine
+    models = (Backup,
+              Migration,
+              Service,
+              Share,
+              ShareAccessMapping,
+              ShareSnapshot,
+              SMBackendConf,
+              SMFlavors,
+              SMVolume,
+              Volume,
+              VolumeMetadata,
+              SnapshotMetadata,
+              VolumeTypeExtraSpecs,
+              VolumeTypes,
+              VolumeGlanceMetadata,
+              )
+    engine = create_engine(FLAGS.sql_connection, echo=False)
+    for model in models:
+        model.metadata.create_all(engine)
diff --git a/cinder/db/sqlalchemy/session.py b/cinder/db/sqlalchemy/session.py
new file mode 100644
index 0000000000..cd9045efae
--- /dev/null
+++ b/cinder/db/sqlalchemy/session.py
@@ -0,0 +1,151 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
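+
+# NOTE: illustrative only; the values are made up.  The models defined in
+# cinder/db/sqlalchemy/models.py above inherit dict-style behaviour from
+# CinderBase, so a row can be read and updated like a mapping:
+#
+#     share = Share(id='fake-uuid', display_name='demo', size=1)
+#     share['status'] = 'creating'            # __setitem__
+#     share.get('host', 'unset')              # get() with a default
+#     share.update({'status': 'available'})   # bulk update, dict-style
+#     dict(share)                             # __iter__/next() yield
+#                                             # (column name, value) pairs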
+ +"""Session Handling for SQLAlchemy backend.""" + +import time + +from sqlalchemy.exc import DisconnectionError, OperationalError +import sqlalchemy.interfaces +import sqlalchemy.orm +from sqlalchemy.pool import NullPool, StaticPool + +import cinder.exception +import cinder.flags as flags +from cinder.openstack.common import log as logging + + +FLAGS = flags.FLAGS +LOG = logging.getLogger(__name__) + +_ENGINE = None +_MAKER = None + + +def get_session(autocommit=True, expire_on_commit=False): + """Return a SQLAlchemy session.""" + global _MAKER + + if _MAKER is None: + engine = get_engine() + _MAKER = get_maker(engine, autocommit, expire_on_commit) + + session = _MAKER() + session.query = cinder.exception.wrap_db_error(session.query) + session.flush = cinder.exception.wrap_db_error(session.flush) + return session + + +def synchronous_switch_listener(dbapi_conn, connection_rec): + """Switch sqlite connections to non-synchronous mode""" + dbapi_conn.execute("PRAGMA synchronous = OFF") + + +def ping_listener(dbapi_conn, connection_rec, connection_proxy): + """ + Ensures that MySQL connections checked out of the + pool are alive. + + Borrowed from: + http://groups.google.com/group/sqlalchemy/msg/a4ce563d802c929f + """ + try: + dbapi_conn.cursor().execute('select 1') + except dbapi_conn.OperationalError, ex: + if ex.args[0] in (2006, 2013, 2014, 2045, 2055): + LOG.warn(_('Got mysql server has gone away: %s'), ex) + raise DisconnectionError("Database server went away") + else: + raise + + +def is_db_connection_error(args): + """Return True if error in connecting to db.""" + # NOTE(adam_g): This is currently MySQL specific and needs to be extended + # to support Postgres and others. + conn_err_codes = ('2002', '2003', '2006') + for err_code in conn_err_codes: + if args.find(err_code) != -1: + return True + return False + + +def get_engine(): + """Return a SQLAlchemy engine.""" + global _ENGINE + if _ENGINE is None: + connection_dict = sqlalchemy.engine.url.make_url(FLAGS.sql_connection) + + engine_args = { + "pool_recycle": FLAGS.sql_idle_timeout, + "echo": False, + 'convert_unicode': True, + } + + # Map our SQL debug level to SQLAlchemy's options + if FLAGS.sql_connection_debug >= 100: + engine_args['echo'] = 'debug' + elif FLAGS.sql_connection_debug >= 50: + engine_args['echo'] = True + + if "sqlite" in connection_dict.drivername: + engine_args["poolclass"] = NullPool + + if FLAGS.sql_connection == "sqlite://": + engine_args["poolclass"] = StaticPool + engine_args["connect_args"] = {'check_same_thread': False} + + _ENGINE = sqlalchemy.create_engine(FLAGS.sql_connection, **engine_args) + + if 'mysql' in connection_dict.drivername: + sqlalchemy.event.listen(_ENGINE, 'checkout', ping_listener) + elif "sqlite" in connection_dict.drivername: + if not FLAGS.sqlite_synchronous: + sqlalchemy.event.listen(_ENGINE, 'connect', + synchronous_switch_listener) + + try: + _ENGINE.connect() + except OperationalError, e: + if not is_db_connection_error(e.args[0]): + raise + + remaining = FLAGS.sql_max_retries + if remaining == -1: + remaining = 'infinite' + while True: + msg = _('SQL connection failed. 
%s attempts left.') + LOG.warn(msg % remaining) + if remaining != 'infinite': + remaining -= 1 + time.sleep(FLAGS.sql_retry_interval) + try: + _ENGINE.connect() + break + except OperationalError, e: + if ((remaining != 'infinite' and remaining == 0) or + not is_db_connection_error(e.args[0])): + raise + return _ENGINE + + +def get_maker(engine, autocommit=True, expire_on_commit=False): + """Return a SQLAlchemy sessionmaker using the given engine.""" + return sqlalchemy.orm.sessionmaker(bind=engine, + autocommit=autocommit, + expire_on_commit=expire_on_commit) diff --git a/cinder/exception.py b/cinder/exception.py new file mode 100644 index 0000000000..a56282f056 --- /dev/null +++ b/cinder/exception.py @@ -0,0 +1,614 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Cinder base exception handling. + +Includes decorator for re-raising Cinder-type exceptions. + +SHOULD include dedicated exception logging. + +""" + +from oslo.config import cfg +import webob.exc + +from cinder import flags +from cinder.openstack.common import log as logging + +LOG = logging.getLogger(__name__) + +exc_log_opts = [ + cfg.BoolOpt('fatal_exception_format_errors', + default=False, + help='make exception message format errors fatal'), +] + +FLAGS = flags.FLAGS +FLAGS.register_opts(exc_log_opts) + + +class ConvertedException(webob.exc.WSGIHTTPException): + def __init__(self, code=0, title="", explanation=""): + self.code = code + self.title = title + self.explanation = explanation + super(ConvertedException, self).__init__() + + +class ProcessExecutionError(IOError): + def __init__(self, stdout=None, stderr=None, exit_code=None, cmd=None, + description=None): + self.exit_code = exit_code + self.stderr = stderr + self.stdout = stdout + self.cmd = cmd + self.description = description + + if description is None: + description = _('Unexpected error while running command.') + if exit_code is None: + exit_code = '-' + message = _('%(description)s\nCommand: %(cmd)s\n' + 'Exit code: %(exit_code)s\nStdout: %(stdout)r\n' + 'Stderr: %(stderr)r') % locals() + IOError.__init__(self, message) + + +class Error(Exception): + pass + + +class DBError(Error): + """Wraps an implementation specific exception.""" + def __init__(self, inner_exception=None): + self.inner_exception = inner_exception + super(DBError, self).__init__(str(inner_exception)) + + +def wrap_db_error(f): + def _wrap(*args, **kwargs): + try: + return f(*args, **kwargs) + except UnicodeEncodeError: + raise InvalidUnicodeParameter() + except Exception, e: + LOG.exception(_('DB exception wrapped.')) + raise DBError(e) + _wrap.func_name = f.func_name + return _wrap + + +class CinderException(Exception): + """Base Cinder Exception + + To correctly use this class, inherit from it and define + a 'message' property. 
That message will get printf'd + with the keyword arguments provided to the constructor. + + """ + message = _("An unknown exception occurred.") + code = 500 + headers = {} + safe = False + + def __init__(self, message=None, **kwargs): + self.kwargs = kwargs + + if 'code' not in self.kwargs: + try: + self.kwargs['code'] = self.code + except AttributeError: + pass + + if not message: + try: + message = self.message % kwargs + + except Exception as e: + # kwargs doesn't match a variable in the message + # log the issue and the kwargs + LOG.exception(_('Exception in string format operation')) + for name, value in kwargs.iteritems(): + LOG.error("%s: %s" % (name, value)) + if FLAGS.fatal_exception_format_errors: + raise e + else: + # at least get the core message out if something happened + message = self.message + + super(CinderException, self).__init__(message) + + +class GlanceConnectionFailed(CinderException): + message = _("Connection to glance failed") + ": %(reason)s" + + +class NotAuthorized(CinderException): + message = _("Not authorized.") + code = 403 + + +class AdminRequired(NotAuthorized): + message = _("User does not have admin privileges") + + +class PolicyNotAuthorized(NotAuthorized): + message = _("Policy doesn't allow %(action)s to be performed.") + + +class ImageNotAuthorized(CinderException): + message = _("Not authorized for image %(image_id)s.") + + +class Invalid(CinderException): + message = _("Unacceptable parameters.") + code = 400 + + +class InvalidSnapshot(Invalid): + message = _("Invalid snapshot") + ": %(reason)s" + + +class VolumeAttached(Invalid): + message = _("Volume %(volume_id)s is still attached, detach volume first.") + + +class SfJsonEncodeFailure(CinderException): + message = _("Failed to load data into json format") + + +class InvalidRequest(Invalid): + message = _("The request is invalid.") + + +class InvalidResults(Invalid): + message = _("The results are invalid.") + + +class InvalidInput(Invalid): + message = _("Invalid input received") + ": %(reason)s" + + +class InvalidVolumeType(Invalid): + message = _("Invalid volume type") + ": %(reason)s" + + +class InvalidVolume(Invalid): + message = _("Invalid volume") + ": %(reason)s" + + +class InvalidContentType(Invalid): + message = _("Invalid content type %(content_type)s.") + + +class InvalidUnicodeParameter(Invalid): + message = _("Invalid Parameter: " + "Unicode is not supported by the current database.") + + +# Cannot be templated as the error syntax varies. +# msg needs to be constructed when raised. 
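+# For example (illustrative values only):
+#     raise InvalidParameterValue(err=_("timeout must be an integer"))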
+class InvalidParameterValue(Invalid):
+    message = _("%(err)s")
+
+
+class ServiceUnavailable(Invalid):
+    message = _("Service is unavailable at this time.")
+
+
+class ImageUnacceptable(Invalid):
+    message = _("Image %(image_id)s is unacceptable: %(reason)s")
+
+
+class InvalidUUID(Invalid):
+    message = _("Expected a uuid but received %(uuid)s.")
+
+
+class NotFound(CinderException):
+    message = _("Resource could not be found.")
+    code = 404
+    safe = True
+
+
+class PersistentVolumeFileNotFound(NotFound):
+    message = _("Volume %(volume_id)s persistence file could not be found.")
+
+
+class VolumeNotFound(NotFound):
+    message = _("Volume %(volume_id)s could not be found.")
+
+
+class SfAccountNotFound(NotFound):
+    message = _("Unable to locate account %(account_name)s on "
+                "Solidfire device")
+
+
+class VolumeNotFoundForInstance(VolumeNotFound):
+    message = _("Volume not found for instance %(instance_id)s.")
+
+
+class VolumeMetadataNotFound(NotFound):
+    message = _("Volume %(volume_id)s has no metadata with "
+                "key %(metadata_key)s.")
+
+
+class InvalidVolumeMetadata(Invalid):
+    message = _("Invalid metadata") + ": %(reason)s"
+
+
+class InvalidVolumeMetadataSize(Invalid):
+    message = _("Invalid metadata size") + ": %(reason)s"
+
+
+class SnapshotMetadataNotFound(NotFound):
+    message = _("Snapshot %(snapshot_id)s has no metadata with "
+                "key %(metadata_key)s.")
+
+
+class InvalidSnapshotMetadata(Invalid):
+    message = _("Invalid metadata") + ": %(reason)s"
+
+
+class InvalidSnapshotMetadataSize(Invalid):
+    message = _("Invalid metadata size") + ": %(reason)s"
+
+
+class VolumeTypeNotFound(NotFound):
+    message = _("Volume type %(volume_type_id)s could not be found.")
+
+
+class VolumeTypeNotFoundByName(VolumeTypeNotFound):
+    message = _("Volume type with name %(volume_type_name)s "
+                "could not be found.")
+
+
+class VolumeTypeExtraSpecsNotFound(NotFound):
+    message = _("Volume Type %(volume_type_id)s has no extra specs with "
+                "key %(extra_specs_key)s.")
+
+
+class SnapshotNotFound(NotFound):
+    message = _("Snapshot %(snapshot_id)s could not be found.")
+
+
+class VolumeIsBusy(CinderException):
+    message = _("deleting volume %(volume_name)s that has snapshots")
+
+
+class SnapshotIsBusy(CinderException):
+    message = _("deleting snapshot %(snapshot_name)s that has "
+                "dependent volumes")
+
+
+class ISCSITargetNotFoundForVolume(NotFound):
+    message = _("No target id found for volume %(volume_id)s.")
+
+
+class ISCSITargetCreateFailed(CinderException):
+    message = _("Failed to create iscsi target for volume %(volume_id)s.")
+
+
+class ISCSITargetAttachFailed(CinderException):
+    message = _("Failed to attach iSCSI target for volume %(volume_id)s.")
+
+
+class ISCSITargetRemoveFailed(CinderException):
+    message = _("Failed to remove iscsi target for volume %(volume_id)s.")
+
+
+class DiskNotFound(NotFound):
+    message = _("No disk at %(location)s")
+
+
+class InvalidImageRef(Invalid):
+    message = _("Invalid image href %(image_href)s.")
+
+
+class ImageNotFound(NotFound):
+    message = _("Image %(image_id)s could not be found.")
+
+
+class ServiceNotFound(NotFound):
+    message = _("Service %(service_id)s could not be found.")
+
+
+class HostNotFound(NotFound):
+    message = _("Host %(host)s could not be found.")
+
+
+class SchedulerHostFilterNotFound(NotFound):
+    message = _("Scheduler Host Filter %(filter_name)s could not be found.")
+
+
+class SchedulerHostWeigherNotFound(NotFound):
+    message = _("Scheduler Host Weigher %(weigher_name)s could not be found.")
+
+
+class HostBinaryNotFound(NotFound):
+    message = _("Could not find binary %(binary)s on host %(host)s.")
+
+
+class InvalidReservationExpiration(Invalid):
+    message = _("Invalid reservation expiration %(expire)s.")
+
+
+class InvalidQuotaValue(Invalid):
+    message = _("Change would make usage less than 0 for the following "
+                "resources: %(unders)s")
+
+
+class QuotaNotFound(NotFound):
+    message = _("Quota could not be found")
+
+
+class QuotaResourceUnknown(QuotaNotFound):
+    message = _("Unknown quota resources %(unknown)s.")
+
+
+class ProjectQuotaNotFound(QuotaNotFound):
+    message = _("Quota for project %(project_id)s could not be found.")
+
+
+class QuotaClassNotFound(QuotaNotFound):
+    message = _("Quota class %(class_name)s could not be found.")
+
+
+class QuotaUsageNotFound(QuotaNotFound):
+    message = _("Quota usage for project %(project_id)s could not be found.")
+
+
+class ReservationNotFound(QuotaNotFound):
+    message = _("Quota reservation %(uuid)s could not be found.")
+
+
+class OverQuota(CinderException):
+    message = _("Quota exceeded for resources: %(overs)s")
+
+
+class MigrationNotFound(NotFound):
+    message = _("Migration %(migration_id)s could not be found.")
+
+
+class MigrationNotFoundByStatus(MigrationNotFound):
+    message = _("Migration not found for instance %(instance_id)s "
+                "with status %(status)s.")
+
+
+class FileNotFound(NotFound):
+    message = _("File %(file_path)s could not be found.")
+
+
+class ClassNotFound(NotFound):
+    message = _("Class %(class_name)s could not be found: %(exception)s")
+
+
+class NotAllowed(CinderException):
+    message = _("Action not allowed.")
+
+
+#TODO(bcwaldon): EOL this exception!
+class Duplicate(CinderException):
+    pass
+
+
+class KeyPairExists(Duplicate):
+    message = _("Key pair %(key_name)s already exists.")
+
+
+class VolumeTypeExists(Duplicate):
+    message = _("Volume Type %(id)s already exists.")
+
+
+class MigrationError(CinderException):
+    message = _("Migration error") + ": %(reason)s"
+
+
+class MalformedRequestBody(CinderException):
+    message = _("Malformed message body: %(reason)s")
+
+
+class ConfigNotFound(NotFound):
+    message = _("Could not find config at %(path)s")
+
+
+class PasteAppNotFound(NotFound):
+    message = _("Could not load paste app '%(name)s' from %(path)s")
+
+
+class NoValidHost(CinderException):
+    message = _("No valid host was found. %(reason)s")
+
+
+class WillNotSchedule(CinderException):
+    message = _("Host %(host)s is not up or doesn't exist.")
+
+
+class QuotaError(CinderException):
+    message = _("Quota exceeded") + ": code=%(code)s"
+    code = 413
+    headers = {'Retry-After': 0}
+    safe = True
+
+
+class VolumeSizeExceedsAvailableQuota(QuotaError):
+    message = _("Requested volume or snapshot exceeds "
+                "allowed Gigabytes quota")
+
+
+class VolumeSizeExceedsQuota(QuotaError):
+    message = _("Maximum volume/snapshot size exceeded")
+
+
+class VolumeLimitExceeded(QuotaError):
+    message = _("Maximum number of volumes allowed (%(allowed)d) exceeded")
+
+
+class SnapshotLimitExceeded(QuotaError):
+    message = _("Maximum number of snapshots allowed (%(allowed)d) exceeded")
+
+
+class DuplicateSfVolumeNames(Duplicate):
+    message = _("Detected more than one volume with name %(vol_name)s")
+
+
+class Duplicate3PARHost(CinderException):
+    message = _("3PAR Host already exists: %(err)s.  %(info)s")
+
+
+class Invalid3PARDomain(CinderException):
+    message = _("Invalid 3PAR Domain: %(err)s")
+
+
+class VolumeTypeCreateFailed(CinderException):
+    message = _("Cannot create volume_type with "
+                "name %(name)s and specs %(extra_specs)s")
+
+
+class SolidFireAPIException(CinderException):
+    message = _("Bad response from SolidFire API")
+
+
+class SolidFireAPIDataException(SolidFireAPIException):
+    message = _("Error in SolidFire API response: data=%(data)s")
+
+
+class UnknownCmd(Invalid):
+    message = _("Unknown or unsupported command %(cmd)s")
+
+
+class MalformedResponse(Invalid):
+    message = _("Malformed response to command %(cmd)s: %(reason)s")
+
+
+class BadHTTPResponseStatus(CinderException):
+    message = _("Bad HTTP response status %(status)s")
+
+
+class FailedCmdWithDump(CinderException):
+    message = _("Operation failed with status=%(status)s. Full dump: %(data)s")
+
+
+class ZadaraServerCreateFailure(CinderException):
+    message = _("Unable to create server object for initiator %(name)s")
+
+
+class ZadaraServerNotFound(NotFound):
+    message = _("Unable to find server object for initiator %(name)s")
+
+
+class ZadaraVPSANoActiveController(CinderException):
+    message = _("Unable to find any active VPSA controller")
+
+
+class ZadaraAttachmentsNotFound(NotFound):
+    message = _("Failed to retrieve attachments for volume %(name)s")
+
+
+class ZadaraInvalidAttachmentInfo(Invalid):
+    message = _("Invalid attachment info for volume %(name)s: %(reason)s")
+
+
+class InstanceNotFound(NotFound):
+    message = _("Instance %(instance_id)s could not be found.")
+
+
+class VolumeBackendAPIException(CinderException):
+    message = _("Bad or unexpected response from the storage volume "
+                "backend API: %(data)s")
+
+
+class NfsException(CinderException):
+    message = _("Unknown NFS exception")
+
+
+class NfsNoSharesMounted(NotFound):
+    message = _("No mounted NFS shares found")
+
+
+class NfsNoSuitableShareFound(NotFound):
+    message = _("There is no share which can host %(volume_size)sG")
+
+
+class GlusterfsException(CinderException):
+    message = _("Unknown Gluster exception")
+
+
+class GlusterfsNoSharesMounted(NotFound):
+    message = _("No mounted Gluster shares found")
+
+
+class GlusterfsNoSuitableShareFound(NotFound):
+    message = _("There is no share which can host %(volume_size)sG")
+
+
+class GlanceMetadataExists(Invalid):
+    message = _("Glance metadata cannot be updated, key %(key)s"
+                " exists for volume id %(volume_id)s")
+
+
+class ImageCopyFailure(Invalid):
+    message = _("Failed to copy image to volume")
+
+
+class BackupNotFound(NotFound):
+    message = _("Backup %(backup_id)s could not be found.")
+
+
+class InvalidBackup(Invalid):
+    message = _("Invalid backup: %(reason)s")
+
+
+class InvalidShare(CinderException):
+    message = _("Invalid share: %(reason)s")
+
+
+class ShareAccessNotFound(NotFound):
+    message = _("Access_id %(access_id)s not found")
+
+
+class ShareAccessExists(Duplicate):
+    message = _("Share access %(access_type)s:%(access)s exists")
+
+
+class InvalidShareAccess(CinderException):
+    message = _("Invalid access_rule: %(reason)s")
+
+
+class ShareIsBusy(CinderException):
+    message = _("Deleting share %(share_name)s that is in use.")
+
+
+class ShareBackendException(CinderException):
+    message = _("Share backend error: %(msg)s")
+
+
+class ShareSnapshotNotFound(NotFound):
+    message = _("Snapshot %(snapshot_id)s could not be found.")
+
+
+class ShareSnapshotIsBusy(CinderException):
+    message = _("Deleting snapshot %(snapshot_name)s that has "
+                "dependent shares.")
+
+
+class InvalidShareSnapshot(CinderException): + message = _("Invalid share snapshot: %(reason)s") + + +class SwiftConnectionFailed(CinderException): + message = _("Connection to swift failed") + ": %(reason)s" diff --git a/cinder/flags.py b/cinder/flags.py new file mode 100644 index 0000000000..47daaa3ba9 --- /dev/null +++ b/cinder/flags.py @@ -0,0 +1,261 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# Copyright 2012 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Command-line flag library. + +Emulates gflags by wrapping cfg.ConfigOpts. + +The idea is to move fully to cfg eventually, and this wrapper is a +stepping stone. + +""" + +import os +import socket +import sys + +from oslo.config import cfg + +from cinder import version + +FLAGS = cfg.CONF + + +def parse_args(argv, default_config_files=None): + FLAGS(argv[1:], project='cinder', + version=version.version_string(), + default_config_files=default_config_files) + + +class UnrecognizedFlag(Exception): + pass + + +def DECLARE(name, module_string, flag_values=FLAGS): + if module_string not in sys.modules: + __import__(module_string, globals(), locals()) + if name not in flag_values: + raise UnrecognizedFlag('%s not defined by %s' % (name, module_string)) + + +def _get_my_ip(): + """ + Returns the actual ip of the local machine. + + This code figures out what source address would be used if some traffic + were to be sent out to some well known address on the Internet. In this + case, a Google DNS server is used, but the specific address does not + matter much. No traffic is actually sent. + """ + try: + csock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) + csock.connect(('8.8.8.8', 80)) + (addr, port) = csock.getsockname() + csock.close() + return addr + except socket.error: + return "127.0.0.1" + + +core_opts = [ + cfg.StrOpt('connection_type', + default=None, + help='Virtualization api connection type : libvirt, xenapi, ' + 'or fake'), + cfg.StrOpt('sql_connection', + default='sqlite:///$state_path/$sqlite_db', + help='The SQLAlchemy connection string used to connect to the ' + 'database', + secret=True), + cfg.IntOpt('sql_connection_debug', + default=0, + help='Verbosity of SQL debugging information. 
0=None, '
+                    '100=Everything'),
+    cfg.StrOpt('api_paste_config',
+               default="api-paste.ini",
+               help='File name for the paste.deploy config for cinder-api'),
+    cfg.StrOpt('pybasedir',
+               default=os.path.abspath(os.path.join(os.path.dirname(__file__),
+                                                    '../')),
+               help='Directory where the cinder python module is installed'),
+    cfg.StrOpt('bindir',
+               default='$pybasedir/bin',
+               help='Directory where cinder binaries are installed'),
+    cfg.StrOpt('state_path',
+               default='$pybasedir',
+               help="Top-level directory for maintaining cinder's state"), ]
+
+debug_opts = [
+]
+
+FLAGS.register_cli_opts(core_opts)
+FLAGS.register_cli_opts(debug_opts)
+
+global_opts = [
+    cfg.StrOpt('my_ip',
+               default=_get_my_ip(),
+               help='ip address of this host'),
+    cfg.StrOpt('glance_host',
+               default='$my_ip',
+               help='default glance hostname or ip'),
+    cfg.IntOpt('glance_port',
+               default=9292,
+               help='default glance port'),
+    cfg.ListOpt('glance_api_servers',
+                default=['$glance_host:$glance_port'],
+                help='A list of the glance api servers available to cinder '
+                     '([hostname|ip]:port)'),
+    cfg.IntOpt('glance_api_version',
+               default=1,
+               help='Version of the glance api to use'),
+    cfg.IntOpt('glance_num_retries',
+               default=0,
+               help='Number of retries when downloading an image from '
+                    'glance'),
+    cfg.BoolOpt('glance_api_insecure',
+                default=False,
+                help='Allow performing insecure SSL (https) requests to '
+                     'glance'),
+    cfg.StrOpt('scheduler_topic',
+               default='cinder-scheduler',
+               help='the topic scheduler nodes listen on'),
+    cfg.StrOpt('volume_topic',
+               default='cinder-volume',
+               help='the topic volume nodes listen on'),
+    cfg.StrOpt('backup_topic',
+               default='cinder-backup',
+               help='the topic volume backup nodes listen on'),
+    cfg.StrOpt('share_topic',
+               default='cinder-share',
+               help='the topic share nodes listen on'),
+    cfg.BoolOpt('enable_v1_api',
+                default=True,
+                help=_("Deploy v1 of the Cinder API. ")),
+    cfg.BoolOpt('enable_v2_api',
+                default=True,
+                help=_("Deploy v2 of the Cinder API. ")),
+    cfg.BoolOpt('api_rate_limit',
+                default=True,
+                help='whether to rate limit the api'),
+    cfg.ListOpt('osapi_volume_ext_list',
+                default=[],
+                help='Specify list of extensions to load when using osapi_'
+                     'volume_extension option with cinder.api.contrib.'
+                     'select_extensions'),
+    cfg.MultiStrOpt('osapi_volume_extension',
+                    default=['cinder.api.contrib.standard_extensions'],
+                    help='osapi volume extension to load'),
+    cfg.StrOpt('osapi_volume_base_URL',
+               default=None,
+               help='Base URL that will be presented to users in links '
+                    'to the OpenStack Volume API',
+               deprecated_name='osapi_compute_link_prefix'),
+    cfg.IntOpt('osapi_max_limit',
+               default=1000,
+               help='the maximum number of items returned in a single '
+                    'response from a collection resource'),
+    cfg.StrOpt('sqlite_db',
+               default='cinder.sqlite',
+               help='the filename to use with sqlite'),
+    cfg.BoolOpt('sqlite_synchronous',
+                default=True,
+                help='If passed, use synchronous mode for sqlite'),
+    cfg.IntOpt('sql_idle_timeout',
+               default=3600,
+               help='timeout before idle sql connections are reaped'),
+    cfg.IntOpt('sql_max_retries',
+               default=10,
+               help='maximum db connection retries during startup. 
' + '(setting -1 implies an infinite retry count)'), + cfg.IntOpt('sql_retry_interval', + default=10, + help='interval between retries of opening a sql connection'), + cfg.StrOpt('volume_manager', + default='cinder.volume.manager.VolumeManager', + help='full class name for the Manager for volume'), + cfg.StrOpt('backup_manager', + default='cinder.backup.manager.BackupManager', + help='full class name for the Manager for volume backup'), + cfg.StrOpt('scheduler_manager', + default='cinder.scheduler.manager.SchedulerManager', + help='full class name for the Manager for scheduler'), + cfg.StrOpt('share_manager', + default='cinder.share.manager.ShareManager', + help='full class name for the Manager for share'), + cfg.StrOpt('host', + default=socket.gethostname(), + help='Name of this node. This can be an opaque identifier. ' + 'It is not necessarily a hostname, FQDN, or IP address.'), + # NOTE(vish): default to nova for compatibility with nova installs + cfg.StrOpt('storage_availability_zone', + default='nova', + help='availability zone of this node'), + cfg.ListOpt('memcached_servers', + default=None, + help='Memcached servers or None for in process cache.'), + cfg.StrOpt('default_volume_type', + default=None, + help='default volume type to use'), + cfg.StrOpt('volume_usage_audit_period', + default='month', + help='time period to generate volume usages for. ' + 'Time period must be hour, day, month or year'), + cfg.StrOpt('root_helper', + default='sudo', + help='Deprecated: command to use for running commands as root'), + cfg.StrOpt('rootwrap_config', + default=None, + help='Path to the rootwrap configuration file to use for ' + 'running commands as root'), + cfg.BoolOpt('monkey_patch', + default=False, + help='Whether to log monkey patching'), + cfg.ListOpt('monkey_patch_modules', + default=[], + help='List of modules/decorators to monkey patch'), + cfg.IntOpt('service_down_time', + default=60, + help='maximum time since last check-in for up service'), + cfg.StrOpt('volume_api_class', + default='cinder.volume.api.API', + help='The full class name of the volume API class to use'), + cfg.StrOpt('backup_api_class', + default='cinder.backup.api.API', + help='The full class name of the volume backup API class'), + cfg.StrOpt('share_api_class', + default='cinder.share.api.API', + help='The full class name of the share API class to use'), + cfg.StrOpt('auth_strategy', + default='noauth', + help='The strategy to use for auth. Supports noauth, keystone, ' + 'and deprecated.'), + cfg.ListOpt('enabled_backends', + default=None, + help='A list of backend names to use. These backend names ' + 'should be backed by a unique [CONFIG] group ' + 'with its options'), + cfg.ListOpt('enabled_share_backends', + default=None, + help='A list of share backend names to use. These backend ' + 'names should be backed by a unique [CONFIG] group ' + 'with its options'), + cfg.BoolOpt('no_snapshot_gb_quota', + default=False, + help='Whether snapshots count against GigaByte quota'), ] + +FLAGS.register_opts(global_opts) diff --git a/cinder/image/__init__.py b/cinder/image/__init__.py new file mode 100644 index 0000000000..7affa08d4c --- /dev/null +++ b/cinder/image/__init__.py @@ -0,0 +1,16 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2012 OpenStack, LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/cinder/image/glance.py b/cinder/image/glance.py new file mode 100644 index 0000000000..bfec5288e7 --- /dev/null +++ b/cinder/image/glance.py @@ -0,0 +1,460 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Implementation of an image service that uses Glance as the backend""" + +from __future__ import absolute_import + +import copy +import itertools +import random +import sys +import time +import urlparse + +import glanceclient +import glanceclient.exc + +from cinder import exception +from cinder import flags +from cinder.openstack.common import jsonutils +from cinder.openstack.common import log as logging +from cinder.openstack.common import timeutils + + +LOG = logging.getLogger(__name__) +FLAGS = flags.FLAGS + + +def _parse_image_ref(image_href): + """Parse an image href into composite parts. + + :param image_href: href of an image + :returns: a tuple of the form (image_id, host, port) + :raises ValueError + + """ + url = urlparse.urlparse(image_href) + port = url.port or 80 + host = url.netloc.split(':', 1)[0] + image_id = url.path.split('/')[-1] + use_ssl = (url.scheme == 'https') + return (image_id, host, port, use_ssl) + + +def _create_glance_client(context, host, port, use_ssl, + version=FLAGS.glance_api_version): + """Instantiate a new glanceclient.Client object""" + if version is None: + version = FLAGS.glance_api_version + if use_ssl: + scheme = 'https' + else: + scheme = 'http' + params = {} + params['insecure'] = FLAGS.glance_api_insecure + if FLAGS.auth_strategy == 'keystone': + params['token'] = context.auth_token + endpoint = '%s://%s:%s' % (scheme, host, port) + return glanceclient.Client(str(version), endpoint, **params) + + +def get_api_servers(): + """ + Shuffle a list of FLAGS.glance_api_servers and return an iterator + that will cycle through the list, looping around to the beginning + if necessary. 
+ """ + api_servers = [] + for api_server in FLAGS.glance_api_servers: + if '//' not in api_server: + api_server = 'http://' + api_server + url = urlparse.urlparse(api_server) + port = url.port or 80 + host = url.netloc.split(':', 1)[0] + use_ssl = (url.scheme == 'https') + api_servers.append((host, port, use_ssl)) + random.shuffle(api_servers) + return itertools.cycle(api_servers) + + +class GlanceClientWrapper(object): + """Glance client wrapper class that implements retries.""" + + def __init__(self, context=None, host=None, port=None, use_ssl=False, + version=None): + if host is not None: + self.client = self._create_static_client(context, + host, port, + use_ssl, version) + else: + self.client = None + self.api_servers = None + self.version = version + + def _create_static_client(self, context, host, port, use_ssl, version): + """Create a client that we'll use for every call.""" + self.host = host + self.port = port + self.use_ssl = use_ssl + self.version = version + return _create_glance_client(context, + self.host, self.port, + self.use_ssl, self.version) + + def _create_onetime_client(self, context, version): + """Create a client that will be used for one call.""" + if self.api_servers is None: + self.api_servers = get_api_servers() + self.host, self.port, self.use_ssl = self.api_servers.next() + return _create_glance_client(context, + self.host, self.port, + self.use_ssl, version) + + def call(self, context, method, *args, **kwargs): + """ + Call a glance client method. If we get a connection error, + retry the request according to FLAGS.glance_num_retries. + """ + version = self.version + if version in kwargs: + version = kwargs['version'] + + retry_excs = (glanceclient.exc.ServiceUnavailable, + glanceclient.exc.InvalidEndpoint, + glanceclient.exc.CommunicationError) + num_attempts = 1 + FLAGS.glance_num_retries + + for attempt in xrange(1, num_attempts + 1): + client = self.client or self._create_onetime_client(context, + version) + try: + return getattr(client.images, method)(*args, **kwargs) + except retry_excs as e: + host = self.host + port = self.port + extra = "retrying" + error_msg = _("Error contacting glance server " + "'%(host)s:%(port)s' for '%(method)s', " + "%(extra)s.") + if attempt == num_attempts: + extra = 'done trying' + LOG.exception(error_msg, locals()) + raise exception.GlanceConnectionFailed(host=host, + port=port, + reason=str(e)) + LOG.exception(error_msg, locals()) + time.sleep(1) + + +class GlanceImageService(object): + """Provides storage and retrieval of disk image objects within Glance.""" + + def __init__(self, client=None): + self._client = client or GlanceClientWrapper() + + def detail(self, context, **kwargs): + """Calls out to Glance for a list of detailed image information.""" + params = self._extract_query_params(kwargs) + try: + images = self._client.call(context, 'list', **params) + except Exception: + _reraise_translated_exception() + + _images = [] + for image in images: + if self._is_image_available(context, image): + _images.append(self._translate_from_glance(image)) + + return _images + + def _extract_query_params(self, params): + _params = {} + accepted_params = ('filters', 'marker', 'limit', + 'sort_key', 'sort_dir') + for param in accepted_params: + if param in params: + _params[param] = params.get(param) + + # ensure filters is a dict + _params.setdefault('filters', {}) + # NOTE(vish): don't filter out private images + _params['filters'].setdefault('is_public', 'none') + + return _params + + def show(self, context, image_id): + 
"""Returns a dict with image data for the given opaque image id.""" + try: + image = self._client.call(context, 'get', image_id) + except Exception: + _reraise_translated_image_exception(image_id) + + if not self._is_image_available(context, image): + raise exception.ImageNotFound(image_id=image_id) + + base_image_meta = self._translate_from_glance(image) + return base_image_meta + + def get_location(self, context, image_id): + """Returns the direct url representing the backend storage location, + or None if this attribute is not shown by Glance.""" + try: + client = GlanceClientWrapper() + image_meta = client.call(context, 'get', image_id) + except Exception: + _reraise_translated_image_exception(image_id) + + if not self._is_image_available(context, image_meta): + raise exception.ImageNotFound(image_id=image_id) + + return getattr(image_meta, 'direct_url', None) + + def download(self, context, image_id, data): + """Calls out to Glance for metadata and data and writes data.""" + try: + image_chunks = self._client.call(context, 'data', image_id) + except Exception: + _reraise_translated_image_exception(image_id) + + for chunk in image_chunks: + data.write(chunk) + + def create(self, context, image_meta, data=None): + """Store the image data and return the new image object.""" + sent_service_image_meta = self._translate_to_glance(image_meta) + + if data: + sent_service_image_meta['data'] = data + + recv_service_image_meta = self._client.call(context, 'create', + **sent_service_image_meta) + + return self._translate_from_glance(recv_service_image_meta) + + def update(self, context, image_id, + image_meta, data=None, purge_props=True): + """Modify the given image with the new data.""" + image_meta = self._translate_to_glance(image_meta) + image_meta['purge_props'] = purge_props + #NOTE(bcwaldon): id is not an editable field, but it is likely to be + # passed in by calling code. Let's be nice and ignore it. + image_meta.pop('id', None) + if data: + image_meta['data'] = data + try: + image_meta = self._client.call(context, 'update', image_id, + **image_meta) + except Exception: + _reraise_translated_image_exception(image_id) + else: + return self._translate_from_glance(image_meta) + + def delete(self, context, image_id): + """Delete the given image. + + :raises: ImageNotFound if the image does not exist. + :raises: NotAuthorized if the user is not an owner. + + """ + try: + self._client.call(context, 'delete', image_id) + except glanceclient.exc.NotFound: + raise exception.ImageNotFound(image_id=image_id) + return True + + @staticmethod + def _translate_to_glance(image_meta): + image_meta = _convert_to_string(image_meta) + image_meta = _remove_read_only(image_meta) + return image_meta + + @staticmethod + def _translate_from_glance(image): + image_meta = _extract_attributes(image) + image_meta = _convert_timestamps_to_datetimes(image_meta) + image_meta = _convert_from_string(image_meta) + return image_meta + + @staticmethod + def _is_image_available(context, image): + """Check image availability. + + This check is needed in case Nova and Glance are deployed + without authentication turned on. + """ + # The presence of an auth token implies this is an authenticated + # request and we need not handle the noauth use-case. 
+ if hasattr(context, 'auth_token') and context.auth_token: + return True + + if image.is_public or context.is_admin: + return True + + properties = image.properties + + if context.project_id and ('owner_id' in properties): + return str(properties['owner_id']) == str(context.project_id) + + if context.project_id and ('project_id' in properties): + return str(properties['project_id']) == str(context.project_id) + + try: + user_id = properties['user_id'] + except KeyError: + return False + + return str(user_id) == str(context.user_id) + + +def _convert_timestamps_to_datetimes(image_meta): + """Returns image with timestamp fields converted to datetime objects.""" + for attr in ['created_at', 'updated_at', 'deleted_at']: + if image_meta.get(attr): + image_meta[attr] = timeutils.parse_isotime(image_meta[attr]) + return image_meta + + +# NOTE(bcwaldon): used to store non-string data in glance metadata +def _json_loads(properties, attr): + prop = properties[attr] + if isinstance(prop, basestring): + properties[attr] = jsonutils.loads(prop) + + +def _json_dumps(properties, attr): + prop = properties[attr] + if not isinstance(prop, basestring): + properties[attr] = jsonutils.dumps(prop) + + +_CONVERT_PROPS = ('block_device_mapping', 'mappings') + + +def _convert(method, metadata): + metadata = copy.deepcopy(metadata) + properties = metadata.get('properties') + if properties: + for attr in _CONVERT_PROPS: + if attr in properties: + method(properties, attr) + + return metadata + + +def _convert_from_string(metadata): + return _convert(_json_loads, metadata) + + +def _convert_to_string(metadata): + return _convert(_json_dumps, metadata) + + +def _extract_attributes(image): + IMAGE_ATTRIBUTES = ['size', 'disk_format', 'owner', + 'container_format', 'checksum', 'id', + 'name', 'created_at', 'updated_at', + 'deleted_at', 'deleted', 'status', + 'min_disk', 'min_ram', 'is_public'] + output = {} + for attr in IMAGE_ATTRIBUTES: + output[attr] = getattr(image, attr, None) + + output['properties'] = getattr(image, 'properties', {}) + + return output + + +def _remove_read_only(image_meta): + IMAGE_ATTRIBUTES = ['status', 'updated_at', 'created_at', 'deleted_at'] + output = copy.deepcopy(image_meta) + for attr in IMAGE_ATTRIBUTES: + if attr in output: + del output[attr] + return output + + +def _reraise_translated_image_exception(image_id): + """Transform the exception for the image but keep its traceback intact.""" + exc_type, exc_value, exc_trace = sys.exc_info() + new_exc = _translate_image_exception(image_id, exc_value) + raise new_exc, None, exc_trace + + +def _reraise_translated_exception(): + """Transform the exception but keep its traceback intact.""" + exc_type, exc_value, exc_trace = sys.exc_info() + new_exc = _translate_plain_exception(exc_value) + raise new_exc, None, exc_trace + + +def _translate_image_exception(image_id, exc_value): + if isinstance(exc_value, (glanceclient.exc.Forbidden, + glanceclient.exc.Unauthorized)): + return exception.ImageNotAuthorized(image_id=image_id) + if isinstance(exc_value, glanceclient.exc.NotFound): + return exception.ImageNotFound(image_id=image_id) + if isinstance(exc_value, glanceclient.exc.BadRequest): + return exception.Invalid(exc_value) + return exc_value + + +def _translate_plain_exception(exc_value): + if isinstance(exc_value, (glanceclient.exc.Forbidden, + glanceclient.exc.Unauthorized)): + return exception.NotAuthorized(exc_value) + if isinstance(exc_value, glanceclient.exc.NotFound): + return exception.NotFound(exc_value) + if isinstance(exc_value, 
+                  glanceclient.exc.BadRequest):
+        return exception.Invalid(exc_value)
+    return exc_value
+
+
+def get_remote_image_service(context, image_href):
+    """Create an image_service and parse the id from the given image_href.
+
+    The image_href param can be an href of the form
+    'http://example.com:9292/v1/images/b8b2c6f7-7345-4e2f-afa2-eedaba9cbbe3',
+    or just an id such as 'b8b2c6f7-7345-4e2f-afa2-eedaba9cbbe3'.  If the
+    image_href is a standalone id, then the default image service is
+    returned.
+
+    :param image_href: href that describes the location of an image
+    :returns: a tuple of the form (image_service, image_id)
+
+    """
+    # NOTE(bcwaldon): If image_href doesn't look like a URI, assume it's a
+    # standalone image ID
+    if '/' not in str(image_href):
+        image_service = get_default_image_service()
+        return image_service, image_href
+
+    try:
+        (image_id, glance_host, glance_port, use_ssl) = \
+            _parse_image_ref(image_href)
+        glance_client = GlanceClientWrapper(context=context,
+                                            host=glance_host,
+                                            port=glance_port,
+                                            use_ssl=use_ssl)
+    except ValueError:
+        raise exception.InvalidImageRef(image_href=image_href)
+
+    image_service = GlanceImageService(client=glance_client)
+    return image_service, image_id
+
+
+def get_default_image_service():
+    return GlanceImageService()
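+
+
+# For illustration only (not part of the original module): resolving
+# 'http://glance.example.com:9292/v1/images/<uuid>' through
+# get_remote_image_service yields a GlanceImageService bound to
+# glance.example.com:9292 together with the parsed image id, while a
+# bare '<uuid>' yields the default image service and the id unchanged
+# (glance.example.com is a hypothetical host).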
diff --git a/cinder/image/image_utils.py b/cinder/image/image_utils.py
new file mode 100644
index 0000000000..417f2b90c9
--- /dev/null
+++ b/cinder/image/image_utils.py
@@ -0,0 +1,283 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+# Copyright (c) 2010 Citrix Systems, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Helper methods to deal with images.
+
+This is essentially a copy of nova.virt.images.py, with some slight
+modifications; at some point we should look at pushing this up to Oslo.
+"""
+
+import os
+import re
+import tempfile
+
+from oslo.config import cfg
+
+from cinder import exception
+from cinder import flags
+from cinder.openstack.common import log as logging
+from cinder import utils
+
+LOG = logging.getLogger(__name__)
+
+image_helper_opt = [cfg.StrOpt('image_conversion_dir',
+                               default='/tmp',
+                               help='parent dir for tempdir used for image '
+                                    'conversion'), ]
+
+FLAGS = flags.FLAGS
+FLAGS.register_opts(image_helper_opt)
+
+
+class QemuImgInfo(object):
+    BACKING_FILE_RE = re.compile((r"^(.*?)\s*\(actual\s+path\s*:"
+                                  r"\s+(.*?)\)\s*$"), re.I)
+    TOP_LEVEL_RE = re.compile(r"^([\w\d\s\_\-]+):(.*)$")
+    SIZE_RE = re.compile(r"\(\s*(\d+)\s+bytes\s*\)", re.I)
+
+    def __init__(self, cmd_output):
+        details = self._parse(cmd_output)
+        self.image = details.get('image')
+        self.backing_file = details.get('backing_file')
+        self.file_format = details.get('file_format')
+        self.virtual_size = details.get('virtual_size')
+        self.cluster_size = details.get('cluster_size')
+        self.disk_size = details.get('disk_size')
+        self.snapshots = details.get('snapshot_list', [])
+        self.encryption = details.get('encryption')
+
+    def __str__(self):
+        lines = [
+            'image: %s' % self.image,
+            'file_format: %s' % self.file_format,
+            'virtual_size: %s' % self.virtual_size,
+            'disk_size: %s' % self.disk_size,
+            'cluster_size: %s' % self.cluster_size,
+            'backing_file: %s' % self.backing_file,
+        ]
+        if self.snapshots:
+            lines.append("snapshots: %s" % self.snapshots)
+        return "\n".join(lines)
+
+    def _canonicalize(self, field):
+        # Standardize on lowercase with underscores (no dashes or spaces),
+        # since qemu-img output is inconsistent here; this format also
+        # integrates better with Python, e.g. for usage in kwargs.
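+        # For illustration only (not part of the original code): this maps
+        # qemu-img keys such as 'virtual size' to 'virtual_size' and
+        # 'backing file' to 'backing_file'.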
+        field = field.lower().strip()
+        for c in (" ", "-"):
+            field = field.replace(c, '_')
+        return field
+
+    def _extract_bytes(self, details):
+        # Replace it with the byte amount
+        real_size = self.SIZE_RE.search(details)
+        if real_size:
+            details = real_size.group(1)
+        try:
+            details = utils.to_bytes(details)
+        except (TypeError, ValueError):
+            pass
+        return details
+
+    def _extract_details(self, root_cmd, root_details, lines_after):
+        consumed_lines = 0
+        real_details = root_details
+        if root_cmd == 'backing_file':
+            # Replace it with the real backing file
+            backing_match = self.BACKING_FILE_RE.match(root_details)
+            if backing_match:
+                real_details = backing_match.group(2).strip()
+        elif root_cmd in ['virtual_size', 'cluster_size', 'disk_size']:
+            # Replace it with the byte amount (if we can convert it)
+            real_details = self._extract_bytes(root_details)
+        elif root_cmd == 'file_format':
+            real_details = real_details.strip().lower()
+        elif root_cmd == 'snapshot_list':
+            # Next line should be a header, starting with 'ID'
+            if not lines_after or not lines_after[0].startswith("ID"):
+                msg = _("Snapshot list encountered but no header found!")
+                raise ValueError(msg)
+            consumed_lines += 1
+            possible_contents = lines_after[1:]
+            real_details = []
+            # This is the sprintf pattern we will try to match
+            # "%-10s%-20s%7s%20s%15s"
+            # ID TAG VM SIZE DATE VM CLOCK (current header)
+            for line in possible_contents:
+                line_pieces = line.split(None)
+                if len(line_pieces) != 6:
+                    break
+                else:
+                    # Check against this pattern occurring in the final
+                    # position: "%02d:%02d:%02d.%03d"
+                    date_pieces = line_pieces[5].split(":")
+                    if len(date_pieces) != 3:
+                        break
+                    real_details.append({
+                        'id': line_pieces[0],
+                        'tag': line_pieces[1],
+                        'vm_size': line_pieces[2],
+                        'date': line_pieces[3],
+                        'vm_clock': line_pieces[4] + " " + line_pieces[5],
+                    })
+                    consumed_lines += 1
+        return (real_details, consumed_lines)
+
+    def _parse(self, cmd_output):
+        # Based on an analysis of qemu-img.c: find all lines that start
+        # with some characters followed by a ':', then handle the results
+        # of those 'top level' items in a separate function.
+        #
+        # TODO(harlowja): newer versions might have a json output format
+        # we should switch to that whenever possible.
+        # see: http://bit.ly/XLJXDX
+        if not cmd_output:
+            cmd_output = ''
+        contents = {}
+        lines = cmd_output.splitlines()
+        i = 0
+        line_am = len(lines)
+        while i < line_am:
+            line = lines[i]
+            if not line.strip():
+                i += 1
+                continue
+            consumed_lines = 0
+            top_level = self.TOP_LEVEL_RE.match(line)
+            if top_level:
+                root = self._canonicalize(top_level.group(1))
+                if not root:
+                    i += 1
+                    continue
+                root_details = top_level.group(2).strip()
+                details, consumed_lines = self._extract_details(root,
+                                                                root_details,
+                                                                lines[i + 1:])
+                contents[root] = details
+            i += consumed_lines + 1
+        return contents
+
+
+def qemu_img_info(path):
+    """Return an object containing the parsed output from qemu-img info."""
+    out, err = utils.execute('env', 'LC_ALL=C', 'LANG=C',
+                             'qemu-img', 'info', path,
+                             run_as_root=True)
+    return QemuImgInfo(out)
+
+
+def convert_image(source, dest, out_format):
+    """Convert an image to the specified format."""
+    cmd = ('qemu-img', 'convert', '-O', out_format, source, dest)
+    utils.execute(*cmd, run_as_root=True)
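+
+
+# For illustration only (not part of the original module): converting a
+# qcow2 image to raw with the helper above amounts to
+#     convert_image('/tmp/img.qcow2', '/dev/vg/lv', 'raw')
+# which runs 'qemu-img convert -O raw /tmp/img.qcow2 /dev/vg/lv' as root
+# ('/tmp/img.qcow2' and '/dev/vg/lv' are hypothetical paths).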
+
+
+def fetch(context, image_service, image_id, path, _user_id, _project_id):
+    # TODO(vish): Improve context handling and add owner and auth data
+    #             when it is added to glance.  Right now there is no
+    #             auth checking in glance, so we assume that access was
+    #             checked before we got here.
+    with utils.remove_path_on_error(path):
+        with open(path, "wb") as image_file:
+            image_service.download(context, image_id, image_file)
+
+
+def fetch_to_raw(context, image_service,
+                 image_id, dest,
+                 user_id=None, project_id=None):
+    if (FLAGS.image_conversion_dir and not
+            os.path.exists(FLAGS.image_conversion_dir)):
+        os.makedirs(FLAGS.image_conversion_dir)
+
+    # NOTE(avishay): I'm not crazy about creating temp files which may be
+    # large and cause disk full errors which would confuse users.
+    # Unfortunately it seems that you can't pipe to 'qemu-img convert'
+    # because it seeks.  Maybe we can think of something for a future
+    # version.
+    fd, tmp = tempfile.mkstemp(dir=FLAGS.image_conversion_dir)
+    os.close(fd)
+    with utils.remove_path_on_error(tmp):
+        fetch(context, image_service, image_id, tmp, user_id, project_id)
+
+        data = qemu_img_info(tmp)
+        fmt = data.file_format
+        if fmt is None:
+            raise exception.ImageUnacceptable(
+                reason=_("'qemu-img info' parsing failed."),
+                image_id=image_id)
+
+        backing_file = data.backing_file
+        if backing_file is not None:
+            raise exception.ImageUnacceptable(
+                image_id=image_id,
+                reason=_("fmt=%(fmt)s backed by:"
+                         "%(backing_file)s") % locals())
+
+        # NOTE(jdg): I'm using qemu-img convert to write to the volume
+        # regardless of whether it *needs* conversion or not.
+        # TODO(avishay): We can speed this up by checking if the image is
+        # raw and if so, writing directly to the device.  However, we would
+        # still need to check via 'qemu-img info' that what we copied is in
+        # fact a raw image and not a different format with a backing file,
+        # which may be malicious.
+        LOG.debug("%s was %s, converting to raw" % (image_id, fmt))
+        convert_image(tmp, dest, 'raw')
+
+        data = qemu_img_info(dest)
+        if data.file_format != "raw":
+            raise exception.ImageUnacceptable(
+                image_id=image_id,
+                reason=_("Converted to raw, but format is now %s") %
+                data.file_format)
+        os.unlink(tmp)
+
+
+def upload_volume(context, image_service, image_meta, volume_path):
+    image_id = image_meta['id']
+    if image_meta['disk_format'] == 'raw':
+        LOG.debug("%s was raw, no need to convert to %s" %
+                  (image_id, image_meta['disk_format']))
+        with utils.temporary_chown(volume_path):
+            with utils.file_open(volume_path) as image_file:
+                image_service.update(context, image_id, {}, image_file)
+        return
+
+    if (FLAGS.image_conversion_dir and not
+            os.path.exists(FLAGS.image_conversion_dir)):
+        os.makedirs(FLAGS.image_conversion_dir)
+
+    fd, tmp = tempfile.mkstemp(dir=FLAGS.image_conversion_dir)
+    os.close(fd)
+    with utils.remove_path_on_error(tmp):
+        LOG.debug("%s was raw, converting to %s" %
+                  (image_id, image_meta['disk_format']))
+        convert_image(volume_path, tmp, image_meta['disk_format'])
+
+        data = qemu_img_info(tmp)
+        if data.file_format != image_meta['disk_format']:
+            raise exception.ImageUnacceptable(
+                image_id=image_id,
+                reason=_("Converted to %(f1)s, but format is now %(f2)s") %
+                {'f1': image_meta['disk_format'], 'f2': data.file_format})
+
+        with utils.file_open(tmp) as image_file:
+            image_service.update(context, image_id, {}, image_file)
+        os.unlink(tmp)
diff --git a/cinder/locale/bg_BG/LC_MESSAGES/cinder.po b/cinder/locale/bg_BG/LC_MESSAGES/cinder.po
new file mode 100644
index 0000000000..99d199eec5
--- /dev/null
+++ b/cinder/locale/bg_BG/LC_MESSAGES/cinder.po
@@ -0,0 +1,5574 @@
+# Bulgarian (Bulgaria) translations for cinder.
+# Copyright (C) 2013 ORGANIZATION +# This file is distributed under the same license as the cinder project. +# +# Translators: +msgid "" +msgstr "" +"Project-Id-Version: Cinder\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2013-05-21 18:10+0000\n" +"PO-Revision-Date: 2013-05-08 11:44+0000\n" +"Last-Translator: FULL NAME \n" +"Language-Team: Bulgarian (Bulgaria) " +"(http://www.transifex.com/projects/p/openstack/language/bg_BG/)\n" +"Plural-Forms: nplurals=2; plural=(n != 1)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 0.9.6\n" + +#: cinder/context.py:61 +#, python-format +msgid "Arguments dropped when creating context: %s" +msgstr "" + +#: cinder/context.py:92 +#, python-format +msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" +msgstr "" + +#: cinder/exception.py:63 +msgid "Unexpected error while running command." +msgstr "" + +#: cinder/exception.py:66 +#, python-format +msgid "" +"%(description)s\n" +"Command: %(cmd)s\n" +"Exit code: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" +msgstr "" + +#: cinder/exception.py:90 +msgid "DB exception wrapped." +msgstr "" + +#: cinder/exception.py:104 +msgid "An unknown exception occurred." +msgstr "" + +#: cinder/exception.py:125 cinder/openstack/common/rpc/common.py:86 +msgid "Exception in string format operation" +msgstr "" + +#: cinder/exception.py:138 +msgid "Connection to glance failed" +msgstr "" + +#: cinder/exception.py:142 +msgid "Not authorized." +msgstr "" + +#: cinder/exception.py:147 +msgid "User does not have admin privileges" +msgstr "" + +#: cinder/exception.py:151 +#, python-format +msgid "Policy doesn't allow %(action)s to be performed." +msgstr "" + +#: cinder/exception.py:155 +#, python-format +msgid "Not authorized for image %(image_id)s." +msgstr "" + +#: cinder/exception.py:159 +msgid "Unacceptable parameters." +msgstr "" + +#: cinder/exception.py:164 +msgid "Invalid snapshot" +msgstr "" + +#: cinder/exception.py:168 +#, python-format +msgid "Volume %(volume_id)s is still attached, detach volume first." +msgstr "" + +#: cinder/exception.py:172 +msgid "Failed to load data into json format" +msgstr "" + +#: cinder/exception.py:176 +msgid "The request is invalid." +msgstr "" + +#: cinder/exception.py:180 +msgid "The results are invalid." +msgstr "" + +#: cinder/exception.py:184 +msgid "Invalid input received" +msgstr "" + +#: cinder/exception.py:188 +msgid "Invalid volume type" +msgstr "" + +#: cinder/exception.py:192 +msgid "Invalid volume" +msgstr "" + +#: cinder/exception.py:196 +#, python-format +msgid "Invalid content type %(content_type)s." +msgstr "" + +#: cinder/exception.py:200 +msgid "Invalid Parameter: Unicode is not supported by the current database." +msgstr "" + +#: cinder/exception.py:207 +#, python-format +msgid "%(err)s" +msgstr "" + +#: cinder/exception.py:211 +msgid "Service is unavailable at this time." +msgstr "" + +#: cinder/exception.py:215 +#, python-format +msgid "Image %(image_id)s is unacceptable: %(reason)s" +msgstr "" + +#: cinder/exception.py:219 +msgid "Expected a uuid but received %(uuid)." +msgstr "" + +#: cinder/exception.py:223 +msgid "Resource could not be found." +msgstr "" + +#: cinder/exception.py:229 +#, python-format +msgid "Volume %(volume_id)s persistence file could not be found." +msgstr "" + +#: cinder/exception.py:233 +#, python-format +msgid "Volume %(volume_id)s could not be found." 
+msgstr "" + +#: cinder/exception.py:237 +#, python-format +msgid "Unable to locate account %(account_name)s on Solidfire device" +msgstr "" + +#: cinder/exception.py:242 +#, python-format +msgid "Volume not found for instance %(instance_id)s." +msgstr "" + +#: cinder/exception.py:246 +#, python-format +msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:251 cinder/exception.py:264 +msgid "Invalid metadata" +msgstr "" + +#: cinder/exception.py:255 cinder/exception.py:268 +msgid "Invalid metadata size" +msgstr "" + +#: cinder/exception.py:259 +#, python-format +msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:272 +#, python-format +msgid "Volume type %(volume_type_id)s could not be found." +msgstr "" + +#: cinder/exception.py:276 +#, python-format +msgid "Volume type with name %(volume_type_name)s could not be found." +msgstr "" + +#: cinder/exception.py:281 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s has no extra specs with key " +"%(extra_specs_key)s." +msgstr "" + +#: cinder/exception.py:286 +#, python-format +msgid "Snapshot %(snapshot_id)s could not be found." +msgstr "" + +#: cinder/exception.py:290 +#, python-format +msgid "deleting volume %(volume_name)s that has snapshot" +msgstr "" + +#: cinder/exception.py:294 +#, python-format +msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" +msgstr "" + +#: cinder/exception.py:299 +#, python-format +msgid "No target id found for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:303 +#, python-format +msgid "Failed to create iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:307 +#, python-format +msgid "Failed to attach iSCSI target for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:311 +#, python-format +msgid "Failed to remove iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:315 +#, python-format +msgid "No disk at %(location)s" +msgstr "" + +#: cinder/exception.py:319 +#, python-format +msgid "Invalid image href %(image_href)s." +msgstr "" + +#: cinder/exception.py:323 +#, python-format +msgid "Image %(image_id)s could not be found." +msgstr "" + +#: cinder/exception.py:327 +#, python-format +msgid "Service %(service_id)s could not be found." +msgstr "" + +#: cinder/exception.py:331 +#, python-format +msgid "Host %(host)s could not be found." +msgstr "" + +#: cinder/exception.py:335 +#, python-format +msgid "Scheduler Host Filter %(filter_name)s could not be found." +msgstr "" + +#: cinder/exception.py:339 +#, python-format +msgid "Scheduler Host Weigher %(weigher_name)s could not be found." +msgstr "" + +#: cinder/exception.py:343 +#, python-format +msgid "Could not find binary %(binary)s on host %(host)s." +msgstr "" + +#: cinder/exception.py:347 +#, python-format +msgid "Invalid reservation expiration %(expire)s." +msgstr "" + +#: cinder/exception.py:351 +#, python-format +msgid "" +"Change would make usage less than 0 for the following resources: " +"%(unders)s" +msgstr "" + +#: cinder/exception.py:356 +msgid "Quota could not be found" +msgstr "" + +#: cinder/exception.py:360 +#, python-format +msgid "Unknown quota resources %(unknown)s." +msgstr "" + +#: cinder/exception.py:364 +#, python-format +msgid "Quota for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:368 +#, python-format +msgid "Quota class %(class_name)s could not be found." 
+msgstr "" + +#: cinder/exception.py:372 +#, python-format +msgid "Quota usage for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:376 +#, python-format +msgid "Quota reservation %(uuid)s could not be found." +msgstr "" + +#: cinder/exception.py:380 +#, python-format +msgid "Quota exceeded for resources: %(overs)s" +msgstr "" + +#: cinder/exception.py:384 +#, python-format +msgid "Migration %(migration_id)s could not be found." +msgstr "" + +#: cinder/exception.py:388 +#, python-format +msgid "Migration not found for instance %(instance_id)s with status %(status)s." +msgstr "" + +#: cinder/exception.py:393 +#, python-format +msgid "File %(file_path)s could not be found." +msgstr "" + +#: cinder/exception.py:397 +#, python-format +msgid "Class %(class_name)s could not be found: %(exception)s" +msgstr "" + +#: cinder/exception.py:401 +msgid "Action not allowed." +msgstr "" + +#: cinder/exception.py:410 +#, python-format +msgid "Key pair %(key_name)s already exists." +msgstr "" + +#: cinder/exception.py:414 +#, python-format +msgid "Volume Type %(id)s already exists." +msgstr "" + +#: cinder/exception.py:418 +msgid "Migration error" +msgstr "" + +#: cinder/exception.py:422 +#, python-format +msgid "Malformed message body: %(reason)s" +msgstr "" + +#: cinder/exception.py:426 +#, python-format +msgid "Could not find config at %(path)s" +msgstr "" + +#: cinder/exception.py:430 +#, python-format +msgid "Could not load paste app '%(name)s' from %(path)s" +msgstr "" + +#: cinder/exception.py:434 +#, python-format +msgid "No valid host was found. %(reason)s" +msgstr "" + +#: cinder/exception.py:438 +#, python-format +msgid "Host %(host)s is not up or doesn't exist." +msgstr "" + +#: cinder/exception.py:442 +msgid "Quota exceeded" +msgstr "" + +#: cinder/exception.py:449 +msgid "Requested volume or snapshot exceeds allowed Gigabytes quota" +msgstr "" + +#: cinder/exception.py:454 +msgid "Maximum volume/snapshot size exceeded" +msgstr "" + +#: cinder/exception.py:458 +#, python-format +msgid "Maximum number of volumes allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:462 +#, python-format +msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:466 +#, python-format +msgid "Detected more than one volume with name %(vol_name)s" +msgstr "" + +#: cinder/exception.py:470 +#, python-format +msgid "3PAR Host already exists: %(err)s. %(info)s" +msgstr "" + +#: cinder/exception.py:474 +#, python-format +msgid "Invalid 3PAR Domain: %(err)s" +msgstr "" + +#: cinder/exception.py:478 +#, python-format +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +msgstr "" + +#: cinder/exception.py:483 +msgid "Bad response from SolidFire API" +msgstr "" + +#: cinder/exception.py:487 +#, python-format +msgid "Error in SolidFire API response: data=%(data)s" +msgstr "" + +#: cinder/exception.py:491 +#, python-format +msgid "Unknown or unsupported command %(cmd)s" +msgstr "" + +#: cinder/exception.py:495 +#, python-format +msgid "Malformed response to command %(cmd)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:499 +#, python-format +msgid "Bad HTTP response status %(status)s" +msgstr "" + +#: cinder/exception.py:503 +#, python-format +msgid "Operation failed with status=%(status)s. 
Full dump: %(data)s" +msgstr "" + +#: cinder/exception.py:507 +#, python-format +msgid "Unable to create server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:511 +#, python-format +msgid "Unable to find server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:515 +msgid "Unable to find any active VPSA controller" +msgstr "" + +#: cinder/exception.py:519 +#, python-format +msgid "Failed to retrieve attachments for volume %(name)s" +msgstr "" + +#: cinder/exception.py:523 +#, python-format +msgid "Invalid attachment info for volume %(name)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:527 +#, python-format +msgid "Instance %(instance_id)s could not be found." +msgstr "" + +#: cinder/exception.py:531 +#, python-format +msgid "Bad or unexpected response from the storage volume backend API: %(data)s" +msgstr "" + +#: cinder/exception.py:536 +msgid "Unknown NFS exception" +msgstr "" + +#: cinder/exception.py:540 +msgid "No mounted NFS shares found" +msgstr "" + +#: cinder/exception.py:544 cinder/exception.py:556 +#, python-format +msgid "There is no share which can host %(volume_size)sG" +msgstr "" + +#: cinder/exception.py:548 +msgid "Unknown Gluster exception" +msgstr "" + +#: cinder/exception.py:552 +msgid "No mounted Gluster shares found" +msgstr "" + +#: cinder/exception.py:560 +#, python-format +msgid "" +"Glance metadata cannot be updated, key %(key)s exists for volume id " +"%(volume_id)s" +msgstr "" + +#: cinder/exception.py:565 +msgid "Failed to copy image to volume" +msgstr "" + +#: cinder/exception.py:569 +#, python-format +msgid "Backup %(backup_id)s could not be found." +msgstr "" + +#: cinder/exception.py:573 +#, python-format +msgid "Invalid backup: %(reason)s" +msgstr "" + +#: cinder/exception.py:577 +msgid "Connection to swift failed" +msgstr "" + +#: cinder/flags.py:145 +msgid "Deploy v1 of the Cinder API. " +msgstr "" + +#: cinder/flags.py:148 +msgid "Deploy v2 of the Cinder API. " +msgstr "" + +#: cinder/manager.py:158 +#, python-format +msgid "Skipping %(full_task_name)s, %(ticks_to_skip)s ticks left until next run" +msgstr "" + +#: cinder/manager.py:164 +#, python-format +msgid "Running periodic task %(full_task_name)s" +msgstr "" + +#: cinder/manager.py:171 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "" + +#: cinder/manager.py:216 +msgid "Notifying Schedulers of capabilities ..." 
+msgstr "" + +#: cinder/policy.py:30 +msgid "JSON file representing policy" +msgstr "" + +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" +msgstr "" + +#: cinder/quota.py:704 +#, python-format +msgid "Created reservations %(reservations)s" +msgstr "" + +#: cinder/quota.py:726 +#, python-format +msgid "Failed to commit reservations %(reservations)s" +msgstr "" + +#: cinder/quota.py:747 +#, python-format +msgid "Failed to roll back reservations %(reservations)s" +msgstr "" + +#: cinder/service.py:124 +msgid "SIGTERM received" +msgstr "" + +#: cinder/service.py:174 cinder/openstack/common/service.py:164 +msgid "Parent process has died unexpectedly, exiting" +msgstr "" + +#: cinder/service.py:213 cinder/openstack/common/service.py:200 +msgid "Forking too fast, sleeping" +msgstr "" + +#: cinder/service.py:231 cinder/openstack/common/service.py:121 +#: cinder/openstack/common/service.py:218 +#, python-format +msgid "Caught %s, exiting" +msgstr "" + +#: cinder/service.py:236 cinder/openstack/common/service.py:223 +msgid "Unhandled exception" +msgstr "" + +#: cinder/service.py:243 cinder/openstack/common/service.py:230 +#, python-format +msgid "Started child %d" +msgstr "" + +#: cinder/service.py:253 cinder/openstack/common/service.py:240 +#, python-format +msgid "Starting %d workers" +msgstr "" + +#: cinder/service.py:272 cinder/openstack/common/service.py:257 +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "" + +#: cinder/service.py:275 +#, python-format +msgid "Child %(pid)d exited with status %(code)d" +msgstr "" + +#: cinder/service.py:278 cinder/openstack/common/service.py:265 +#, python-format +msgid "pid %d not in child list" +msgstr "" + +#: cinder/service.py:286 +#, python-format +msgid "_wait_child %d" +msgstr "" + +#: cinder/service.py:302 +#, python-format +msgid "wait wrap.failed %s" +msgstr "" + +#: cinder/service.py:310 cinder/openstack/common/service.py:293 +#, python-format +msgid "Caught %s, stopping children" +msgstr "" + +#: cinder/service.py:321 cinder/openstack/common/service.py:304 +#, python-format +msgid "Waiting on %d children to exit" +msgstr "" + +#: cinder/service.py:353 +#, python-format +msgid "Starting %(topic)s node (version %(version_string)s)" +msgstr "" + +#: cinder/service.py:367 cinder/openstack/common/rpc/service.py:47 +#, python-format +msgid "Creating Consumer connection for Service %s" +msgstr "" + +#: cinder/service.py:458 +msgid "Service killed that has no database entry" +msgstr "" + +#: cinder/service.py:495 +msgid "The service database object disappeared, Recreating it." +msgstr "" + +#: cinder/service.py:510 +msgid "Recovered model server connection!" +msgstr "" + +#: cinder/service.py:516 +msgid "model server went away" +msgstr "" + +#: cinder/service.py:608 +msgid "Full set of FLAGS:" +msgstr "" + +#: cinder/service.py:615 +#, python-format +msgid "%(flag)s : FLAG SET " +msgstr "" + +#: cinder/utils.py:96 +#, python-format +msgid "Fetching %s" +msgstr "" + +#: cinder/utils.py:147 cinder/openstack/common/processutils.py:122 +#, python-format +msgid "Got unknown keyword args to utils.execute: %r" +msgstr "" + +#: cinder/utils.py:153 +msgid "" +"The root_helper option (which lets you specify a root wrapper different " +"from cinder-rootwrap, and defaults to using sudo) is now deprecated. You " +"should use the rootwrap_config option instead." 
+msgstr "" + +#: cinder/utils.py:169 cinder/openstack/common/processutils.py:137 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "" + +#: cinder/utils.py:186 cinder/utils.py:267 +#: cinder/openstack/common/processutils.py:162 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:224 +#, python-format +msgid "Result was %s" +msgstr "" + +#: cinder/utils.py:199 cinder/openstack/common/processutils.py:174 +#, python-format +msgid "%r failed. Retrying." +msgstr "" + +#: cinder/utils.py:241 cinder/volume/drivers/san/hp/hp_3par_common.py:197 +#, python-format +msgid "Running cmd (SSH): %s" +msgstr "" + +#: cinder/utils.py:243 +msgid "Environment not supported over SSH" +msgstr "" + +#: cinder/utils.py:247 +msgid "process_input not supported over SSH" +msgstr "" + +#: cinder/utils.py:316 cinder/volume/drivers/huawei/huawei_iscsi.py:91 +msgid "Specify a password or private_key" +msgstr "" + +#: cinder/utils.py:332 cinder/volume/drivers/huawei/huawei_iscsi.py:99 +#, python-format +msgid "Error connecting via ssh: %s" +msgstr "" + +#: cinder/utils.py:373 +#, python-format +msgid "debug in callback: %s" +msgstr "" + +#: cinder/utils.py:532 +#, python-format +msgid "Link Local address is not found.:%s" +msgstr "" + +#: cinder/utils.py:535 +#, python-format +msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +msgstr "" + +#: cinder/utils.py:569 +#, python-format +msgid "Invalid backend: %s" +msgstr "" + +#: cinder/utils.py:580 +#, python-format +msgid "backend %s" +msgstr "" + +#: cinder/utils.py:630 +msgid "in looping call" +msgstr "" + +#: cinder/utils.py:813 +#, python-format +msgid "Expected object of type: %s" +msgstr "" + +#: cinder/utils.py:903 +#, python-format +msgid "timefunc: '%(name)s' took %(total_time).2f secs" +msgstr "" + +#: cinder/utils.py:1105 +#, python-format +msgid "Could not remove tmpdir: %s" +msgstr "" + +#: cinder/utils.py:1213 +#, python-format +msgid "Unknown byte multiplier: %s" +msgstr "" + +#: cinder/wsgi.py:121 +#, python-format +msgid "Unable to find cert_file : %s" +msgstr "" + +#: cinder/wsgi.py:124 +#, python-format +msgid "Unable to find ca_file : %s" +msgstr "" + +#: cinder/wsgi.py:127 +#, python-format +msgid "Unable to find key_file : %s" +msgstr "" + +#: cinder/wsgi.py:130 +msgid "" +"When running server in SSL mode, you must specify both a cert_file and " +"key_file option value in your configuration file" +msgstr "" + +#: cinder/wsgi.py:163 +#, python-format +msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" +msgstr "" + +#: cinder/wsgi.py:207 +#, python-format +msgid "Started %(name)s on %(_host)s:%(_port)s" +msgstr "" + +#: cinder/wsgi.py:226 +msgid "Stopping WSGI server." +msgstr "" + +#: cinder/wsgi.py:240 +msgid "WSGI server has stopped." +msgstr "" + +#: cinder/wsgi.py:309 +msgid "You must implement __call__" +msgstr "" + +#: cinder/api/auth.py:26 +msgid "" +"cinder.api.auth:CinderKeystoneContext is deprecated. Please use " +"cinder.api.middleware.auth:CinderKeystoneContext instead." +msgstr "" + +#: cinder/api/auth.py:34 +msgid "" +"cinder.api.auth:pipeline_factory is deprecated. Please use " +"cinder.api.middleware.auth:pipeline_factory instead." 
+msgstr "" + +#: cinder/api/common.py:63 cinder/api/common.py:97 cinder/volume/api.py:359 +msgid "limit param must be an integer" +msgstr "" + +#: cinder/api/common.py:66 cinder/api/common.py:101 cinder/volume/api.py:356 +msgid "limit param must be positive" +msgstr "" + +#: cinder/api/common.py:91 +msgid "offset param must be an integer" +msgstr "" + +#: cinder/api/common.py:105 +msgid "offset param must be positive" +msgstr "" + +#: cinder/api/common.py:133 +#, python-format +msgid "marker [%s] not found" +msgstr "" + +#: cinder/api/common.py:160 +#, python-format +msgid "href %s does not contain version" +msgstr "" + +#: cinder/api/extensions.py:184 +msgid "Initializing extension manager." +msgstr "" + +#: cinder/api/extensions.py:199 +#, python-format +msgid "Loaded extension: %s" +msgstr "" + +#: cinder/api/extensions.py:237 +#, python-format +msgid "Ext name: %s" +msgstr "" + +#: cinder/api/extensions.py:238 +#, python-format +msgid "Ext alias: %s" +msgstr "" + +#: cinder/api/extensions.py:239 +#, python-format +msgid "Ext description: %s" +msgstr "" + +#: cinder/api/extensions.py:241 +#, python-format +msgid "Ext namespace: %s" +msgstr "" + +#: cinder/api/extensions.py:242 +#, python-format +msgid "Ext updated: %s" +msgstr "" + +#: cinder/api/extensions.py:244 +#, python-format +msgid "Exception loading extension: %s" +msgstr "" + +#: cinder/api/extensions.py:258 +#, python-format +msgid "Loading extension %s" +msgstr "" + +#: cinder/api/extensions.py:264 +#, python-format +msgid "Calling extension factory %s" +msgstr "" + +#: cinder/api/extensions.py:278 +#, python-format +msgid "osapi_volume_extension is set to deprecated path: %s" +msgstr "" + +#: cinder/api/extensions.py:280 +#, python-format +msgid "" +"Please set your flag or cinder.conf settings for osapi_volume_extension " +"to: %s" +msgstr "" + +#: cinder/api/extensions.py:289 +#, python-format +msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:357 +#, python-format +msgid "Failed to load extension %(classpath)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:381 +#, python-format +msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgstr "" + +#: cinder/api/sizelimit.py:25 +msgid "" +"cinder.api.sizelimit:RequestBodySizeLimiter is deprecated. Please use " +"cinder.api.middleware.sizelimit:RequestBodySizeLimiter instead" +msgstr "" + +#: cinder/api/xmlutil.py:268 +msgid "element is not a child" +msgstr "" + +#: cinder/api/xmlutil.py:417 +msgid "root element selecting a list" +msgstr "" + +#: cinder/api/xmlutil.py:742 +#, python-format +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgstr "" + +#: cinder/api/xmlutil.py:861 +msgid "subclasses must implement construct()!" 
+msgstr "" + +#: cinder/api/contrib/admin_actions.py:80 +#, python-format +msgid "Updating %(resource)s '%(id)s' with '%(update)r'" +msgstr "" + +#: cinder/api/contrib/backups.py:128 +#, python-format +msgid "show called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:140 +#, python-format +msgid "delete called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:143 +#, python-format +msgid "Delete backup with id: %s" +msgstr "" + +#: cinder/api/contrib/backups.py:185 +#, python-format +msgid "Creating new backup %s" +msgstr "" + +#: cinder/api/contrib/backups.py:195 cinder/api/contrib/backups.py:230 +msgid "Incorrect request body format" +msgstr "" + +#: cinder/api/contrib/backups.py:201 +#, python-format +msgid "Creating backup of volume %(volume_id)s in container %(container)s" +msgstr "" + +#: cinder/api/contrib/backups.py:221 +#, python-format +msgid "Restoring backup %(backup_id)s (%(body)s)" +msgstr "" + +#: cinder/api/contrib/backups.py:234 +#, python-format +msgid "Restoring backup %(backup_id)s to volume %(volume_id)s" +msgstr "" + +#: cinder/api/contrib/extended_snapshot_attributes.py:61 +msgid "Snapshot not found." +msgstr "" + +#: cinder/api/contrib/hosts.py:83 cinder/api/openstack/wsgi.py:158 +msgid "cannot understand XML" +msgstr "" + +#: cinder/api/contrib/hosts.py:133 +#, python-format +msgid "Host '%s' could not be found." +msgstr "" + +#: cinder/api/contrib/hosts.py:162 +#, python-format +msgid "Invalid status: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:165 +#, python-format +msgid "Invalid update setting: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:177 +#, python-format +msgid "Setting host %(host)s to %(state)s." +msgstr "" + +#: cinder/api/contrib/hosts.py:202 +msgid "Describe-resource is admin only functionality" +msgstr "" + +#: cinder/api/contrib/hosts.py:210 +msgid "Host not found" +msgstr "" + +#: cinder/api/contrib/quotas.py:63 +msgid "Quota limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:102 +msgid "Request body empty" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:106 +#: cinder/api/v1/snapshot_metadata.py:77 cinder/api/v1/volume_metadata.py:77 +#: cinder/api/v2/snapshot_metadata.py:77 +msgid "Request body and URI mismatch" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:109 +#: cinder/api/v1/snapshot_metadata.py:81 cinder/api/v1/volume_metadata.py:81 +#: cinder/api/v2/snapshot_metadata.py:81 +msgid "Request body contains too many items" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:159 +msgid "Invalid request body" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:163 +msgid "No image_name was specified in request." +msgstr "" + +#: cinder/api/middleware/fault.py:45 +#, python-format +msgid "Caught error: %s" +msgstr "" + +#: cinder/api/middleware/fault.py:54 cinder/api/openstack/wsgi.py:886 +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "" + +#: cinder/api/middleware/sizelimit.py:54 cinder/api/middleware/sizelimit.py:63 +#: cinder/api/middleware/sizelimit.py:77 +msgid "Request is too large." 
+msgstr "" + +#: cinder/api/openstack/__init__.py:74 +msgid "Must specify an ExtensionManager class" +msgstr "" + +#: cinder/api/openstack/__init__.py:85 +#, python-format +msgid "Extended resource: %s" +msgstr "" + +#: cinder/api/openstack/__init__.py:110 +#, python-format +msgid "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" +msgstr "" + +#: cinder/api/openstack/__init__.py:115 +#, python-format +msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgstr "" + +#: cinder/api/openstack/__init__.py:128 +msgid "" +"cinder.api.openstack:FaultWrapper is deprecated. Please use " +"cinder.api.middleware.fault:FaultWrapper instead." +msgstr "" + +#: cinder/api/openstack/urlmap.py:25 +msgid "" +"cinder.api.openstack.urlmap:urlmap_factory is deprecated. Please use " +"cinder.api.urlmap:urlmap_factory instead." +msgstr "" + +#: cinder/api/openstack/wsgi.py:134 cinder/api/openstack/wsgi.py:537 +msgid "cannot understand JSON" +msgstr "" + +#: cinder/api/openstack/wsgi.py:542 +msgid "too many body keys" +msgstr "" + +#: cinder/api/openstack/wsgi.py:581 +#, python-format +msgid "Exception handling resource: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:586 +#, python-format +msgid "Fault thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:589 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:697 +msgid "Unrecognized Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:701 +msgid "No Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:705 +msgid "Empty body provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:816 +#, python-format +msgid "There is no such action: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:819 cinder/api/openstack/wsgi.py:832 +#: cinder/api/v1/snapshot_metadata.py:55 cinder/api/v1/snapshot_metadata.py:73 +#: cinder/api/v1/snapshot_metadata.py:98 cinder/api/v1/snapshot_metadata.py:123 +#: cinder/api/v1/volume_metadata.py:55 cinder/api/v1/volume_metadata.py:73 +#: cinder/api/v1/volume_metadata.py:98 cinder/api/v1/volume_metadata.py:123 +#: cinder/api/v2/snapshot_metadata.py:55 cinder/api/v2/snapshot_metadata.py:73 +#: cinder/api/v2/snapshot_metadata.py:98 cinder/api/v2/snapshot_metadata.py:123 +msgid "Malformed request body" +msgstr "" + +#: cinder/api/openstack/wsgi.py:829 +msgid "Unsupported Content-Type" +msgstr "" + +#: cinder/api/openstack/wsgi.py:841 +msgid "Malformed request url" +msgstr "" + +#: cinder/api/openstack/wsgi.py:889 +#, python-format +msgid "%(url)s returned a fault: %(e)s" +msgstr "" + +#: cinder/api/openstack/volume/__init__.py:25 +msgid "" +"cinder.api.openstack.volume:APIRouter is deprecated. Please use " +"cinder.api.v1.router:APIRouter instead." +msgstr "" + +#: cinder/api/openstack/volume/versions.py:26 +msgid "" +"cinder.api.openstack.volume.versions.Versions is deprecated. Please use " +"cinder.api.versions.Versions instead." +msgstr "" + +#: cinder/api/v1/limits.py:144 cinder/api/v2/limits.py:144 +#, python-format +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." +msgstr "" + +#: cinder/api/v1/limits.py:270 cinder/api/v2/limits.py:270 +msgid "This request was rate-limited." 
+msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:39 cinder/api/v1/snapshot_metadata.py:119 +#: cinder/api/v1/snapshot_metadata.py:158 cinder/api/v2/snapshot_metadata.py:39 +#: cinder/api/v2/snapshot_metadata.py:119 +#: cinder/api/v2/snapshot_metadata.py:158 +msgid "snapshot does not exist" +msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:141 +#: cinder/api/v1/snapshot_metadata.py:151 cinder/api/v1/volume_metadata.py:141 +#: cinder/api/v1/volume_metadata.py:151 cinder/api/v2/snapshot_metadata.py:141 +#: cinder/api/v2/snapshot_metadata.py:151 +msgid "Metadata item was not found" +msgstr "" + +#: cinder/api/v1/snapshots.py:123 cinder/api/v2/snapshots.py:123 +#, python-format +msgid "Delete snapshot with id: %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:173 cinder/api/v2/snapshots.py:179 +#, python-format +msgid "Create snapshot from volume %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:177 cinder/api/v2/snapshots.py:188 +#, python-format +msgid "Invalid value '%s' for force. " +msgstr "" + +#: cinder/api/v1/volume_metadata.py:39 cinder/api/v1/volume_metadata.py:119 +#: cinder/api/v1/volume_metadata.py:158 +msgid "volume does not exist" +msgstr "" + +#: cinder/api/v1/volumes.py:106 +#, python-format +msgid "vol=%s" +msgstr "" + +#: cinder/api/v1/volumes.py:236 cinder/api/v2/volumes.py:153 +#, python-format +msgid "Delete volume with id: %s" +msgstr "" + +#: cinder/api/v1/volumes.py:278 cinder/api/v1/volumes.py:282 +#: cinder/api/v2/volumes.py:209 cinder/api/v2/volumes.py:213 +msgid "Invalid imageRef provided." +msgstr "" + +#: cinder/api/v1/volumes.py:339 cinder/api/v2/volumes.py:272 +#, python-format +msgid "Create volume of %s GB" +msgstr "" + +#: cinder/api/v1/volumes.py:418 +#, python-format +msgid "Removing options '%(bad_options)s' from query" +msgstr "" + +#: cinder/api/v2/volumes.py:359 +#, python-format +msgid "Removing options '%s' from query" +msgstr "" + +#: cinder/backup/api.py:53 +msgid "Backup status must be available or error" +msgstr "" + +#: cinder/backup/api.py:78 +msgid "Volume to be backed up must be available" +msgstr "" + +#: cinder/backup/api.py:112 +msgid "Backup status must be available" +msgstr "" + +#: cinder/backup/api.py:117 +msgid "Backup to be restored has invalid size" +msgstr "" + +#: cinder/backup/api.py:126 +#, python-format +msgid "Creating volume of %(size)s GB for restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/api.py:140 +#, python-format +msgid "" +"volume size %(volume_size)d is too small to restore backup of size " +"%(size)d." 
+msgstr "" + +#: cinder/backup/api.py:145 +msgid "Volume to be restored to must be available" +msgstr "" + +#: cinder/backup/api.py:151 +msgid "Volume to be restored to is smaller than the backup to be restored" +msgstr "" + +#: cinder/backup/api.py:155 +#, python-format +msgid "Overwriting volume %(volume_id)s with restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/manager.py:79 +msgid "Cleaning up incomplete backup operations" +msgstr "" + +#: cinder/backup/manager.py:83 +#, python-format +msgid "Resetting volume %s to available (was backing-up)" +msgstr "" + +#: cinder/backup/manager.py:87 +#, python-format +msgid "Resetting volume %s to error_restoring (was restoring-backup)" +msgstr "" + +#: cinder/backup/manager.py:98 +#, python-format +msgid "Resetting backup %s to error (was creating)" +msgstr "" + +#: cinder/backup/manager.py:104 +#, python-format +msgid "Resetting backup %s to available (was restoring)" +msgstr "" + +#: cinder/backup/manager.py:109 +#, python-format +msgid "Resuming delete on backup: %s" +msgstr "" + +#: cinder/backup/manager.py:119 +#, python-format +msgid "create_backup started, backup: %(backup_id)s for volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:128 +#, python-format +msgid "" +"create_backup aborted, expected volume status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:137 +#, python-format +msgid "" +"create_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:160 +#, python-format +msgid "create_backup finished. backup: %s" +msgstr "" + +#: cinder/backup/manager.py:166 +#, python-format +msgid "" +"restore_backup started, restoring backup: %(backup_id)s to volume: " +"%(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:175 +#, python-format +msgid "" +"restore_backup aborted, expected volume status %(expected_status)s but " +"got %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:183 +#, python-format +msgid "" +"restore_backup aborted, expected backup status %(expected_status)s but " +"got %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:199 +#, python-format +msgid "" +"restore_backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]" +msgstr "" + +#: cinder/backup/manager.py:220 +#, python-format +msgid "" +"restore_backup finished, backup: %(backup_id)s restored to volume: " +"%(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:228 +#, python-format +msgid "delete_backup started, backup: %s" +msgstr "" + +#: cinder/backup/manager.py:234 +#, python-format +msgid "" +"delete_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:244 +#, python-format +msgid "" +"delete_backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]" +msgstr "" + +#: cinder/backup/manager.py:264 +#, python-format +msgid "delete_backup finished, backup %s deleted" +msgstr "" + +#: cinder/backup/services/swift.py:96 +#, python-format +msgid "unsupported compression algorithm: %s" +msgstr "" + +#: cinder/backup/services/swift.py:117 +#, python-format +msgid "_check_container_exists: container: %s" +msgstr "" + +#: cinder/backup/services/swift.py:122 +#, python-format +msgid "container %s does 
not exist" +msgstr "" + +#: cinder/backup/services/swift.py:127 +#, python-format +msgid "container %s exists" +msgstr "" + +#: cinder/backup/services/swift.py:133 +#, python-format +msgid "_create_container started, container: %(container)s,backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:148 +#, python-format +msgid "_generate_swift_object_name_prefix: %s" +msgstr "" + +#: cinder/backup/services/swift.py:159 +#, python-format +msgid "generated object list: %s" +msgstr "" + +#: cinder/backup/services/swift.py:169 +#, python-format +msgid "" +"_write_metadata started, container name: %(container)s, metadata " +"filename: %(filename)s" +msgstr "" + +#: cinder/backup/services/swift.py:184 +#, python-format +msgid "" +"error writing metadata file to swift, MD5 of metadata file in swift " +"[%(etag)s] is not the same as MD5 of metadata file sent to swift " +"[%(md5)s]" +msgstr "" + +#: cinder/backup/services/swift.py:188 +msgid "_write_metadata finished" +msgstr "" + +#: cinder/backup/services/swift.py:193 +#, python-format +msgid "" +"_read_metadata started, container name: %(container)s, metadata filename:" +" %(filename)s" +msgstr "" + +#: cinder/backup/services/swift.py:197 +#, python-format +msgid "_read_metadata finished (%s)" +msgstr "" + +#: cinder/backup/services/swift.py:207 +#, python-format +msgid "volume size %d is invalid." +msgstr "" + +#: cinder/backup/services/swift.py:221 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to swift, volume size: " +"%(volume_size_bytes)d, swift object names prefix %(object_prefix)s, " +"availability zone: %(availability_zone)s" +msgstr "" + +#: cinder/backup/services/swift.py:237 +msgid "reading chunk of data from volume" +msgstr "" + +#: cinder/backup/services/swift.py:244 +#, python-format +msgid "" +"compressed %(data_size_bytes)d bytes of data to %(comp_size_bytes)d bytes" +" using %(algorithm)s" +msgstr "" + +#: cinder/backup/services/swift.py:248 +msgid "not compressing data" +msgstr "" + +#: cinder/backup/services/swift.py:252 +msgid "About to put_object" +msgstr "" + +#: cinder/backup/services/swift.py:257 +#, python-format +msgid "swift MD5 for %(object_name)s: %(etag)s" +msgstr "" + +#: cinder/backup/services/swift.py:260 +#, python-format +msgid "backup MD5 for %(object_name)s: %(md5)s" +msgstr "" + +#: cinder/backup/services/swift.py:262 +#, python-format +msgid "" +"error writing object to swift, MD5 of object in swift %(etag)s is not the" +" same as MD5 of object sent to swift %(md5)s" +msgstr "" + +#: cinder/backup/services/swift.py:268 +msgid "Calling eventlet.sleep(0)" +msgstr "" + +#: cinder/backup/services/swift.py:276 +#, python-format +msgid "backup %s finished." +msgstr "" + +#: cinder/backup/services/swift.py:281 +#, python-format +msgid "v1 swift volume backup restore of %s started" +msgstr "" + +#: cinder/backup/services/swift.py:287 +#, python-format +msgid "metadata_object_names = %s" +msgstr "" + +#: cinder/backup/services/swift.py:293 +msgid "" +"restore_backup aborted, actual swift object list in swift does not match " +"object list stored in metadata" +msgstr "" + +#: cinder/backup/services/swift.py:299 +#, python-format +msgid "" +"restoring object from swift. 
backup: %(backup_id)s, container: " +"%(container)s, swift object name: %(object_name)s, volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:309 +#, python-format +msgid "decompressing data using %s algorithm" +msgstr "" + +#: cinder/backup/services/swift.py:323 +#, python-format +msgid "v1 swift volume backup restore of %s finished" +msgstr "" + +#: cinder/backup/services/swift.py:331 +#, python-format +msgid "" +"starting restore of backup %(object_prefix)s from swift container: " +"%(container)s, to volume %(volume_id)s, backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:339 +#, python-format +msgid "Restoring swift backup version %s" +msgstr "" + +#: cinder/backup/services/swift.py:344 +#, python-format +msgid "No support to restore swift backup version %s" +msgstr "" + +#: cinder/backup/services/swift.py:348 +#, python-format +msgid "restore %(backup_id)s to %(volume_id)s finished." +msgstr "" + +#: cinder/backup/services/swift.py:362 +msgid "swift error while listing objects, continuing with delete" +msgstr "" + +#: cinder/backup/services/swift.py:371 +#, python-format +msgid "swift error while deleting object %s, continuing with delete" +msgstr "" + +#: cinder/backup/services/swift.py:374 +#, python-format +msgid "deleted swift object: %(swift_object_name)s in container: %(container)s" +msgstr "" + +#: cinder/backup/services/swift.py:380 +#, python-format +msgid "delete %s finished" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:152 +#, python-format +msgid "Creating iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:171 cinder/brick/iscsi/iscsi.py:264 +#: cinder/brick/iscsi/iscsi.py:400 cinder/brick/iscsi/iscsi.py:409 +#, python-format +msgid "Failed to create iscsi target for volume id:%(vol_id)s." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:181 +#, python-format +msgid "" +"Failed to create iscsi target for volume id:%(vol_id)s. Please ensure " +"your tgtd config file contains 'include %(volumes_dir)s/*'" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:192 +#, python-format +msgid "Removing iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:209 cinder/brick/iscsi/iscsi.py:426 +#, python-format +msgid "Failed to remove iscsi target for volume id:%(vol_id)s." 
+msgstr "" + +#: cinder/brick/iscsi/iscsi.py:218 cinder/brick/iscsi/iscsi.py:434 +msgid "valid iqn needed for show_target" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:270 +#, python-format +msgid "Removing iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:356 +msgid "rtstool is not installed correctly" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:376 +#, python-format +msgid "Creating iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:416 +#, python-format +msgid "Removing iscsi_target: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:455 +#, python-format +msgid "Failed to add initiator iqn %s to target" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:35 +#, python-format +msgid "Unable to find Volume Group: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:41 +#, python-format +msgid "Failed to create Volume Group: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:75 +msgid "Error creating Volume Group" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:76 +#, python-format +msgid "Cmd :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:77 +#, python-format +msgid "StdOut :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:78 +#, python-format +msgid "StdErr :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:82 +#, python-format +msgid "Unable to locate Volume Group %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:265 +#, python-format +msgid "Unable to find VG: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:284 +msgid "" +"Requested to setup thin provisioning, however current LVM version does " +"not support it." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:337 +#, python-format +msgid "Unable to find LV: %s" +msgstr "" + +#: cinder/common/sqlalchemyutils.py:68 +msgid "Id not in sort_keys; is sort_keys unique?" +msgstr "" + +#: cinder/common/sqlalchemyutils.py:116 +msgid "Unknown sort direction, must be 'desc' or 'asc'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:51 +msgid "Use of empty request context is deprecated" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:178 +#, python-format +msgid "Unrecognized read_deleted value '%s'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:786 +#, python-format +msgid "Change will make usage less than 0 for the following resources: %(unders)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1797 cinder/db/sqlalchemy/api.py:1823 +#, python-format +msgid "No backend config with id %(sm_backend_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1880 +#, python-format +msgid "No sm_flavor called %(sm_flavor)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1924 +#, python-format +msgid "No sm_volume with id %(volume_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1987 +#, python-format +msgid "No backup with id %(backup_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:74 +msgid "version should be an integer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:101 +msgid "Upgrade DB using Essex release first." +msgstr "" + +#: cinder/db/sqlalchemy/session.py:71 +#, python-format +msgid "Got mysql server has gone away: %s" +msgstr "" + +#: cinder/db/sqlalchemy/session.py:132 +#, python-format +msgid "SQL connection failed. %s attempts left." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:243 +msgid "Exception while creating table." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:272 +msgid "Downgrade from initial Cinder install is unsupported." 
+msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:85 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:123 +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:82 +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:47 +#, python-format +msgid "Table |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:135 +msgid "quota_classes table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:142 +msgid "quota_usages table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:149 +msgid "reservations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:62 +msgid "Exception while creating table 'volume_glance_metedata'" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:77 +msgid "volume_glance_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:94 +msgid "backups table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:60 +msgid "snapshot_metadata table not dropped" +msgstr "" + +#: cinder/image/glance.py:151 +#, python-format +msgid "" +"Error contacting glance server '%(host)s:%(port)s' for '%(method)s', " +"%(extra)s." +msgstr "" + +#: cinder/image/image_utils.py:116 +msgid "Snapshot list encountered but no header found!" +msgstr "" + +#: cinder/image/image_utils.py:224 +msgid "'qemu-img info' parsing failed." +msgstr "" + +#: cinder/image/image_utils.py:231 +#, python-format +msgid "fmt=%(fmt)s backed by:%(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:248 +#, python-format +msgid "Converted to raw, but format is now %s" +msgstr "" + +#: cinder/image/image_utils.py:278 +#, python-format +msgid "Converted to %(f1)s, but format is now %(f2)s" +msgstr "" + +#: cinder/openstack/common/exception.py:104 +msgid "Uncaught exception" +msgstr "" + +#: cinder/openstack/common/excutils.py:48 +#, python-format +msgid "Original exception being dropped: %s" +msgstr "" + +#: cinder/openstack/common/lockutils.py:102 +#, python-format +msgid "Could not release the acquired lock `%s`" +msgstr "" + +#: cinder/openstack/common/lockutils.py:188 +#, python-format +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:199 +#, python-format +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:226 +#, python-format +msgid "Got file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:234 +#, python-format +msgid "Released file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." 
+msgstr "" + +#: cinder/openstack/common/log.py:226 +#, python-format +msgid "Deprecated: %s" +msgstr "" + +#: cinder/openstack/common/log.py:329 +#, python-format +msgid "Error loading logging config %(log_config)s: %(err_msg)s" +msgstr "" + +#: cinder/openstack/common/log.py:379 +#, python-format +msgid "syslog facility must be one of: %s" +msgstr "" + +#: cinder/openstack/common/log.py:537 +#, python-format +msgid "Fatal call to deprecated config: %(msg)s" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:84 +#, python-format +msgid "task run outlasted interval by %s sec" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:91 +msgid "in fixed duration looping call" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:131 +#, python-format +msgid "Dynamic looping call sleeping for %.02f seconds" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:138 +msgid "in dynamic looping call" +msgstr "" + +#: cinder/openstack/common/policy.py:149 +#, python-format +msgid "" +"Inheritance-based rules are deprecated; use the default brain instead of " +"%s." +msgstr "" + +#: cinder/openstack/common/policy.py:163 +#, python-format +msgid "Failed to understand rule %(match)r" +msgstr "" + +#: cinder/openstack/common/policy.py:173 +#, python-format +msgid "Inheritance-based rules are deprecated; update _check_%s" +msgstr "" + +#: cinder/openstack/common/policy.py:180 +#, python-format +msgid "No handler for matches of kind %s" +msgstr "" + +#: cinder/openstack/common/service.py:112 +#: cinder/openstack/common/service.py:275 +msgid "Full set of CONF:" +msgstr "" + +#: cinder/openstack/common/service.py:261 +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "" + +#: cinder/openstack/common/strutils.py:72 +#, python-format +msgid "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:125 +#, python-format +msgid "%s not in valid priorities" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:141 +#, python-format +msgid "" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:171 +#, python-format +msgid "Failed to load notifier %s. These notifications will not be sent." +msgstr "" + +#: cinder/openstack/common/notifier/rabbit_notifier.py:27 +msgid "The rabbit_notifier is now deprecated. Please use rpc_notifier instead." +msgstr "" + +#: cinder/openstack/common/notifier/rpc_notifier.py:45 +#: cinder/openstack/common/notifier/rpc_notifier2.py:51 +#, python-format +msgid "Could not send notification to %(topic)s. Payload=%(message)s" +msgstr "" + +#: cinder/openstack/common/rpc/__init__.py:106 +#, python-format +msgid "" +"A RPC is being made while holding a lock. The locks currently held are " +"%(locks)s. This is probably a bug. Please report it. Include the " +"following: [%(stack)s]." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:75 +msgid "Pool creating new connection" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:200 +#, python-format +msgid "no calling threads waiting for msg_id : %s, message : %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:208 +#, python-format +msgid "" +"Number of call waiters is greater than warning threshhold: %d. There " +"could be a MulticallProxyWaiter leak." 
+msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:291 +#, python-format +msgid "unpacked context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:337 +#, python-format +msgid "UNIQUE_ID is %s." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:406 +#, python-format +msgid "received %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:414 +#, python-format +msgid "no method for message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:415 +#, python-format +msgid "No method for message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:443 +#: cinder/openstack/common/rpc/impl_zmq.py:286 +#, python-format +msgid "Expected exception during message handling (%s)" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:451 +#: cinder/openstack/common/rpc/impl_zmq.py:292 +msgid "Exception during message handling" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:586 +#, python-format +msgid "Making synchronous call on %s ..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:589 +#, python-format +msgid "MSG_ID is %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:623 +#, python-format +msgid "Making asynchronous cast on %s..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:632 +msgid "Making asynchronous fanout cast..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:660 +#, python-format +msgid "Sending %(event_type)s on %(topic)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:74 +msgid "An unknown RPC related exception occurred." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:104 +#, python-format +msgid "" +"Remote error: %(exc_type)s %(value)s\n" +"%(traceback)s." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:121 +#, python-format +msgid "" +"Timeout while waiting on RPC response - topic: \"%(topic)s\", RPC method:" +" \"%(method)s\" info: \"%(info)s\"" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:137 +#: cinder/openstack/common/rpc/common.py:138 +#: cinder/openstack/common/rpc/common.py:139 +msgid "" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:143 +#, python-format +msgid "Found duplicate message(%(msg_id)s). Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:147 +msgid "Invalid reuse of an RPC connection." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:151 +#, python-format +msgid "Specified RPC version, %(version)s, not supported by this endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:156 +#, python-format +msgid "" +"Specified RPC envelope version, %(version)s, not supported by this " +"endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:280 +#, python-format +msgid "Failed to sanitize %(item)s. Key error %(err)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:302 +#, python-format +msgid "Returning exception %s to caller" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:169 +#: cinder/openstack/common/rpc/impl_qpid.py:133 +msgid "Failed to process message... skipping it." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:480 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:502 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:539 +#, python-format +msgid "" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:555 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:609 +#: cinder/openstack/common/rpc/impl_qpid.py:403 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:627 +#: cinder/openstack/common/rpc/impl_qpid.py:418 +#, python-format +msgid "Timed out waiting for RPC response: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:631 +#: cinder/openstack/common/rpc/impl_qpid.py:422 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:670 +#: cinder/openstack/common/rpc/impl_qpid.py:457 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:351 +#, python-format +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:357 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:370 +msgid "Re-established AMQP queues" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:430 +msgid "Error processing message. Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:97 +msgid "JSON serialization failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:104 +#, python-format +msgid "Deserializing: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:140 +#, python-format +msgid "Connecting to %(addr)s with %(type)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:141 +#, python-format +msgid "-> Subscribed to %(subscribe)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:142 +#, python-format +msgid "-> bind: %(bind)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:150 +msgid "Could not open socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:162 +#, python-format +msgid "Subscribing to %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:204 +msgid "You cannot recv on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:209 +msgid "You cannot send on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:273 +#, python-format +msgid "Running func with context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:311 +msgid "Sending reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:345 +msgid "RPC message did not include method." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:380 +msgid "Registering reactor" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:392 +msgid "In reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:407 +msgid "Out reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:411 +msgid "Consuming socket" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:453 +#, python-format +msgid "CONSUMER GOT %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:465 +#, python-format +msgid "Creating proxy for topic: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:471 +msgid "Topic contained dangerous characters." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:496 +#, python-format +msgid "ROUTER RELAY-OUT SUCCEEDED %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:505 +msgid "Topic socket file creation failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:510 +#, python-format +msgid "ROUTER RELAY-OUT QUEUED %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:513 +#, python-format +msgid "Local per-topic backlog buffer full for topic %(topic)s. Dropping message." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:532 +#, python-format +msgid "Could not create IPC directory %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:542 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:576 +#, python-format +msgid "CONSUMER RECEIVED DATA: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:578 +#, python-format +msgid "ROUTER RELAY-OUT %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:600 +msgid "ZMQ Envelope version unsupported or unknown." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:628 +msgid "Skipping topic registration. Already registered." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:635 +#, python-format +msgid "Consumer is a zmq.%s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:687 +msgid "Creating payload" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:700 +msgid "Creating queue socket for reply waiter" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:713 +msgid "Sending cast" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:716 +msgid "Cast sent; Waiting reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:719 +#, python-format +msgid "Received message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:720 +msgid "Unpacking response" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:729 +msgid "Unsupported or unknown ZMQ envelope returned." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:736 +msgid "RPC Message Invalid." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:760 +#, python-format +msgid "%(msg)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:763 +#, python-format +msgid "Sending message(s) to: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:767 +msgid "No matchmaker results. Not casting." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:770 +msgid "No match from matchmaker." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:53 +msgid "Match not found by MatchMaker." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:89 +msgid "Matchmaker does not implement registration or heartbeat." 
+msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:239 +#, python-format +msgid "Matchmaker unregistered: %s, %s" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:250 +msgid "Register before starting heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:343 +#: cinder/openstack/common/rpc/matchmaker.py:361 +#, python-format +msgid "No key defining hosts for topic '%s', see ringfile" +msgstr "" + +#: cinder/scheduler/chance.py:51 cinder/scheduler/simple.py:90 +msgid "Is the appropriate service running?" +msgstr "" + +#: cinder/scheduler/chance.py:56 +msgid "Could not find another host" +msgstr "" + +#: cinder/scheduler/driver.py:89 +msgid "Must implement a fallback schedule" +msgstr "" + +#: cinder/scheduler/driver.py:93 +msgid "Must implement schedule_create_volume" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:113 +msgid "Invalid value for 'scheduler_max_attempts', must be >=1" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:131 +#, python-format +msgid "" +"Error scheduling %(volume_id)s from last vol-service: %(last_host)s : " +"%(exc)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:160 +#, python-format +msgid "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:208 +#, python-format +msgid "Filtered %(hosts)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:214 +#, python-format +msgid "Choosing %(best_host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:240 +#, python-format +msgid "Ignoring %(service_name)s service update from %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:244 +#, python-format +msgid "Received %(service_name)s service update from %(host)s." +msgstr "" + +#: cinder/scheduler/host_manager.py:266 +msgid "service is down or disabled." +msgstr "" + +#: cinder/scheduler/manager.py:119 +#, python-format +msgid "Failed to schedule_%(method)s: %(ex)s" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:69 +#, python-format +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:78 +#, python-format +msgid "Could not decode scheduler options: '%(e)s'" +msgstr "" + +#: cinder/scheduler/simple.py:79 +msgid "Not enough allocatable volume gigabytes remaining" +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:37 +msgid "Free capacity not set: volume node info collection broken." +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:51 +#, python-format +msgid "" +"Insufficient free space for volume creation (requested / avail): " +"%(requested)s/%(available)s" +msgstr "" + +#: cinder/scheduler/filters/retry_filter.py:41 +#, python-format +msgid "Host %(host)s %(pass_msg)s. 
Previously tried hosts: %(hosts)s"
+msgstr ""
+
+#: cinder/tests/fake_driver.py:45 cinder/volume/driver.py:506
+#, python-format
+msgid "FAKE ISCSI: %s"
+msgstr ""
+
+#: cinder/tests/fake_driver.py:93 cinder/tests/fake_driver.py:98
+#, python-format
+msgid "LoggingVolumeDriver: %s"
+msgstr ""
+
+#: cinder/tests/fake_utils.py:72
+#, python-format
+msgid "Faking execution of cmd (subprocess): %s"
+msgstr ""
+
+#: cinder/tests/fake_utils.py:80
+#, python-format
+msgid "Faked command matched %s"
+msgstr ""
+
+#: cinder/tests/fake_utils.py:96
+#, python-format
+msgid "Faked command raised an exception %s"
+msgstr ""
+
+#: cinder/tests/fake_utils.py:101
+#, python-format
+msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'"
+msgstr ""
+
+#: cinder/tests/test_misc.py:59
+#, python-format
+msgid ""
+"The following migrations are missing a downgrade:\n"
+"\t%s"
+msgstr ""
+
+#: cinder/tests/test_storwize_svc.py:243
+#, python-format
+msgid "unrecognized argument %s"
+msgstr ""
+
+#: cinder/tests/test_storwize_svc.py:1217
+#, python-format
+msgid "Run CLI command: %s"
+msgstr ""
+
+#: cinder/tests/test_storwize_svc.py:1220
+#, python-format
+msgid ""
+"CLI output:\n"
+" stdout: %(stdout)s\n"
+" stderr: %(stderr)s"
+msgstr ""
+
+#: cinder/tests/test_storwize_svc.py:1225
+#, python-format
+msgid ""
+"CLI Exception output:\n"
+" stdout: %(out)s\n"
+" stderr: %(err)s"
+msgstr ""
+
+#: cinder/tests/test_volume_types.py:58
+#, python-format
+msgid "Given data: %s"
+msgstr ""
+
+#: cinder/tests/test_volume_types.py:59
+#, python-format
+msgid "Result data: %s"
+msgstr ""
+
+#: cinder/tests/api/contrib/test_backups.py:638
+msgid "Invalid input"
+msgstr ""
+
+#: cinder/tests/integrated/test_login.py:31
+#, python-format
+msgid "volume: %s"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:37
+#, python-format
+msgid ""
+"%(message)s\n"
+"Status Code: %(_status)s\n"
+"Body: %(_body)s"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:46
+msgid "Authentication error"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:54
+msgid "Authorization error"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:62
+msgid "Item not found"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:104
+#, python-format
+msgid "Doing %(method)s on %(relative_url)s"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:106
+#, python-format
+msgid "Body: %s"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:124
+#, python-format
+msgid "%(auth_uri)s => code %(http_status)s"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:150
+#, python-format
+msgid "%(relative_uri)s => code %(http_status)s"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:160
+msgid "Unexpected status code"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:167
+#, python-format
+msgid "Decoding JSON: %s"
+msgstr ""
+
+#: cinder/volume/api.py:96
+msgid "May specify only one of snapshot, imageRef or source volume"
+msgstr ""
+
+#: cinder/volume/api.py:103 cinder/volume/api.py:449
+#: cinder/volume/manager.py:554
+msgid "status must be available"
+msgstr ""
+
+#: cinder/volume/api.py:108
+msgid "Volume size cannot be less than the snapshot size"
+msgstr ""
+
+#: cinder/volume/api.py:117
+msgid "Unable to clone volumes that are in an error state"
+msgstr ""
+
+#: cinder/volume/api.py:123
+msgid "Clones currently must be >= original volume size."
+msgstr "" + +#: cinder/volume/api.py:140 +#, python-format +msgid "Volume size '%s' must be an integer and greater than 0" +msgstr "" + +#: cinder/volume/api.py:150 +msgid "Size of specified image is larger than volume size." +msgstr "" + +#: cinder/volume/api.py:154 +msgid "Image minDisk size is larger than the volume size." +msgstr "" + +#: cinder/volume/api.py:168 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/api.py:177 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create volume (%(d_consumed)d " +"volumes already consumed)" +msgstr "" + +#: cinder/volume/api.py:308 +msgid "Failed to update quota for deleting volume" +msgstr "" + +#: cinder/volume/api.py:316 +msgid "Volume status must be available or error" +msgstr "" + +#: cinder/volume/api.py:321 +#, python-format +msgid "Volume still has %d dependent snapshots" +msgstr "" + +#: cinder/volume/api.py:374 cinder/volume/api.py:432 +#: cinder/volume/volume_types.py:65 +#, python-format +msgid "Searching by: %s" +msgstr "" + +#: cinder/volume/api.py:452 +msgid "already attached" +msgstr "" + +#: cinder/volume/api.py:459 +msgid "already detached" +msgstr "" + +#: cinder/volume/api.py:470 +msgid "Volume status must be available to reserve" +msgstr "" + +#: cinder/volume/api.py:519 +msgid "must be available" +msgstr "" + +#: cinder/volume/api.py:537 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG snapshot " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/api.py:546 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create snapshot (%(d_consumed)d " +"snapshots already consumed)" +msgstr "" + +#: cinder/volume/api.py:595 +msgid "Volume Snapshot status must be available or error" +msgstr "" + +#: cinder/volume/api.py:623 +msgid "Metadata property key blank" +msgstr "" + +#: cinder/volume/api.py:627 +msgid "Metadata property key greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:631 +msgid "Metadata property value greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:716 +msgid "Volume status must be available/in-use." +msgstr "" + +#: cinder/volume/api.py:719 +msgid "Volume status is in-use." +msgstr "" + +#: cinder/volume/driver.py:94 cinder/volume/drivers/netapp/nfs.py:253 +#, python-format +msgid "Recovering from a failed execute. Try number %s" +msgstr "" + +#: cinder/volume/driver.py:221 cinder/volume/drivers/emc/emc_smis_iscsi.py:119 +msgid "ISCSI provider_location not stored, using discovery" +msgstr "" + +#: cinder/volume/driver.py:269 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/driver.py:273 cinder/volume/drivers/emc/emc_smis_iscsi.py:162 +#, python-format +msgid "ISCSI Discovery: Found %s" +msgstr "" + +#: cinder/volume/driver.py:361 +#, python-format +msgid "copy_image_to_volume %s." +msgstr "" + +#: cinder/volume/driver.py:378 +#, python-format +msgid "copy_volume_to_image %s." +msgstr "" + +#: cinder/volume/driver.py:437 +#, python-format +msgid "iSCSI device not found at %s" +msgstr "" + +#: cinder/volume/driver.py:439 +#, python-format +msgid "" +"ISCSI volume not yet found at: %(host_device)s. Will rescan & retry. 
Try" +" number: %(tries)s" +msgstr "" + +#: cinder/volume/driver.py:451 +#, python-format +msgid "Found iSCSI node %(host_device)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/volume/driver.py:469 cinder/volume/manager.py:689 +#: cinder/volume/drivers/lvm.py:560 cinder/volume/drivers/lvm.py:676 +#: cinder/volume/drivers/storwize_svc.py:1344 +#: cinder/volume/drivers/emc/emc_smis_common.py:855 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:241 +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1503 +#: cinder/volume/drivers/netapp/iscsi.py:1114 +#: cinder/volume/drivers/netapp/iscsi.py:1484 +#: cinder/volume/drivers/netapp/iscsi.py:2236 +#: cinder/volume/drivers/netapp/iscsi.py:2515 +#: cinder/volume/drivers/nexenta/volume.py:317 +msgid "Updating volume status" +msgstr "" + +#: cinder/volume/driver.py:544 +msgid "Driver must implement initialize_connection" +msgstr "" + +#: cinder/volume/manager.py:122 +#, python-format +msgid "Driver path %s is deprecated, update your configuration to the new path." +msgstr "" + +#: cinder/volume/manager.py:144 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "" + +#: cinder/volume/manager.py:149 +#, python-format +msgid "volume %s stuck in a downloading state" +msgstr "" + +#: cinder/volume/manager.py:154 +#, python-format +msgid "volume %s: skipping export" +msgstr "" + +#: cinder/volume/manager.py:156 +msgid "Resuming any in progress delete operations" +msgstr "" + +#: cinder/volume/manager.py:159 +#, python-format +msgid "Resuming delete on volume: %s" +msgstr "" + +#: cinder/volume/manager.py:219 +#, python-format +msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +msgstr "" + +#: cinder/volume/manager.py:228 +#, python-format +msgid "volume %s: creating from snapshot" +msgstr "" + +#: cinder/volume/manager.py:232 +#, python-format +msgid "volume %s: creating from existing volume" +msgstr "" + +#: cinder/volume/manager.py:236 +#, python-format +msgid "volume %s: creating from image" +msgstr "" + +#: cinder/volume/manager.py:245 +#, python-format +msgid "volume %s: creating" +msgstr "" + +#: cinder/volume/manager.py:277 +#, python-format +msgid "volume %s: creating export" +msgstr "" + +#: cinder/volume/manager.py:286 +#, python-format +msgid "volume %s: create failed" +msgstr "" + +#: cinder/volume/manager.py:316 +#, python-format +msgid "volume %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:324 +#, python-format +msgid "Error: %s" +msgstr "" + +#: cinder/volume/manager.py:350 +#, python-format +msgid "volume %s: Error trying to reschedule create" +msgstr "" + +#: cinder/volume/manager.py:368 +msgid "Retry info not present, will not reschedule" +msgstr "" + +#: cinder/volume/manager.py:372 +msgid "No request spec, will not reschedule" +msgstr "" + +#: cinder/volume/manager.py:377 +#, python-format +msgid "volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d" +msgstr "" + +#: cinder/volume/manager.py:406 cinder/volume/manager.py:419 +#, python-format +msgid "volume %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:412 +msgid "volume is not local to this node" +msgstr "" + +#: cinder/volume/manager.py:417 +#, python-format +msgid "volume %s: removing export" +msgstr "" + +#: cinder/volume/manager.py:422 +#, python-format +msgid "volume %s: volume is busy" +msgstr "" + +#: cinder/volume/manager.py:441 +msgid "Failed to update usages deleting volume" +msgstr "" + +#: cinder/volume/manager.py:445 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "" + +#: 
cinder/volume/manager.py:460
+#, python-format
+msgid "snapshot %s: creating"
+msgstr ""
+
+#: cinder/volume/manager.py:466
+#, python-format
+msgid "snapshot %(snap_name)s: creating"
+msgstr ""
+
+#: cinder/volume/manager.py:484
+#, python-format
+msgid "snapshot %s: created successfully"
+msgstr ""
+
+#: cinder/volume/manager.py:492 cinder/volume/manager.py:502
+#, python-format
+msgid "snapshot %s: deleting"
+msgstr ""
+
+#: cinder/volume/manager.py:505
+#, python-format
+msgid "snapshot %s: snapshot is busy"
+msgstr ""
+
+#: cinder/volume/manager.py:530
+msgid "Failed to update usages deleting snapshot"
+msgstr ""
+
+#: cinder/volume/manager.py:533
+#, python-format
+msgid "snapshot %s: deleted successfully"
+msgstr ""
+
+#: cinder/volume/manager.py:551
+msgid "being attached by another instance"
+msgstr ""
+
+#: cinder/volume/manager.py:608
+#, python-format
+msgid "Downloaded image %(image_id)s to %(volume_id)s successfully"
+msgstr ""
+
+#: cinder/volume/manager.py:626
+#, python-format
+msgid "Uploaded volume %(volume_id)s to image (%(image_id)s) successfully"
+msgstr ""
+
+#: cinder/volume/manager.py:702
+msgid "Clear capabilities"
+msgstr ""
+
+#: cinder/volume/manager.py:706
+#, python-format
+msgid "Notification {%s} received"
+msgstr ""
+
+#: cinder/volume/volume_types.py:41
+#, python-format
+msgid "DB error: %s"
+msgstr ""
+
+#: cinder/volume/volume_types.py:50 cinder/volume/volume_types.py:97
+msgid "id cannot be None"
+msgstr ""
+
+#: cinder/volume/volume_types.py:109
+msgid "name cannot be None"
+msgstr ""
+
+#: cinder/volume/volume_types.py:128
+#, python-format
+msgid ""
+"Default volume type is not found, please check default_volume_type "
+"config: %s"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:91
+msgid "Running with CoraidDriver for ESM EtherCloud"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:102
+#, python-format
+msgid "Update session cookie %(session)s"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:108 cinder/volume/drivers/coraid.py:183
+#, python-format
+msgid "Message: %(message)s"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:125
+#, python-format
+msgid "Error while trying to set group: %(message)s"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:128
+#, python-format
+msgid "Unable to find group: %(group)s"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:161
+msgid "ESM urlOpen error"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:166
+msgid "JSON Error"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:170
+msgid "Request without URL"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:175
+#, python-format
+msgid "Configure data: %s"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:177
+#, python-format
+msgid "Configure response: %s"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:199
+#, python-format
+msgid "Unable to retrieve volume info for volume %(volname)s"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:289
+msgid "Cannot log in to Coraid ESM"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:308
+#, python-format
+msgid "Failed to create volume %(volname)s"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:321
+#, python-format
+msgid "Failed to delete volume %(volname)s"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:335
+#, python-format
+msgid "Failed to Create Snapshot %(snapname)s"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:347
+#, python-format
+msgid "Failed to Delete Snapshot %(snapname)s"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:362
+#, python-format
+msgid "Failed to Create Volume from Snapshot 
%(snapname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:383 +#, python-format +msgid "" +"Failed to Initialize Connection. Volume Name: %(volname)s Shelf: " +"%(shelf)s, Lun: %(lun)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:65 +#, python-format +msgid "There's no Gluster config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:70 +#, python-format +msgid "Gluster config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:80 +msgid "mount.glusterfs is not installed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:98 cinder/volume/drivers/nfs.py:168 +#, python-format +msgid "casted to %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:108 cinder/volume/drivers/nfs.py:178 +#, python-format +msgid "Volume %s does not have provider_location specified, skipping" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:168 cinder/volume/drivers/nfs.py:237 +#, python-format +msgid "Exception during mounting %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:247 cinder/volume/drivers/nfs.py:321 +#, python-format +msgid "%s is already mounted" +msgstr "" + +#: cinder/volume/drivers/lvm.py:82 +#, python-format +msgid "volume group %s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/lvm.py:197 +#, python-format +msgid "Size for volume: %s not found, skipping secure delete." +msgstr "" + +#: cinder/volume/drivers/lvm.py:204 +#, python-format +msgid "Performing secure delete on volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:218 +#, python-format +msgid "Error unrecognized volume_clear option: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:238 +#, python-format +msgid "snapshot: %s not found, skipping delete operations" +msgstr "" + +#: cinder/volume/drivers/lvm.py:268 cinder/volume/drivers/lvm.py:656 +#, python-format +msgid "Creating clone of volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:345 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provision for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:364 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:378 +msgid "Detected inconsistency in provider_location id" +msgstr "" + +#: cinder/volume/drivers/lvm.py:430 +#, python-format +msgid "Symbolic link %s not found" +msgstr "" + +#: cinder/volume/drivers/lvm.py:510 cinder/volume/drivers/lvm.py:524 +#, python-format +msgid "Skipping remove_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:542 +#, python-format +msgid "" +"Skipping remove_export. 
No iscsi_target is presently exported for volume:"
+" %s"
+msgstr ""
+
+#: cinder/volume/drivers/lvm.py:583
+msgid "Error retrieving volume status: "
+msgstr ""
+
+#: cinder/volume/drivers/nfs.py:141
+#, python-format
+msgid "There's no NFS config file configured (%s)"
+msgstr ""
+
+#: cinder/volume/drivers/nfs.py:146
+#, python-format
+msgid "NFS config file at %(config)s doesn't exist"
+msgstr ""
+
+#: cinder/volume/drivers/rbd.py:64
+#, python-format
+msgid "rbd has no pool %s"
+msgstr ""
+
+#: cinder/volume/drivers/rbd.py:87 cinder/volume/drivers/sheepdog.py:134
+msgid "error refreshing volume stats"
+msgstr ""
+
+#: cinder/volume/drivers/rbd.py:220
+msgid "Not stored in rbd"
+msgstr ""
+
+#: cinder/volume/drivers/rbd.py:224
+msgid "Blank components"
+msgstr ""
+
+#: cinder/volume/drivers/rbd.py:227
+msgid "Not an rbd snapshot"
+msgstr ""
+
+#: cinder/volume/drivers/rbd.py:242
+#, python-format
+msgid "%s is in a different ceph cluster"
+msgstr ""
+
+#: cinder/volume/drivers/rbd.py:253
+#, python-format
+msgid "Unable to read image %s"
+msgstr ""
+
+#: cinder/volume/drivers/scality.py:63
+msgid "Value required for 'scality_sofs_config'"
+msgstr ""
+
+#: cinder/volume/drivers/scality.py:74
+#, python-format
+msgid "Cannot access 'scality_sofs_config': %s"
+msgstr ""
+
+#: cinder/volume/drivers/scality.py:80
+msgid "Cannot execute /sbin/mount.sofs"
+msgstr ""
+
+#: cinder/volume/drivers/scality.py:101
+msgid "Cannot mount Scality SOFS, check syslog for errors"
+msgstr ""
+
+#: cinder/volume/drivers/scality.py:135
+#, python-format
+msgid "Cannot find volume dir for Scality SOFS at '%s'"
+msgstr ""
+
+#: cinder/volume/drivers/sheepdog.py:47
+#, python-format
+msgid "Sheepdog is not working: %s"
+msgstr ""
+
+#: cinder/volume/drivers/sheepdog.py:52
+msgid "Sheepdog is not working"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:129
+#, python-format
+msgid "Payload for SolidFire API call: %s"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:146
+#, python-format
+msgid "Call to json.loads() raised an exception: %s"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:152
+#, python-format
+msgid "Results of SolidFire API call: %s"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:156
+#, python-format
+msgid "Clone operation encountered: %s"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:158
+#, python-format
+msgid "Waiting for outstanding operation before retrying snapshot: %s"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:164
+#, python-format
+msgid "Detected xDBVersionMismatch, retry %s of 5"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:171
+#, python-format
+msgid "API response: %s"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:191
+#, python-format
+msgid "Found solidfire account: %s"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:220
+#, python-format
+msgid "solidfire account: %s does not exist, creating it..."
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:281
+#, python-format
+msgid "Failed to retrieve volume SolidFire-ID: %s in get_by_account!"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:338
+msgid "Failed to get model update from clone"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:364
+#, python-format
+msgid "More than one valid preset was detected, using %s"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:397
+#, python-format
+msgid "Mapped SolidFire volumeID %(sfid)s to cinder ID %(uuid)s."
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:406
+#, python-format
+msgid "Volume %s not found on SF Cluster."
+msgstr "" + +#: cinder/volume/drivers/solidfire.py:409 +#, python-format +msgid "Found %(count)s volumes mapped to id: %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:476 +msgid "Enter SolidFire delete_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:480 +#, python-format +msgid "Account for Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:482 +msgid "This usually means the volume was never succesfully created." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:497 +#, python-format +msgid "Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:500 +msgid "Leaving SolidFire delete_volume" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:504 +msgid "Executing SolidFire ensure_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:509 +msgid "Executing SolidFire create_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:558 +msgid "Updating cluster status info" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:566 +msgid "Failed to get updated stats" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:173 +#, python-format +msgid "WWPN on node %(node)s: %(wwpn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:179 +msgid "enter: do_setup" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:190 +#, python-format +msgid "pool %s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:206 +msgid "Failed to get license information." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:265 +msgid "do_setup: No configured nodes" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:267 +msgid "leave: do_setup" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:289 +msgid "enter: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:295 +#: cinder/volume/drivers/netapp/iscsi.py:169 +#: cinder/volume/drivers/netapp/iscsi.py:1182 +#: cinder/volume/drivers/netapp/iscsi.py:1535 +#: cinder/volume/drivers/netapp/nfs.py:109 +#: cinder/volume/drivers/netapp/nfs.py:328 +#: cinder/volume/drivers/netapp/nfs.py:379 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:118 +#, python-format +msgid "%s is not set" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:301 +msgid "" +"Password or SSH private key is required for authentication: set either " +"san_password or san_private_key option" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:309 +#, python-format +msgid "" +"Illegal value %d specified for storwize_svc_flashcopy_timeout: valid " +"values are between 0 and 600" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:317 +msgid "leave: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:331 +#, python-format +msgid "ensure_export: Volume %s not found on storage" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:356 +#, python-format +msgid "enter: _get_chap_secret_for_host: host name %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:392 +#, python-format +msgid "" +"leave: _get_chap_secret_for_host: host name %(host_name)s with secret " +"%(chap_secret)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:415 +msgid "_create_host: Cannot clean host name. 
Host name is not unicode or string"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:483
+#, python-format
+msgid "enter: _get_host_from_connector: prefix %s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:511
+#, python-format
+msgid "leave: _get_host_from_connector: host %s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:523
+#, python-format
+msgid "enter: _create_host: host %s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:538
+msgid "_create_host: No connector ports"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:551
+#, python-format
+msgid "leave: _create_host: host %(host)s - %(host_name)s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:574
+#, python-format
+msgid "enter: _map_vol_to_host: volume %(volume_name)s to host %(host_name)s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:609
+msgid ""
+"storwize_svc_multihostmap_enabled is set to False, multi-host mapping is "
+"not allowed"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:619
+#, python-format
+msgid "volume %s mapping to multi host"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:625
+#, python-format
+msgid ""
+"leave: _map_vol_to_host: LUN %(result_lun)s, volume %(volume_name)s, host"
+" %(host_name)s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:635
+#, python-format
+msgid "enter: _delete_host: host %s "
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:643
+#, python-format
+msgid "leave: _delete_host: host %s "
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:675
+#, python-format
+msgid "enter: initialize_connection: volume %(vol)s with connector %(conn)s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:691
+msgid "_create_host failed to return the host name."
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:702
+#, python-format
+msgid "initialize_connection: Failed to get attributes for volume %s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:709
+#, python-format
+msgid "Did not find expected column name in lsvdisk: %s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:711
+#, python-format
+msgid "initialize_connection: Missing volume attribute for volume %s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:728
+#, python-format
+msgid ""
+"initialize_connection: No node found in I/O group %(gid)s for volume "
+"%(vol)s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:736
+#, python-format
+msgid "initialize_connection: Did not find a preferred node for volume %s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:769
+#, python-format
+msgid ""
+"initialize_connection: Failed to collect return properties for volume "
+"%(vol)s and connector %(conn)s.\n"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:774
+#, python-format
+msgid ""
+"leave: initialize_connection:\n"
+" volume: %(vol)s\n"
+" connector %(conn)s\n"
+" properties: %(prop)s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:792
+#, python-format
+msgid "enter: terminate_connection: volume %(vol)s with connector %(conn)s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:802
+msgid "_get_host_from_connector failed to return the host name for connector"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:817
+#, python-format
+msgid ""
+"terminate_connection: No mapping of volume %(vol_name)s to host "
+"%(host_name)s found"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:825
+#, python-format
+msgid "leave: terminate_connection: volume %(vol)s with connector %(conn)s"
+msgstr ""
+
+#: 
cinder/volume/drivers/storwize_svc.py:883
+msgid "protocol must be specified as 'iSCSI' or 'FC'"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:907
+#, python-format
+msgid "enter: _create_vdisk: vdisk %s "
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:945
+#, python-format
+msgid ""
+"_create_vdisk %(name)s - did not find success message in CLI output.\n"
+" stdout: %(out)s\n"
+" stderr: %(err)s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:950
+#, python-format
+msgid "leave: _create_vdisk: volume %s "
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:962
+#: cinder/volume/drivers/storwize_svc.py:976
+#, python-format
+msgid ""
+"create FC mapping from %(source)s to %(target)s - did not find success "
+"message in CLI output.\n"
+" stdout: %(out)s\n"
+" stderr: %(err)s\n"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:988
+#: cinder/volume/drivers/storwize_svc.py:998
+#, python-format
+msgid ""
+"create FC mapping from %(source)s to %(target)s - did not find mapping id"
+" in CLI output.\n"
+" stdout: %(out)s\n"
+" stderr: %(err)s\n"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:1012
+#, python-format
+msgid ""
+"_prepare_fc_map: Failed to prepare FlashCopy from %(source)s to "
+"%(target)s.\n"
+"stdout: %(out)s\n"
+" stderr: %(err)s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:1039
+#, python-format
+msgid ""
+"Unexpected mapping status %(status)s for mapping %(id)s. Attributes: "
+"%(attr)s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:1050
+#, python-format
+msgid ""
+"Mapping %(id)s prepare failed to complete within the allotted %(to)d "
+"seconds timeout. Terminating."
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:1055
+#, python-format
+msgid ""
+"_prepare_fc_map: Failed to start FlashCopy from %(source)s to %(target)s "
+"with exception %(ex)s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:1062
+#, python-format
+msgid "_prepare_fc_map: %s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:1069
+#, python-format
+msgid ""
+"_start_fc_map: Failed to start FlashCopy from %(source)s to %(target)s.\n"
+"stdout: %(out)s\n"
+" stderr: %(err)s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:1080
+#, python-format
+msgid ""
+"enter: _run_flashcopy: execute FlashCopy from source %(source)s to target"
+" %(target)s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:1092
+#, python-format
+msgid "leave: _run_flashcopy: FlashCopy started from %(source)s to %(target)s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:1100
+#, python-format
+msgid "enter: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:1107
+#, python-format
+msgid "_create_copy: Source vdisk %s does not exist"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:1119
+#, python-format
+msgid ""
+"_create_copy: cannot get source vdisk %(src)s capacity from vdisk "
+"attributes %(attr)s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:1129
+#, python-format
+msgid "leave: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:1134
+#, python-format
+msgid "enter: _get_flashcopy_mapping_attributes: mapping %s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:1156
+#, python-format
+msgid ""
+"leave: _get_flashcopy_mapping_attributes: mapping %(fc_map_id)s, "
+"attributes %(attributes)s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:1164
+#, python-format
+msgid 
"enter: _is_vdisk_defined: vdisk %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1166 +#, python-format +msgid "leave: _is_vdisk_defined: vdisk %(vol)s with %(str)s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1194 +#, python-format +msgid "enter: _delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1199 +#, python-format +msgid "warning: Tried to delete vdisk %s but it does not exist." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1225 +#, python-format +msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1258 +#, python-format +msgid "leave: _delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1283 +msgid "create_volume_from_snapshot: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1297 +msgid "create_cloned_volume: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1361 +msgid "_update_volume_status: Could not get system name" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1373 +msgid "Could not get pool data from the storage" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1374 +msgid "_update_volume_status: Could not get storage pool data" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1412 +msgid "" +"Illegal value specified for storwize_svc_vol_rsize: set to either a " +"percentage (0-100) or -1" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1418 +msgid "" +"Illegal value specified for storwize_svc_vol_warning: set to a percentage" +" (0-100)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1425 +msgid "" +"Illegal value specified for storwize_svc_vol_grainsize: set to either 32," +" 64, 128, or 256" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1432 +msgid "System does not support compression" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1437 +msgid "If compression is set to True, rsize must also be set (not equal to -1)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1443 +#, python-format +msgid "" +"Illegal value %(prot)s specified for storwize_svc_connection_protocol: " +"valid values are %(enabled)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1452 +msgid "" +"Multipath is currently only supported for FC connections and not iSCSI. 
" +"(This is a Nova limitation.)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1464 +#, python-format +msgid "enter: _execute_command_and_parse_attributes: command %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1471 +#, python-format +msgid "" +"CLI Exception output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1488 +#, python-format +msgid "" +"leave: _execute_command_and_parse_attributes:\n" +"command: %(cmd)s\n" +"attributes: %(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1507 +#, python-format +msgid "" +"_get_hdr_dic: attribute headers and values do not match.\n" +" Headers: %(header)s\n" +" Values: %(row)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1517 +#, python-format +msgid "" +"%(fun)s: Failed with unexpected CLI output.\n" +" Command: %(cmd)s\n" +"stdout: %(out)s\n" +"stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1531 +#, python-format +msgid "" +"%(fun)s: Failed with unexpected CLI output.\n" +" Command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1539 +#, python-format +msgid "Did not find expected column in %(fun)s: %(hdr)s" +msgstr "" + +#: cinder/volume/drivers/windows.py:142 +#, python-format +msgid "Creating folder %s " +msgstr "" + +#: cinder/volume/drivers/windows.py:207 +#: cinder/volume/drivers/nexenta/volume.py:203 +#, python-format +msgid "Ignored target creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/windows.py:213 +#, python-format +msgid "Disk not found: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:218 +#, python-format +msgid "Sending %(method)s to %(url)s. Body \"%(body)s\"" +msgstr "" + +#: cinder/volume/drivers/zadara.py:240 +#, python-format +msgid "Operation completed. %(data)s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:350 +#, python-format +msgid "Volume %(name)s could not be found. It might be already deleted" +msgstr "" + +#: cinder/volume/drivers/zadara.py:447 +#, python-format +msgid "Attach properties: %(properties)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:42 +msgid "" +"Module PyWBEM not installed. Install PyWBEM using the python-pywbem " +"package." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:78 +msgid "Entering create_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:82 +#, python-format +msgid "Create Volume: %(volume)s Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:90 +#, python-format +msgid "Create Volume: %(volume)s Storage type: %(storage_type)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:97 +#, python-format +msgid "" +"Create Volume: %(volume)s Pool: %(pool)s Storage System: " +"%(storage_system)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:106 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Storage Configuration Service not " +"found for pool %(storage_type)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:114 +#, python-format +msgid "" +"Create Volume: %(name)s Method: CreateOrModifyElementFromStoragePool " +"ConfigServicie: %(service)s ElementName: %(name)s InPool: %(pool)s " +"ElementType: 5 Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:129 +#, python-format +msgid "Create Volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:136 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:143 +#, python-format +msgid "Leaving create_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:151 +msgid "Entering create_volume_from_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:156 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:166 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Snapshot Instance: %(snapshotinstance)s Storage " +"System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:176 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Create Volume from Snapshot is NOT supported on VMAX." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:187 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Cannot find Replication Service to create volume from " +"snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:196 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Method: CreateElementReplica ReplicationService: " +"%(service)s ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:217 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s " +"Snapshot:%(snapshotname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:229 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Successfully clone volume from snapshot. Finding the " +"clone relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:240 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Remove the clone relationship. Method: " +"ModifyReplicaSynchronization ReplicationService: %(service)s Operation: " +"8 Synchronization: %(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:256 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:265 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. 
Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:277 +#, python-format +msgid "" +"Leaving create_volume_from_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:286 +msgid "Entering create_cloned_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:291 +#, python-format +msgid "" +"Create a Clone from Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:301 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Source Instance: %(src_instance)s Storage System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:311 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Cannot find Replication Service to create cloned volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:320 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Method: CreateElementReplica ReplicationService: %(service)s " +"ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:341 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source " +"Volume:%(srcname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:353 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Successfully cloned volume from source volume. Finding the clone " +"relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:364 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Remove the clone relationship. Method: ModifyReplicaSynchronization " +"ReplicationService: %(service)s Operation: 8 Synchronization: " +"%(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:380 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:389 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:401 +#, python-format +msgid "" +"Leaving create_cloned_volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:410 +msgid "Entering delete_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:412 +#, python-format +msgid "Delete Volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:419 +#, python-format +msgid "Volume %(name)s not found on the array. No volume to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:429 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Storage Configuration Service not " +"found." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:437 +#, python-format +msgid "Delete Volume: %(name)s DeviceID: %(deviceid)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:441 +#, python-format +msgid "" +"Delete Volume: %(name)s Method: EMCReturnToStoragePool ConfigServic: " +"%(service)s TheElement: %(vol_instance)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:454 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:463 +#, python-format +msgid "Leaving delete_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:470 +msgid "Entering create_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:474 +#, python-format +msgid "Create snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:486 +#, python-format +msgid "Device ID: %(deviceid)s: Storage System: %(storagesystem)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:493 +#: cinder/volume/drivers/emc/emc_smis_common.py:495 +#: cinder/volume/drivers/emc/emc_smis_common.py:565 +#, python-format +msgid "Cannot find Replication Service to create snapshot for volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:500 +#, python-format +msgid "" +"Create Snapshot: Method: CreateElementReplica: Target: %(snapshot)s " +"Source: %(volume)s Replication Service: %(service)s ElementName: " +"%(elementname)s Sync Type: 7 SourceElement: %(sourceelement)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:516 +#, python-format +msgid "" +"Create Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:525 +#, python-format +msgid "Error Create Snapshot: (snapshot)s Volume: %(volume)s Error: %(errordesc)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:533 +#, python-format +msgid "" +"Leaving create_snapshot: Snapshot: %(snapshot)s Volume: %(volume)s " +"Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:539 +msgid "Entering delete_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:543 +#, python-format +msgid "Delete Snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:549 +#, python-format +msgid "" +"Delete Snapshot: %(snapshot)s: volume: %(volume)s. Finding " +"StorageSychronization_SV_SV." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:557 +#, python-format +msgid "" +"Snapshot: %(snapshot)s: volume: %(volume)s not found on the array. No " +"snapshot to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:572 +#, python-format +msgid "" +"Delete Snapshot: Target: %(snapshot)s Source: %(volume)s. Method: " +"ModifyReplicaSynchronization: Replication Service: %(service)s " +"Operation: 19 Synchronization: %(sync_name)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:588 +#, python-format +msgid "" +"Delete Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:597 +#, python-format +msgid "" +"Error Delete Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. 
Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:609 +#, python-format +msgid "" +"Leaving delete_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:619 +#, python-format +msgid "Create export: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:624 +#, python-format +msgid "create_export: Volume: %(volume)s Device ID: %(device_id)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:646 +#, python-format +msgid "" +"ExposePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(lun_name)s" +" InitiatorPortIDs: %(initiator)s DeviceAccesses: 2" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:661 +#, python-format +msgid "ExposePaths parameter LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:672 +#, python-format +msgid "Error mapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:676 +#, python-format +msgid "ExposePaths for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:692 +#, python-format +msgid "" +"HidePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(device_id)s " +"LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:705 +#, python-format +msgid "Error unmapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:709 +#, python-format +msgid "HidePaths for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:722 +#, python-format +msgid "" +"AddMembers: ConfigServicie: %(service)s MaskingGroup: %(masking_group)s" +" Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:735 +#, python-format +msgid "Error mapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:740 +#, python-format +msgid "AddMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:753 +#, python-format +msgid "" +"RemoveMembers: ConfigServicie: %(service)s MaskingGroup: " +"%(masking_group)s Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:766 +#, python-format +msgid "Error unmapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:771 +#, python-format +msgid "RemoveMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:777 +#, python-format +msgid "Map volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:786 +#: cinder/volume/drivers/emc/emc_smis_common.py:816 +#, python-format +msgid "Cannot find Controller Configuration Service for storage system %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:800 +#, python-format +msgid "Unmap volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:806 +#, python-format +msgid "Volume %s is not mapped. No volume to unmap." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:830 +#, python-format +msgid "Initialize connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:836 +#, python-format +msgid "Volume %s is already mapped." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:848 +#, python-format +msgid "Terminate connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:880 +#, python-format +msgid "Found Storage Type: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:883 +msgid "Storage type not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:899 +#, python-format +msgid "Found Masking View: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:902 +msgid "Masking View not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:924 +msgid "Ecom user not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:944 +#, python-format +msgid "Ecom IP: %(ecomIp)s Port: %(ecomPort)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:947 +msgid "Ecom server not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:954 +msgid "Cannot connect to ECOM server" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:966 +#, python-format +msgid "Found Replication Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:979 +#, python-format +msgid "Found Storage Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:992 +#, python-format +msgid "Found Controller Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1005 +#, python-format +msgid "Found Storage Hardware ID Management Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1049 +#, python-format +msgid "Pool %(storage_type)s is not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1055 +#, python-format +msgid "Storage system not found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1061 +#, python-format +msgid "Pool: %(pool)s SystemName: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1077 +#, python-format +msgid "Pool name: %(poolname)s System name: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1109 +#, python-format +msgid "Volume %(volumename)s not found on the array." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1112 +#, python-format +msgid "Volume name: %(volumename)s Volume instance: %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1125 +#, python-format +msgid "Source: %(volumename)s Target: %(snapshotname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1148 +#, python-format +msgid "" +"Source: %(volumename)s Target: %(snapshotname)s. Storage Synchronized " +"not found. " +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1153 +#, python-format +msgid "" +"Storage system: %(storage_system)s Storage Synchronized instance: " +"%(sync)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1179 +#, python-format +msgid "Error finding %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1183 +#, python-format +msgid "Found %(name)s: %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1243 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage_system)s " +"and initiator %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1282 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage volume %(vol)s and initiator" +" %(initiator)s is %(ctrl)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1295 +#, python-format +msgid "" +"Volume %(name)s not found on the array. Cannot determine if there are " +"volumes mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1307 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage)s and " +"%(connector)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1319 +#, python-format +msgid "" +"Found %(numVolumesMapped)d volumes on storage system %(storage)s mapped " +"to %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1354 +#, python-format +msgid "Available device number on %(storage)s: %(device)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1397 +#, python-format +msgid "Device number not found for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1402 +#, python-format +msgid "Found device number %(device)d for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1412 +#, python-format +msgid "Device info: %(data)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1434 +#, python-format +msgid "Masking view: %(view)s DeviceMaskingGroup: %(masking)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1456 +#, python-format +msgid "Found Storage Processor System: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1484 +#, python-format +msgid "" +"iSCSIProtocolEndpoint for storage system %(storage_system)s and SP %(sp)s" +" is %(endpoint)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1513 +msgid "Error finding Storage Hardware ID Service." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1519 +#, python-format +msgid "" +"EMCGetTargetEndpoints: Service: %(service)s Storage HardwareIDs: " +"%(hardwareids)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1531 +msgid "Error finding Target WWNs." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1541 +#, python-format +msgid "Add target WWN: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1543 +#, python-format +msgid "Target WWNs: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1559 +#, python-format +msgid "Storage Hardware IDs for %(wwpns)s is %(foundInstances)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:158 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:167 +#, python-format +msgid "Cannot find device number for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:197 +#, python-format +msgid "Found iSCSI endpoint: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:204 +#, python-format +msgid "ISCSI endpoint not found for SP %(sp)s on storage system %(storage)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:221 +#, python-format +msgid "ISCSI properties: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:113 +msgid "read timed out" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:138 +msgid "do_setup." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:144 +msgid "check_for_setup_error." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:149 +msgid "check_for_setup_error: Can not get device type." 
+msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:153 +#, python-format +msgid "check_for_setup_error: Device type is:%(type)s, version is:%(version)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:160 +msgid "" +"check_for_setup_error: Product version not right. Please make sure the " +"product version is V1." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:171 +msgid "_get_device_type: Storage Pool must be configured." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:180 +#, python-format +msgid "create_volume:volume name: %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:194 +#, python-format +msgid "delete_volume: volume name: %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:201 +#, python-format +msgid "delete_volume:No need to delete volume. Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:210 +#, python-format +msgid "create_export: volume name:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:214 +#, python-format +msgid "create_export:Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:234 +#, python-format +msgid "initialize_connection: volume name: %(volume)s. initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:249 +#, python-format +msgid "" +"initialize_connection:Failed to find target ip for " +"initiator:%(initiatorname)s, please check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:259 +#, python-format +msgid "initialize_connection:Failed to find target iSCSI iqn. Target IP:%(ip)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:297 +#, python-format +msgid "" +"initialize_connection:host name: %(host)s, initiator name: %(ini)s, " +"hostport name: %(port)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:307 +#, python-format +msgid "" +"initialize_connection:Failed to find the given volume. volume " +"name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:359 +#, python-format +msgid "terminate_connection:volume name: %(volume)s, initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:368 +#, python-format +msgid "terminate_connection:Host does not exist. Host name:%(host)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:377 +#, python-format +msgid "terminate_connection:volume does not exist. volume name:%(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:396 +#, python-format +msgid "" +"terminate_connection:No map between host and volume. Host " +"name:%(hostname)s, volume name:%(volumename)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:414 +#, python-format +msgid "" +"terminate_connection:No initiator is added to the host. Host " +"name:%(hostname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:427 +#, python-format +msgid "create_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:434 +msgid "create_snapshot:Device does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:440 +msgid "create_snapshot:Resource pool needs 1GB valid size at least." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:447 +#, python-format +msgid "create_snapshot:Volume does not exist. 
Volume name:%(name)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:456
+#, python-format
+msgid "create_snapshot:Snapshot does not exist. Snapshot name:%(name)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:468
+#, python-format
+msgid "delete_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:475
+msgid "delete_snapshot:Device does not support snapshot."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:484
+#, python-format
+msgid "delete_snapshot:Snapshot does not exist. snapshot name:%(snap)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:497
+#, python-format
+msgid ""
+"create_volume_from_snapshot:snapshot name:%(snapshot)s, volume "
+"name:%(volume)s."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:505
+#, python-format
+msgid ""
+"create_volume_from_snapshot:Device does not support create volume from "
+"snapshot. Volume name:%(volume)s, snapshot name:%(snapshot)s."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:516
+#, python-format
+msgid ""
+"create_volume_from_snapshot:Snapshot does not exist. Snapshot "
+"name:%(name)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:567
+msgid "Config file is wrong. Controller IP, UserName and UserPassword must be set."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:573
+#, python-format
+msgid "_check_conf_file: %s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:584
+#, python-format
+msgid "_read_xml:%s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:612
+#, python-format
+msgid "Write login information to xml error. %s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:616
+#, python-format
+msgid "_get_login_info error. %s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:638
+#, python-format
+msgid ""
+"Config file is wrong. LUNType must be \"Thin\" or \"Thick\". "
+"LUNType:%(type)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:665
+msgid "_get_lun_set_info:Use default prefetch type. Prefetch type:Intelligent."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:676
+msgid ""
+"_get_lun_set_info:No available pools! Please check whether storage pool "
+"is created."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:686
+#, python-format
+msgid "_get_lun_set_info:%s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:736
+msgid ""
+"_get_maximum_pool:maxpoolid is None. Please check config file and make "
+"sure the \"Name\" in \"StoragePool\" is right."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:754
+#, python-format
+msgid "_get_iscsi_info:%s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:764
+#, python-format
+msgid "CLI command:%s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:831
+#, python-format
+msgid "_execute_cli:%s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:838
+#, python-format
+msgid ""
+"_name_translate:Name in cinder: %(old)s, new name in storage system: "
+"%(new)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:872
+#, python-format
+msgid ""
+"_create_hostgroup:Failed to Create hostgroup. Hostgroup name: %(name)s. "
+"out:%(out)s."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:901
+#, python-format
+msgid ""
+"_add_host:Failed to add host to hostgroup. host name:%(host)s hostgroup "
+"id:%(hostgroup)s out:%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:927
+#, python-format
+msgid ""
+"_add_initiator:Failed to add initiator. initiator name:%(name)s "
+"out:%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:941
+#, python-format
+msgid ""
+"_delete_initiator:ERROR:Failed to delete initiator. initiator "
+"name:%(name)s out:%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:991
+#, python-format
+msgid ""
+"_add_hostport:Failed to add hostport. port name:%(port)s port "
+"information:%(info)s host id:%(host)s out:%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1009
+#, python-format
+msgid "_delete_hostport:Failed to delete host port. port id:%(portid)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1016
+#, python-format
+msgid "_get_tgt_iqn:iSCSI IP is %s."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1049
+#, python-format
+msgid "_get_tgt_iqn:iSCSI target iqn is:%s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1086
+#, python-format
+msgid ""
+"_map_lun:Failed to add hostmap. hostid:%(host)s lunid:%(lun)s "
+"hostlunid:%(hostlunid)s out:%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1122
+#, python-format
+msgid ""
+"_delete_map:There are IOs accessing the system. Retry to delete host map."
+" map id:%(mapid)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1128
+#, python-format
+msgid "_delete_map:Failed to delete host map. mapid:%(mapid)s out:%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1142
+#, python-format
+msgid "_delete_host: Failed to delete host. host id:%(hostid)s out:%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1223
+#, python-format
+msgid ""
+"_active_snapshot:Failed to activate snapshot. snapshot id:%(name)s. "
+"out:%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1237
+#, python-format
+msgid ""
+"_disable_snapshot:Failed to disable snapshot. snapshot id:%(id)s. "
+"out:%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1251
+#, python-format
+msgid ""
+"_delete_snapshot:Failed to delete snapshot. snapshot id:%(id)s. "
+"out:%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1308
+#, python-format
+msgid "_create_volume:Failed to Create volume. volume name:%(name)s. out:%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1321
+#, python-format
+msgid "_delete_volume:Failed to delete volume. Volume name:%(name)s out:%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1338
+#, python-format
+msgid ""
+"_create_luncopy:Failed to Create LUNcopy. LUNcopy name:%(name)s "
+"out:%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1352
+#, python-format
+msgid ""
+"_start_luncopy:Failed to start LUNcopy. LUNcopy id:%(luncopyid)s "
+"out:%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1382
+#, python-format
+msgid ""
+"_wait_for_luncopy:LUNcopy status is not normal. LUNcopy "
+"name:%(luncopyname)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1415
+#, python-format
+msgid ""
+"_delete_luncopy:Failed to delete LUNcopy. LUNcopy id:%(luncopyid)s "
+"out:%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1430
+#, python-format
+msgid ""
+"_create_snapshot:Failed to Create snapshot. Snapshot name:%(name)s "
+"out:%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1468
+#, python-format
+msgid ""
+"_change_lun_controller:Failed to change lun owning controller. lun "
+"id:%(lunid)s. new controller:%(controller)s. out:%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1484
+msgid "_is_resource_pool_enough:Resource pool for snapshot has not been added."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:123
+#, python-format
+msgid "API %(name)s failed: %(reason)s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:134
+#: cinder/volume/drivers/netapp/iscsi.py:1167
+#, python-format
+msgid "Using WSDL: %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:144
+#, python-format
+msgid "Using DFM server: %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:149
+#, python-format
+msgid "Using storage service: %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:154
+#, python-format
+msgid "Using storage service prefix: %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:160
+#, python-format
+msgid "Using vfiler: %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:173
+msgid "Either netapp_storage_service or netapp_storage_service_prefix must be set"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:204
+msgid "Connected to DFM server"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:282
+#, python-format
+msgid "Discovered %(dataset_count)s datasets and %(lun_count)s LUNs"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:317
+#, python-format
+msgid "Job failed: %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:346
+msgid ""
+"Attempt to use volume_type without specifying "
+"netapp_storage_service_prefix flag."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:350
+msgid ""
+"You must set the netapp_storage_service flag in order to create volumes "
+"with no volume_type."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:420
+msgid "Failed to provision dataset member"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:435
+msgid "No LUN was created by the provision job"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:463
+#: cinder/volume/drivers/netapp/iscsi.py:1250
+#, python-format
+msgid "No entry in LUN table for volume %(name)s."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:481
+msgid "Failed to remove and delete dataset LUN member"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:497
+msgid "Failed to remove and delete dataset Qtree member"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:538
+#, python-format
+msgid "No entry in LUN table for volume %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:558
+#, python-format
+msgid "Failed to get LUN details for LUN ID %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:575
+#, python-format
+msgid "Failed to get host details for host ID %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:812
+#: cinder/volume/drivers/netapp/iscsi.py:859
+#, python-format
+msgid "No LUN ID for volume %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:821
+#, python-format
+msgid "Failed to get target portal for filer: %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:826
+#, python-format
+msgid "Failed to get target IQN for filer: %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:930
+msgid ""
+"Failed to get LUN list. Is the DFM host time-synchronized with Cinder "
+"host?"
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1042 +#: cinder/volume/drivers/netapp/iscsi.py:1381 +#: cinder/volume/drivers/netapp/iscsi.py:1699 +#: cinder/volume/drivers/netapp/nfs.py:74 +#, python-format +msgid "" +"Cannot create volume of size %(vol_size)s from snapshot of size " +"%(snap_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1054 +#, python-format +msgid "" +"Cannot create volume of type %(new_type)s from snapshot of type " +"%(old_type)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1075 +#: cinder/volume/drivers/netapp/nfs.py:273 +#, python-format +msgid "" +"Cannot create clone of size %(vol_size)s from volume of size " +"%(src_vol_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1086 +#, python-format +msgid "Cannot create clone of type %(new_type)s from volume of type %(old_type)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1143 +#, python-format +msgid "No metadata property %(prop)s defined for the LUN %(name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1216 +#: cinder/volume/drivers/netapp/iscsi.py:1561 +msgid "Success getting LUN list from server" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1238 +#: cinder/volume/drivers/netapp/iscsi.py:1576 +#, python-format +msgid "Created LUN with name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1254 +#: cinder/volume/drivers/netapp/iscsi.py:1369 +#: cinder/volume/drivers/netapp/iscsi.py:1593 +#, python-format +msgid "Destroyed LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1291 +#, python-format +msgid "Mapped LUN %(handle)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1298 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(handle)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1303 +#: cinder/volume/drivers/netapp/iscsi.py:1637 +#, python-format +msgid "Failed to get LUN target details for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1307 +#: cinder/volume/drivers/netapp/iscsi.py:1648 +#, python-format +msgid "Failed to get target portal for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1311 +#: cinder/volume/drivers/netapp/iscsi.py:1651 +#, python-format +msgid "Failed to get target IQN for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1344 +#, python-format +msgid "Unmapped LUN %(handle)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1365 +#, python-format +msgid "No entry in LUN table for snapshot %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1409 +#: cinder/volume/drivers/netapp/iscsi.py:1907 +msgid "Object is not a NetApp LUN." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1419 +#: cinder/volume/drivers/netapp/iscsi.py:2188 +#, python-format +msgid "Cloned LUN with new name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1439 +#, python-format +msgid "Could not find handle for LUN named %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1458 +#: cinder/volume/drivers/netapp/iscsi.py:1936 +#, python-format +msgid "" +"Cannot clone volume of size %(vol_size)s from src volume of size " +"%(src_vol_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1518 +#, python-format +msgid "Using NetApp filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1585 +#, python-format +msgid "No entry in LUN table for volume/snapshot %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1628 +#, python-format +msgid "Mapped LUN %(name)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1632 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(name)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1688 +#, python-format +msgid "Snapshot %s deletion successful" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1717 +#, python-format +msgid "Unmapped LUN %(name)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1735 +#, python-format +msgid "Failed to get vol with required size for volume: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1811 +#, python-format +msgid "Error mapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1829 +#, python-format +msgid "Error unmapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1923 +#, python-format +msgid "Could not find attribute for LUN named %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2057 +#, python-format +msgid "No iscsi service found for vserver %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2192 +#, python-format +msgid "No clonned lun named %s found on the filer" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2377 +#, python-format +msgid "Error finding luns for volume %(vol)s. Verify volume exists." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2481 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s completed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2484 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s failed" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:312 +#, python-format +msgid "" +"Cloning with params ip %(host_ip)s, exp_path\n" +" %(export_path)s, vol %(volume_name)s,\n" +" clone_name %(clone_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:461 +#, python-format +msgid "No interface found on cluster for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:487 +#, python-format +msgid "" +"No volume on cluster with vserver\n" +" %(vserver)s and junction path " +"%(junction)s\n" +" " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:493 +#, python-format +msgid "" +"Cloning with params volume %(volume)s,src %(src_path)s,\n" +" dest %(dest_path)s, vserver %(vserver)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:547 +#, python-format +msgid "No storage path found for export path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:555 +#, python-format +msgid "Cloning with src %(src_path)s, dest %(dest_path)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/__init__.py:27 +msgid "Nexenta SA returned the error" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:64 +#, python-format +msgid "Sending JSON data: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:69 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:75 +msgid "No headers in server response" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:76 +msgid "Bad response from server" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:79 +#, python-format +msgid "Got response: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:99 +#, python-format +msgid "Volume %s does not exist in Nexenta SA" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:211 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:220 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:228 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:238 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:274 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:282 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:74 +#, python-format +msgid "CLIQ command returned %s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:80 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:88 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. 
Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:118 +#, python-format +msgid "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. " +"Result=%(_xml)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:171 +#, python-format +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:235 +msgid "local_path not supported" +msgstr "" + +#: cinder/volume/drivers/san/san.py:148 +#, python-format +msgid "Error running SSH command: %s" +msgstr "" + +#: cinder/volume/drivers/san/san.py:169 +msgid "Specify san_password or san_private_key" +msgstr "" + +#: cinder/volume/drivers/san/san.py:173 +msgid "san_ip must be set" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:80 +#, python-format +msgid "Cannot parse list-view output: %s" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:174 +#, python-format +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:257 +#, python-format +msgid "SSH Command failed after '%(total_attempts)r' attempts: '%(command)s'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:261 +#, python-format +msgid "Error running ssh command: %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:446 +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:104 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:103 +#, python-format +msgid "CPG (%s) doesn't exist on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:493 +#, python-format +msgid "Must specify a valid persona %(valid)s, value '%(persona)s' is invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:532 +#, python-format +msgid "" +"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " +"invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:569 +#, python-format +msgid "Volume (%s) already exists on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:622 +#, python-format +msgid "Unexpected state while cloning %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:97 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:96 +msgid "Login to 3PAR array invalid" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:218 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:237 +#, python-format +msgid "The hostname must be called '%s'" +msgstr "" + diff --git a/cinder/locale/bs/LC_MESSAGES/cinder.po b/cinder/locale/bs/LC_MESSAGES/cinder.po new file mode 100644 index 0000000000..f6c4550d49 --- /dev/null +++ b/cinder/locale/bs/LC_MESSAGES/cinder.po @@ -0,0 +1,5581 @@ +# Bosnian translation for cinder +# Copyright (c) 2011 Rosetta Contributors and Canonical Ltd 2011 +# This file is distributed under the same license as the cinder package. +# FIRST AUTHOR , 2011. 
+#
+msgid ""
+msgstr ""
+"Project-Id-Version: cinder\n"
+"Report-Msgid-Bugs-To: FULL NAME \n"
+"POT-Creation-Date: 2013-05-21 18:10+0000\n"
+"PO-Revision-Date: 2012-01-19 20:22+0000\n"
+"Last-Translator: yazar \n"
+"Language-Team: Bosnian \n"
+"Plural-Forms: nplurals=2; plural=(n != 1)\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=utf-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Generated-By: Babel 0.9.6\n"
+
+#: cinder/context.py:61
+#, python-format
+msgid "Arguments dropped when creating context: %s"
+msgstr ""
+
+#: cinder/context.py:92
+#, python-format
+msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r"
+msgstr ""
+
+#: cinder/exception.py:63
+msgid "Unexpected error while running command."
+msgstr "Neočekivana greška prilikom pokretanja komande."
+
+#: cinder/exception.py:66
+#, python-format
+msgid ""
+"%(description)s\n"
+"Command: %(cmd)s\n"
+"Exit code: %(exit_code)s\n"
+"Stdout: %(stdout)r\n"
+"Stderr: %(stderr)r"
+msgstr ""
+
+#: cinder/exception.py:90
+msgid "DB exception wrapped."
+msgstr ""
+
+#: cinder/exception.py:104
+msgid "An unknown exception occurred."
+msgstr ""
+
+#: cinder/exception.py:125 cinder/openstack/common/rpc/common.py:86
+msgid "Exception in string format operation"
+msgstr ""
+
+#: cinder/exception.py:138
+msgid "Connection to glance failed"
+msgstr ""
+
+#: cinder/exception.py:142
+msgid "Not authorized."
+msgstr ""
+
+#: cinder/exception.py:147
+msgid "User does not have admin privileges"
+msgstr ""
+
+#: cinder/exception.py:151
+#, python-format
+msgid "Policy doesn't allow %(action)s to be performed."
+msgstr ""
+
+#: cinder/exception.py:155
+#, python-format
+msgid "Not authorized for image %(image_id)s."
+msgstr ""
+
+#: cinder/exception.py:159
+msgid "Unacceptable parameters."
+msgstr ""
+
+#: cinder/exception.py:164
+msgid "Invalid snapshot"
+msgstr ""
+
+#: cinder/exception.py:168
+#, python-format
+msgid "Volume %(volume_id)s is still attached, detach volume first."
+msgstr ""
+
+#: cinder/exception.py:172
+msgid "Failed to load data into json format"
+msgstr ""
+
+#: cinder/exception.py:176
+msgid "The request is invalid."
+msgstr ""
+
+#: cinder/exception.py:180
+msgid "The results are invalid."
+msgstr ""
+
+#: cinder/exception.py:184
+msgid "Invalid input received"
+msgstr ""
+
+#: cinder/exception.py:188
+msgid "Invalid volume type"
+msgstr ""
+
+#: cinder/exception.py:192
+msgid "Invalid volume"
+msgstr ""
+
+#: cinder/exception.py:196
+#, python-format
+msgid "Invalid content type %(content_type)s."
+msgstr ""
+
+#: cinder/exception.py:200
+msgid "Invalid Parameter: Unicode is not supported by the current database."
+msgstr ""
+
+#: cinder/exception.py:207
+#, python-format
+msgid "%(err)s"
+msgstr ""
+
+#: cinder/exception.py:211
+msgid "Service is unavailable at this time."
+msgstr ""
+
+#: cinder/exception.py:215
+#, python-format
+msgid "Image %(image_id)s is unacceptable: %(reason)s"
+msgstr ""
+
+#: cinder/exception.py:219
+#, python-format
+msgid "Expected a uuid but received %(uuid)s."
+msgstr ""
+
+#: cinder/exception.py:223
+msgid "Resource could not be found."
+msgstr ""
+
+#: cinder/exception.py:229
+#, python-format
+msgid "Volume %(volume_id)s persistence file could not be found."
+msgstr ""
+
+#: cinder/exception.py:233
+#, python-format
+msgid "Volume %(volume_id)s could not be found."
+msgstr "" + +#: cinder/exception.py:237 +#, python-format +msgid "Unable to locate account %(account_name)s on Solidfire device" +msgstr "" + +#: cinder/exception.py:242 +#, python-format +msgid "Volume not found for instance %(instance_id)s." +msgstr "" + +#: cinder/exception.py:246 +#, python-format +msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:251 cinder/exception.py:264 +msgid "Invalid metadata" +msgstr "" + +#: cinder/exception.py:255 cinder/exception.py:268 +msgid "Invalid metadata size" +msgstr "" + +#: cinder/exception.py:259 +#, python-format +msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:272 +#, python-format +msgid "Volume type %(volume_type_id)s could not be found." +msgstr "" + +#: cinder/exception.py:276 +#, python-format +msgid "Volume type with name %(volume_type_name)s could not be found." +msgstr "" + +#: cinder/exception.py:281 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s has no extra specs with key " +"%(extra_specs_key)s." +msgstr "" + +#: cinder/exception.py:286 +#, python-format +msgid "Snapshot %(snapshot_id)s could not be found." +msgstr "" + +#: cinder/exception.py:290 +#, python-format +msgid "deleting volume %(volume_name)s that has snapshot" +msgstr "" + +#: cinder/exception.py:294 +#, python-format +msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" +msgstr "" + +#: cinder/exception.py:299 +#, python-format +msgid "No target id found for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:303 +#, python-format +msgid "Failed to create iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:307 +#, python-format +msgid "Failed to attach iSCSI target for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:311 +#, python-format +msgid "Failed to remove iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:315 +#, python-format +msgid "No disk at %(location)s" +msgstr "" + +#: cinder/exception.py:319 +#, python-format +msgid "Invalid image href %(image_href)s." +msgstr "" + +#: cinder/exception.py:323 +#, python-format +msgid "Image %(image_id)s could not be found." +msgstr "" + +#: cinder/exception.py:327 +#, python-format +msgid "Service %(service_id)s could not be found." +msgstr "" + +#: cinder/exception.py:331 +#, python-format +msgid "Host %(host)s could not be found." +msgstr "" + +#: cinder/exception.py:335 +#, python-format +msgid "Scheduler Host Filter %(filter_name)s could not be found." +msgstr "" + +#: cinder/exception.py:339 +#, python-format +msgid "Scheduler Host Weigher %(weigher_name)s could not be found." +msgstr "" + +#: cinder/exception.py:343 +#, python-format +msgid "Could not find binary %(binary)s on host %(host)s." +msgstr "" + +#: cinder/exception.py:347 +#, python-format +msgid "Invalid reservation expiration %(expire)s." +msgstr "" + +#: cinder/exception.py:351 +#, python-format +msgid "" +"Change would make usage less than 0 for the following resources: " +"%(unders)s" +msgstr "" + +#: cinder/exception.py:356 +msgid "Quota could not be found" +msgstr "" + +#: cinder/exception.py:360 +#, python-format +msgid "Unknown quota resources %(unknown)s." +msgstr "" + +#: cinder/exception.py:364 +#, python-format +msgid "Quota for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:368 +#, python-format +msgid "Quota class %(class_name)s could not be found." 
+msgstr "" + +#: cinder/exception.py:372 +#, python-format +msgid "Quota usage for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:376 +#, python-format +msgid "Quota reservation %(uuid)s could not be found." +msgstr "" + +#: cinder/exception.py:380 +#, python-format +msgid "Quota exceeded for resources: %(overs)s" +msgstr "" + +#: cinder/exception.py:384 +#, python-format +msgid "Migration %(migration_id)s could not be found." +msgstr "" + +#: cinder/exception.py:388 +#, python-format +msgid "Migration not found for instance %(instance_id)s with status %(status)s." +msgstr "" + +#: cinder/exception.py:393 +#, python-format +msgid "File %(file_path)s could not be found." +msgstr "" + +#: cinder/exception.py:397 +#, python-format +msgid "Class %(class_name)s could not be found: %(exception)s" +msgstr "" + +#: cinder/exception.py:401 +msgid "Action not allowed." +msgstr "" + +#: cinder/exception.py:410 +#, python-format +msgid "Key pair %(key_name)s already exists." +msgstr "" + +#: cinder/exception.py:414 +#, python-format +msgid "Volume Type %(id)s already exists." +msgstr "" + +#: cinder/exception.py:418 +msgid "Migration error" +msgstr "" + +#: cinder/exception.py:422 +#, python-format +msgid "Malformed message body: %(reason)s" +msgstr "" + +#: cinder/exception.py:426 +#, python-format +msgid "Could not find config at %(path)s" +msgstr "" + +#: cinder/exception.py:430 +#, python-format +msgid "Could not load paste app '%(name)s' from %(path)s" +msgstr "" + +#: cinder/exception.py:434 +#, python-format +msgid "No valid host was found. %(reason)s" +msgstr "" + +#: cinder/exception.py:438 +#, python-format +msgid "Host %(host)s is not up or doesn't exist." +msgstr "" + +#: cinder/exception.py:442 +msgid "Quota exceeded" +msgstr "" + +#: cinder/exception.py:449 +msgid "Requested volume or snapshot exceeds allowed Gigabytes quota" +msgstr "" + +#: cinder/exception.py:454 +msgid "Maximum volume/snapshot size exceeded" +msgstr "" + +#: cinder/exception.py:458 +#, python-format +msgid "Maximum number of volumes allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:462 +#, python-format +msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:466 +#, python-format +msgid "Detected more than one volume with name %(vol_name)s" +msgstr "" + +#: cinder/exception.py:470 +#, python-format +msgid "3PAR Host already exists: %(err)s. %(info)s" +msgstr "" + +#: cinder/exception.py:474 +#, python-format +msgid "Invalid 3PAR Domain: %(err)s" +msgstr "" + +#: cinder/exception.py:478 +#, python-format +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +msgstr "" + +#: cinder/exception.py:483 +msgid "Bad response from SolidFire API" +msgstr "" + +#: cinder/exception.py:487 +#, python-format +msgid "Error in SolidFire API response: data=%(data)s" +msgstr "" + +#: cinder/exception.py:491 +#, python-format +msgid "Unknown or unsupported command %(cmd)s" +msgstr "" + +#: cinder/exception.py:495 +#, python-format +msgid "Malformed response to command %(cmd)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:499 +#, python-format +msgid "Bad HTTP response status %(status)s" +msgstr "" + +#: cinder/exception.py:503 +#, python-format +msgid "Operation failed with status=%(status)s. 
Full dump: %(data)s" +msgstr "" + +#: cinder/exception.py:507 +#, python-format +msgid "Unable to create server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:511 +#, python-format +msgid "Unable to find server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:515 +msgid "Unable to find any active VPSA controller" +msgstr "" + +#: cinder/exception.py:519 +#, python-format +msgid "Failed to retrieve attachments for volume %(name)s" +msgstr "" + +#: cinder/exception.py:523 +#, python-format +msgid "Invalid attachment info for volume %(name)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:527 +#, python-format +msgid "Instance %(instance_id)s could not be found." +msgstr "" + +#: cinder/exception.py:531 +#, python-format +msgid "Bad or unexpected response from the storage volume backend API: %(data)s" +msgstr "" + +#: cinder/exception.py:536 +msgid "Unknown NFS exception" +msgstr "" + +#: cinder/exception.py:540 +msgid "No mounted NFS shares found" +msgstr "" + +#: cinder/exception.py:544 cinder/exception.py:556 +#, python-format +msgid "There is no share which can host %(volume_size)sG" +msgstr "" + +#: cinder/exception.py:548 +msgid "Unknown Gluster exception" +msgstr "" + +#: cinder/exception.py:552 +msgid "No mounted Gluster shares found" +msgstr "" + +#: cinder/exception.py:560 +#, python-format +msgid "" +"Glance metadata cannot be updated, key %(key)s exists for volume id " +"%(volume_id)s" +msgstr "" + +#: cinder/exception.py:565 +msgid "Failed to copy image to volume" +msgstr "" + +#: cinder/exception.py:569 +#, python-format +msgid "Backup %(backup_id)s could not be found." +msgstr "" + +#: cinder/exception.py:573 +#, python-format +msgid "Invalid backup: %(reason)s" +msgstr "" + +#: cinder/exception.py:577 +msgid "Connection to swift failed" +msgstr "" + +#: cinder/flags.py:145 +msgid "Deploy v1 of the Cinder API. " +msgstr "" + +#: cinder/flags.py:148 +msgid "Deploy v2 of the Cinder API. " +msgstr "" + +#: cinder/manager.py:158 +#, python-format +msgid "Skipping %(full_task_name)s, %(ticks_to_skip)s ticks left until next run" +msgstr "" + +#: cinder/manager.py:164 +#, python-format +msgid "Running periodic task %(full_task_name)s" +msgstr "" + +#: cinder/manager.py:171 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "" + +#: cinder/manager.py:216 +msgid "Notifying Schedulers of capabilities ..." 
+msgstr "" + +#: cinder/policy.py:30 +msgid "JSON file representing policy" +msgstr "" + +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" +msgstr "" + +#: cinder/quota.py:704 +#, python-format +msgid "Created reservations %(reservations)s" +msgstr "" + +#: cinder/quota.py:726 +#, python-format +msgid "Failed to commit reservations %(reservations)s" +msgstr "" + +#: cinder/quota.py:747 +#, python-format +msgid "Failed to roll back reservations %(reservations)s" +msgstr "" + +#: cinder/service.py:124 +msgid "SIGTERM received" +msgstr "" + +#: cinder/service.py:174 cinder/openstack/common/service.py:164 +msgid "Parent process has died unexpectedly, exiting" +msgstr "" + +#: cinder/service.py:213 cinder/openstack/common/service.py:200 +msgid "Forking too fast, sleeping" +msgstr "" + +#: cinder/service.py:231 cinder/openstack/common/service.py:121 +#: cinder/openstack/common/service.py:218 +#, python-format +msgid "Caught %s, exiting" +msgstr "" + +#: cinder/service.py:236 cinder/openstack/common/service.py:223 +msgid "Unhandled exception" +msgstr "" + +#: cinder/service.py:243 cinder/openstack/common/service.py:230 +#, python-format +msgid "Started child %d" +msgstr "" + +#: cinder/service.py:253 cinder/openstack/common/service.py:240 +#, python-format +msgid "Starting %d workers" +msgstr "" + +#: cinder/service.py:272 cinder/openstack/common/service.py:257 +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "" + +#: cinder/service.py:275 +#, python-format +msgid "Child %(pid)d exited with status %(code)d" +msgstr "" + +#: cinder/service.py:278 cinder/openstack/common/service.py:265 +#, python-format +msgid "pid %d not in child list" +msgstr "" + +#: cinder/service.py:286 +#, python-format +msgid "_wait_child %d" +msgstr "" + +#: cinder/service.py:302 +#, python-format +msgid "wait wrap.failed %s" +msgstr "" + +#: cinder/service.py:310 cinder/openstack/common/service.py:293 +#, python-format +msgid "Caught %s, stopping children" +msgstr "" + +#: cinder/service.py:321 cinder/openstack/common/service.py:304 +#, python-format +msgid "Waiting on %d children to exit" +msgstr "" + +#: cinder/service.py:353 +#, python-format +msgid "Starting %(topic)s node (version %(version_string)s)" +msgstr "" + +#: cinder/service.py:367 cinder/openstack/common/rpc/service.py:47 +#, python-format +msgid "Creating Consumer connection for Service %s" +msgstr "" + +#: cinder/service.py:458 +msgid "Service killed that has no database entry" +msgstr "" + +#: cinder/service.py:495 +msgid "The service database object disappeared, Recreating it." +msgstr "" + +#: cinder/service.py:510 +msgid "Recovered model server connection!" +msgstr "" + +#: cinder/service.py:516 +msgid "model server went away" +msgstr "" + +#: cinder/service.py:608 +msgid "Full set of FLAGS:" +msgstr "" + +#: cinder/service.py:615 +#, python-format +msgid "%(flag)s : FLAG SET " +msgstr "" + +#: cinder/utils.py:96 +#, python-format +msgid "Fetching %s" +msgstr "" + +#: cinder/utils.py:147 cinder/openstack/common/processutils.py:122 +#, python-format +msgid "Got unknown keyword args to utils.execute: %r" +msgstr "" + +#: cinder/utils.py:153 +msgid "" +"The root_helper option (which lets you specify a root wrapper different " +"from cinder-rootwrap, and defaults to using sudo) is now deprecated. You " +"should use the rootwrap_config option instead." 
+msgstr "" + +#: cinder/utils.py:169 cinder/openstack/common/processutils.py:137 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "" + +#: cinder/utils.py:186 cinder/utils.py:267 +#: cinder/openstack/common/processutils.py:162 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:224 +#, python-format +msgid "Result was %s" +msgstr "" + +#: cinder/utils.py:199 cinder/openstack/common/processutils.py:174 +#, python-format +msgid "%r failed. Retrying." +msgstr "" + +#: cinder/utils.py:241 cinder/volume/drivers/san/hp/hp_3par_common.py:197 +#, python-format +msgid "Running cmd (SSH): %s" +msgstr "" + +#: cinder/utils.py:243 +msgid "Environment not supported over SSH" +msgstr "" + +#: cinder/utils.py:247 +msgid "process_input not supported over SSH" +msgstr "" + +#: cinder/utils.py:316 cinder/volume/drivers/huawei/huawei_iscsi.py:91 +msgid "Specify a password or private_key" +msgstr "" + +#: cinder/utils.py:332 cinder/volume/drivers/huawei/huawei_iscsi.py:99 +#, python-format +msgid "Error connecting via ssh: %s" +msgstr "" + +#: cinder/utils.py:373 +#, python-format +msgid "debug in callback: %s" +msgstr "" + +#: cinder/utils.py:532 +#, python-format +msgid "Link Local address is not found.:%s" +msgstr "" + +#: cinder/utils.py:535 +#, python-format +msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +msgstr "" + +#: cinder/utils.py:569 +#, python-format +msgid "Invalid backend: %s" +msgstr "" + +#: cinder/utils.py:580 +#, python-format +msgid "backend %s" +msgstr "" + +#: cinder/utils.py:630 +msgid "in looping call" +msgstr "" + +#: cinder/utils.py:813 +#, python-format +msgid "Expected object of type: %s" +msgstr "" + +#: cinder/utils.py:903 +#, python-format +msgid "timefunc: '%(name)s' took %(total_time).2f secs" +msgstr "" + +#: cinder/utils.py:1105 +#, python-format +msgid "Could not remove tmpdir: %s" +msgstr "" + +#: cinder/utils.py:1213 +#, python-format +msgid "Unknown byte multiplier: %s" +msgstr "" + +#: cinder/wsgi.py:121 +#, python-format +msgid "Unable to find cert_file : %s" +msgstr "" + +#: cinder/wsgi.py:124 +#, python-format +msgid "Unable to find ca_file : %s" +msgstr "" + +#: cinder/wsgi.py:127 +#, python-format +msgid "Unable to find key_file : %s" +msgstr "" + +#: cinder/wsgi.py:130 +msgid "" +"When running server in SSL mode, you must specify both a cert_file and " +"key_file option value in your configuration file" +msgstr "" + +#: cinder/wsgi.py:163 +#, python-format +msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" +msgstr "" + +#: cinder/wsgi.py:207 +#, python-format +msgid "Started %(name)s on %(_host)s:%(_port)s" +msgstr "" + +#: cinder/wsgi.py:226 +msgid "Stopping WSGI server." +msgstr "" + +#: cinder/wsgi.py:240 +msgid "WSGI server has stopped." +msgstr "" + +#: cinder/wsgi.py:309 +msgid "You must implement __call__" +msgstr "" + +#: cinder/api/auth.py:26 +msgid "" +"cinder.api.auth:CinderKeystoneContext is deprecated. Please use " +"cinder.api.middleware.auth:CinderKeystoneContext instead." +msgstr "" + +#: cinder/api/auth.py:34 +msgid "" +"cinder.api.auth:pipeline_factory is deprecated. Please use " +"cinder.api.middleware.auth:pipeline_factory instead." 
+msgstr "" + +#: cinder/api/common.py:63 cinder/api/common.py:97 cinder/volume/api.py:359 +msgid "limit param must be an integer" +msgstr "" + +#: cinder/api/common.py:66 cinder/api/common.py:101 cinder/volume/api.py:356 +msgid "limit param must be positive" +msgstr "" + +#: cinder/api/common.py:91 +msgid "offset param must be an integer" +msgstr "" + +#: cinder/api/common.py:105 +msgid "offset param must be positive" +msgstr "" + +#: cinder/api/common.py:133 +#, python-format +msgid "marker [%s] not found" +msgstr "" + +#: cinder/api/common.py:160 +#, python-format +msgid "href %s does not contain version" +msgstr "" + +#: cinder/api/extensions.py:184 +msgid "Initializing extension manager." +msgstr "" + +#: cinder/api/extensions.py:199 +#, python-format +msgid "Loaded extension: %s" +msgstr "" + +#: cinder/api/extensions.py:237 +#, python-format +msgid "Ext name: %s" +msgstr "" + +#: cinder/api/extensions.py:238 +#, python-format +msgid "Ext alias: %s" +msgstr "" + +#: cinder/api/extensions.py:239 +#, python-format +msgid "Ext description: %s" +msgstr "" + +#: cinder/api/extensions.py:241 +#, python-format +msgid "Ext namespace: %s" +msgstr "" + +#: cinder/api/extensions.py:242 +#, python-format +msgid "Ext updated: %s" +msgstr "" + +#: cinder/api/extensions.py:244 +#, python-format +msgid "Exception loading extension: %s" +msgstr "" + +#: cinder/api/extensions.py:258 +#, python-format +msgid "Loading extension %s" +msgstr "" + +#: cinder/api/extensions.py:264 +#, python-format +msgid "Calling extension factory %s" +msgstr "" + +#: cinder/api/extensions.py:278 +#, python-format +msgid "osapi_volume_extension is set to deprecated path: %s" +msgstr "" + +#: cinder/api/extensions.py:280 +#, python-format +msgid "" +"Please set your flag or cinder.conf settings for osapi_volume_extension " +"to: %s" +msgstr "" + +#: cinder/api/extensions.py:289 +#, python-format +msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:357 +#, python-format +msgid "Failed to load extension %(classpath)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:381 +#, python-format +msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgstr "" + +#: cinder/api/sizelimit.py:25 +msgid "" +"cinder.api.sizelimit:RequestBodySizeLimiter is deprecated. Please use " +"cinder.api.middleware.sizelimit:RequestBodySizeLimiter instead" +msgstr "" + +#: cinder/api/xmlutil.py:268 +msgid "element is not a child" +msgstr "" + +#: cinder/api/xmlutil.py:417 +msgid "root element selecting a list" +msgstr "" + +#: cinder/api/xmlutil.py:742 +#, python-format +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgstr "" + +#: cinder/api/xmlutil.py:861 +msgid "subclasses must implement construct()!" 
+msgstr "" + +#: cinder/api/contrib/admin_actions.py:80 +#, python-format +msgid "Updating %(resource)s '%(id)s' with '%(update)r'" +msgstr "" + +#: cinder/api/contrib/backups.py:128 +#, python-format +msgid "show called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:140 +#, python-format +msgid "delete called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:143 +#, python-format +msgid "Delete backup with id: %s" +msgstr "" + +#: cinder/api/contrib/backups.py:185 +#, python-format +msgid "Creating new backup %s" +msgstr "" + +#: cinder/api/contrib/backups.py:195 cinder/api/contrib/backups.py:230 +msgid "Incorrect request body format" +msgstr "" + +#: cinder/api/contrib/backups.py:201 +#, python-format +msgid "Creating backup of volume %(volume_id)s in container %(container)s" +msgstr "" + +#: cinder/api/contrib/backups.py:221 +#, python-format +msgid "Restoring backup %(backup_id)s (%(body)s)" +msgstr "" + +#: cinder/api/contrib/backups.py:234 +#, python-format +msgid "Restoring backup %(backup_id)s to volume %(volume_id)s" +msgstr "" + +#: cinder/api/contrib/extended_snapshot_attributes.py:61 +msgid "Snapshot not found." +msgstr "" + +#: cinder/api/contrib/hosts.py:83 cinder/api/openstack/wsgi.py:158 +msgid "cannot understand XML" +msgstr "" + +#: cinder/api/contrib/hosts.py:133 +#, python-format +msgid "Host '%s' could not be found." +msgstr "" + +#: cinder/api/contrib/hosts.py:162 +#, python-format +msgid "Invalid status: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:165 +#, python-format +msgid "Invalid update setting: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:177 +#, python-format +msgid "Setting host %(host)s to %(state)s." +msgstr "" + +#: cinder/api/contrib/hosts.py:202 +msgid "Describe-resource is admin only functionality" +msgstr "" + +#: cinder/api/contrib/hosts.py:210 +msgid "Host not found" +msgstr "" + +#: cinder/api/contrib/quotas.py:63 +msgid "Quota limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:102 +msgid "Request body empty" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:106 +#: cinder/api/v1/snapshot_metadata.py:77 cinder/api/v1/volume_metadata.py:77 +#: cinder/api/v2/snapshot_metadata.py:77 +msgid "Request body and URI mismatch" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:109 +#: cinder/api/v1/snapshot_metadata.py:81 cinder/api/v1/volume_metadata.py:81 +#: cinder/api/v2/snapshot_metadata.py:81 +msgid "Request body contains too many items" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:159 +msgid "Invalid request body" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:163 +msgid "No image_name was specified in request." +msgstr "" + +#: cinder/api/middleware/fault.py:45 +#, python-format +msgid "Caught error: %s" +msgstr "" + +#: cinder/api/middleware/fault.py:54 cinder/api/openstack/wsgi.py:886 +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "" + +#: cinder/api/middleware/sizelimit.py:54 cinder/api/middleware/sizelimit.py:63 +#: cinder/api/middleware/sizelimit.py:77 +msgid "Request is too large." 
+msgstr "" + +#: cinder/api/openstack/__init__.py:74 +msgid "Must specify an ExtensionManager class" +msgstr "" + +#: cinder/api/openstack/__init__.py:85 +#, python-format +msgid "Extended resource: %s" +msgstr "" + +#: cinder/api/openstack/__init__.py:110 +#, python-format +msgid "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" +msgstr "" + +#: cinder/api/openstack/__init__.py:115 +#, python-format +msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgstr "" + +#: cinder/api/openstack/__init__.py:128 +msgid "" +"cinder.api.openstack:FaultWrapper is deprecated. Please use " +"cinder.api.middleware.fault:FaultWrapper instead." +msgstr "" + +#: cinder/api/openstack/urlmap.py:25 +msgid "" +"cinder.api.openstack.urlmap:urlmap_factory is deprecated. Please use " +"cinder.api.urlmap:urlmap_factory instead." +msgstr "" + +#: cinder/api/openstack/wsgi.py:134 cinder/api/openstack/wsgi.py:537 +msgid "cannot understand JSON" +msgstr "" + +#: cinder/api/openstack/wsgi.py:542 +msgid "too many body keys" +msgstr "" + +#: cinder/api/openstack/wsgi.py:581 +#, python-format +msgid "Exception handling resource: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:586 +#, python-format +msgid "Fault thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:589 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:697 +msgid "Unrecognized Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:701 +msgid "No Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:705 +msgid "Empty body provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:816 +#, python-format +msgid "There is no such action: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:819 cinder/api/openstack/wsgi.py:832 +#: cinder/api/v1/snapshot_metadata.py:55 cinder/api/v1/snapshot_metadata.py:73 +#: cinder/api/v1/snapshot_metadata.py:98 cinder/api/v1/snapshot_metadata.py:123 +#: cinder/api/v1/volume_metadata.py:55 cinder/api/v1/volume_metadata.py:73 +#: cinder/api/v1/volume_metadata.py:98 cinder/api/v1/volume_metadata.py:123 +#: cinder/api/v2/snapshot_metadata.py:55 cinder/api/v2/snapshot_metadata.py:73 +#: cinder/api/v2/snapshot_metadata.py:98 cinder/api/v2/snapshot_metadata.py:123 +msgid "Malformed request body" +msgstr "" + +#: cinder/api/openstack/wsgi.py:829 +msgid "Unsupported Content-Type" +msgstr "" + +#: cinder/api/openstack/wsgi.py:841 +msgid "Malformed request url" +msgstr "" + +#: cinder/api/openstack/wsgi.py:889 +#, python-format +msgid "%(url)s returned a fault: %(e)s" +msgstr "" + +#: cinder/api/openstack/volume/__init__.py:25 +msgid "" +"cinder.api.openstack.volume:APIRouter is deprecated. Please use " +"cinder.api.v1.router:APIRouter instead." +msgstr "" + +#: cinder/api/openstack/volume/versions.py:26 +msgid "" +"cinder.api.openstack.volume.versions.Versions is deprecated. Please use " +"cinder.api.versions.Versions instead." +msgstr "" + +#: cinder/api/v1/limits.py:144 cinder/api/v2/limits.py:144 +#, python-format +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." +msgstr "" + +#: cinder/api/v1/limits.py:270 cinder/api/v2/limits.py:270 +msgid "This request was rate-limited." 
+msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:39 cinder/api/v1/snapshot_metadata.py:119 +#: cinder/api/v1/snapshot_metadata.py:158 cinder/api/v2/snapshot_metadata.py:39 +#: cinder/api/v2/snapshot_metadata.py:119 +#: cinder/api/v2/snapshot_metadata.py:158 +msgid "snapshot does not exist" +msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:141 +#: cinder/api/v1/snapshot_metadata.py:151 cinder/api/v1/volume_metadata.py:141 +#: cinder/api/v1/volume_metadata.py:151 cinder/api/v2/snapshot_metadata.py:141 +#: cinder/api/v2/snapshot_metadata.py:151 +msgid "Metadata item was not found" +msgstr "" + +#: cinder/api/v1/snapshots.py:123 cinder/api/v2/snapshots.py:123 +#, python-format +msgid "Delete snapshot with id: %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:173 cinder/api/v2/snapshots.py:179 +#, python-format +msgid "Create snapshot from volume %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:177 cinder/api/v2/snapshots.py:188 +#, python-format +msgid "Invalid value '%s' for force. " +msgstr "" + +#: cinder/api/v1/volume_metadata.py:39 cinder/api/v1/volume_metadata.py:119 +#: cinder/api/v1/volume_metadata.py:158 +msgid "volume does not exist" +msgstr "" + +#: cinder/api/v1/volumes.py:106 +#, python-format +msgid "vol=%s" +msgstr "" + +#: cinder/api/v1/volumes.py:236 cinder/api/v2/volumes.py:153 +#, python-format +msgid "Delete volume with id: %s" +msgstr "" + +#: cinder/api/v1/volumes.py:278 cinder/api/v1/volumes.py:282 +#: cinder/api/v2/volumes.py:209 cinder/api/v2/volumes.py:213 +msgid "Invalid imageRef provided." +msgstr "" + +#: cinder/api/v1/volumes.py:339 cinder/api/v2/volumes.py:272 +#, python-format +msgid "Create volume of %s GB" +msgstr "" + +#: cinder/api/v1/volumes.py:418 +#, python-format +msgid "Removing options '%(bad_options)s' from query" +msgstr "" + +#: cinder/api/v2/volumes.py:359 +#, python-format +msgid "Removing options '%s' from query" +msgstr "" + +#: cinder/backup/api.py:53 +#, fuzzy +msgid "Backup status must be available or error" +msgstr "Status volumena mora biti omogućen" + +#: cinder/backup/api.py:78 +#, fuzzy +msgid "Volume to be backed up must be available" +msgstr "Status volumena mora biti omogućen" + +#: cinder/backup/api.py:112 +#, fuzzy +msgid "Backup status must be available" +msgstr "Status volumena mora biti omogućen" + +#: cinder/backup/api.py:117 +msgid "Backup to be restored has invalid size" +msgstr "" + +#: cinder/backup/api.py:126 +#, python-format +msgid "Creating volume of %(size)s GB for restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/api.py:140 +#, python-format +msgid "" +"volume size %(volume_size)d is too small to restore backup of size " +"%(size)d." 
+msgstr "" + +#: cinder/backup/api.py:145 +#, fuzzy +msgid "Volume to be restored to must be available" +msgstr "Status volumena mora biti omogućen" + +#: cinder/backup/api.py:151 +msgid "Volume to be restored to is smaller than the backup to be restored" +msgstr "" + +#: cinder/backup/api.py:155 +#, python-format +msgid "Overwriting volume %(volume_id)s with restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/manager.py:79 +msgid "Cleaning up incomplete backup operations" +msgstr "" + +#: cinder/backup/manager.py:83 +#, python-format +msgid "Resetting volume %s to available (was backing-up)" +msgstr "" + +#: cinder/backup/manager.py:87 +#, python-format +msgid "Resetting volume %s to error_restoring (was restoring-backup)" +msgstr "" + +#: cinder/backup/manager.py:98 +#, python-format +msgid "Resetting backup %s to error (was creating)" +msgstr "" + +#: cinder/backup/manager.py:104 +#, python-format +msgid "Resetting backup %s to available (was restoring)" +msgstr "" + +#: cinder/backup/manager.py:109 +#, python-format +msgid "Resuming delete on backup: %s" +msgstr "" + +#: cinder/backup/manager.py:119 +#, python-format +msgid "create_backup started, backup: %(backup_id)s for volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:128 +#, python-format +msgid "" +"create_backup aborted, expected volume status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:137 +#, python-format +msgid "" +"create_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:160 +#, python-format +msgid "create_backup finished. backup: %s" +msgstr "" + +#: cinder/backup/manager.py:166 +#, python-format +msgid "" +"restore_backup started, restoring backup: %(backup_id)s to volume: " +"%(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:175 +#, python-format +msgid "" +"restore_backup aborted, expected volume status %(expected_status)s but " +"got %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:183 +#, python-format +msgid "" +"restore_backup aborted, expected backup status %(expected_status)s but " +"got %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:199 +#, python-format +msgid "" +"restore_backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]" +msgstr "" + +#: cinder/backup/manager.py:220 +#, python-format +msgid "" +"restore_backup finished, backup: %(backup_id)s restored to volume: " +"%(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:228 +#, python-format +msgid "delete_backup started, backup: %s" +msgstr "" + +#: cinder/backup/manager.py:234 +#, python-format +msgid "" +"delete_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:244 +#, python-format +msgid "" +"delete_backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]" +msgstr "" + +#: cinder/backup/manager.py:264 +#, python-format +msgid "delete_backup finished, backup %s deleted" +msgstr "" + +#: cinder/backup/services/swift.py:96 +#, python-format +msgid "unsupported compression algorithm: %s" +msgstr "" + +#: cinder/backup/services/swift.py:117 +#, python-format +msgid "_check_container_exists: container: %s" +msgstr "" + +#: cinder/backup/services/swift.py:122 
+#, python-format +msgid "container %s does not exist" +msgstr "" + +#: cinder/backup/services/swift.py:127 +#, python-format +msgid "container %s exists" +msgstr "" + +#: cinder/backup/services/swift.py:133 +#, python-format +msgid "_create_container started, container: %(container)s,backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:148 +#, python-format +msgid "_generate_swift_object_name_prefix: %s" +msgstr "" + +#: cinder/backup/services/swift.py:159 +#, python-format +msgid "generated object list: %s" +msgstr "" + +#: cinder/backup/services/swift.py:169 +#, python-format +msgid "" +"_write_metadata started, container name: %(container)s, metadata " +"filename: %(filename)s" +msgstr "" + +#: cinder/backup/services/swift.py:184 +#, python-format +msgid "" +"error writing metadata file to swift, MD5 of metadata file in swift " +"[%(etag)s] is not the same as MD5 of metadata file sent to swift " +"[%(md5)s]" +msgstr "" + +#: cinder/backup/services/swift.py:188 +msgid "_write_metadata finished" +msgstr "" + +#: cinder/backup/services/swift.py:193 +#, python-format +msgid "" +"_read_metadata started, container name: %(container)s, metadata filename:" +" %(filename)s" +msgstr "" + +#: cinder/backup/services/swift.py:197 +#, python-format +msgid "_read_metadata finished (%s)" +msgstr "" + +#: cinder/backup/services/swift.py:207 +#, python-format +msgid "volume size %d is invalid." +msgstr "" + +#: cinder/backup/services/swift.py:221 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to swift, volume size: " +"%(volume_size_bytes)d, swift object names prefix %(object_prefix)s, " +"availability zone: %(availability_zone)s" +msgstr "" + +#: cinder/backup/services/swift.py:237 +msgid "reading chunk of data from volume" +msgstr "" + +#: cinder/backup/services/swift.py:244 +#, python-format +msgid "" +"compressed %(data_size_bytes)d bytes of data to %(comp_size_bytes)d bytes" +" using %(algorithm)s" +msgstr "" + +#: cinder/backup/services/swift.py:248 +msgid "not compressing data" +msgstr "" + +#: cinder/backup/services/swift.py:252 +msgid "About to put_object" +msgstr "" + +#: cinder/backup/services/swift.py:257 +#, python-format +msgid "swift MD5 for %(object_name)s: %(etag)s" +msgstr "" + +#: cinder/backup/services/swift.py:260 +#, python-format +msgid "backup MD5 for %(object_name)s: %(md5)s" +msgstr "" + +#: cinder/backup/services/swift.py:262 +#, python-format +msgid "" +"error writing object to swift, MD5 of object in swift %(etag)s is not the" +" same as MD5 of object sent to swift %(md5)s" +msgstr "" + +#: cinder/backup/services/swift.py:268 +msgid "Calling eventlet.sleep(0)" +msgstr "" + +#: cinder/backup/services/swift.py:276 +#, python-format +msgid "backup %s finished." +msgstr "" + +#: cinder/backup/services/swift.py:281 +#, python-format +msgid "v1 swift volume backup restore of %s started" +msgstr "" + +#: cinder/backup/services/swift.py:287 +#, python-format +msgid "metadata_object_names = %s" +msgstr "" + +#: cinder/backup/services/swift.py:293 +msgid "" +"restore_backup aborted, actual swift object list in swift does not match " +"object list stored in metadata" +msgstr "" + +#: cinder/backup/services/swift.py:299 +#, python-format +msgid "" +"restoring object from swift. 
backup: %(backup_id)s, container: " +"%(container)s, swift object name: %(object_name)s, volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:309 +#, python-format +msgid "decompressing data using %s algorithm" +msgstr "" + +#: cinder/backup/services/swift.py:323 +#, python-format +msgid "v1 swift volume backup restore of %s finished" +msgstr "" + +#: cinder/backup/services/swift.py:331 +#, python-format +msgid "" +"starting restore of backup %(object_prefix)s from swift container: " +"%(container)s, to volume %(volume_id)s, backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:339 +#, python-format +msgid "Restoring swift backup version %s" +msgstr "" + +#: cinder/backup/services/swift.py:344 +#, python-format +msgid "No support to restore swift backup version %s" +msgstr "" + +#: cinder/backup/services/swift.py:348 +#, python-format +msgid "restore %(backup_id)s to %(volume_id)s finished." +msgstr "" + +#: cinder/backup/services/swift.py:362 +msgid "swift error while listing objects, continuing with delete" +msgstr "" + +#: cinder/backup/services/swift.py:371 +#, python-format +msgid "swift error while deleting object %s, continuing with delete" +msgstr "" + +#: cinder/backup/services/swift.py:374 +#, python-format +msgid "deleted swift object: %(swift_object_name)s in container: %(container)s" +msgstr "" + +#: cinder/backup/services/swift.py:380 +#, python-format +msgid "delete %s finished" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:152 +#, python-format +msgid "Creating iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:171 cinder/brick/iscsi/iscsi.py:264 +#: cinder/brick/iscsi/iscsi.py:400 cinder/brick/iscsi/iscsi.py:409 +#, python-format +msgid "Failed to create iscsi target for volume id:%(vol_id)s." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:181 +#, python-format +msgid "" +"Failed to create iscsi target for volume id:%(vol_id)s. Please ensure " +"your tgtd config file contains 'include %(volumes_dir)s/*'" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:192 +#, python-format +msgid "Removing iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:209 cinder/brick/iscsi/iscsi.py:426 +#, python-format +msgid "Failed to remove iscsi target for volume id:%(vol_id)s." 
+msgstr "" + +#: cinder/brick/iscsi/iscsi.py:218 cinder/brick/iscsi/iscsi.py:434 +msgid "valid iqn needed for show_target" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:270 +#, python-format +msgid "Removing iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:356 +msgid "rtstool is not installed correctly" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:376 +#, python-format +msgid "Creating iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:416 +#, python-format +msgid "Removing iscsi_target: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:455 +#, python-format +msgid "Failed to add initiator iqn %s to target" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:35 +#, python-format +msgid "Unable to find Volume Group: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:41 +#, python-format +msgid "Failed to create Volume Group: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:75 +msgid "Error creating Volume Group" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:76 +#, python-format +msgid "Cmd :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:77 +#, python-format +msgid "StdOut :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:78 +#, python-format +msgid "StdErr :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:82 +#, python-format +msgid "Unable to locate Volume Group %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:265 +#, python-format +msgid "Unable to find VG: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:284 +msgid "" +"Requested to setup thin provisioning, however current LVM version does " +"not support it." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:337 +#, python-format +msgid "Unable to find LV: %s" +msgstr "" + +#: cinder/common/sqlalchemyutils.py:68 +msgid "Id not in sort_keys; is sort_keys unique?" +msgstr "" + +#: cinder/common/sqlalchemyutils.py:116 +msgid "Unknown sort direction, must be 'desc' or 'asc'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:51 +msgid "Use of empty request context is deprecated" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:178 +#, python-format +msgid "Unrecognized read_deleted value '%s'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:786 +#, python-format +msgid "Change will make usage less than 0 for the following resources: %(unders)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1797 cinder/db/sqlalchemy/api.py:1823 +#, python-format +msgid "No backend config with id %(sm_backend_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1880 +#, python-format +msgid "No sm_flavor called %(sm_flavor)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1924 +#, python-format +msgid "No sm_volume with id %(volume_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1987 +#, python-format +msgid "No backup with id %(backup_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:74 +msgid "version should be an integer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:101 +msgid "Upgrade DB using Essex release first." +msgstr "" + +#: cinder/db/sqlalchemy/session.py:71 +#, python-format +msgid "Got mysql server has gone away: %s" +msgstr "" + +#: cinder/db/sqlalchemy/session.py:132 +#, python-format +msgid "SQL connection failed. %s attempts left." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:243 +msgid "Exception while creating table." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:272 +msgid "Downgrade from initial Cinder install is unsupported." 
+msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:85 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:123 +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:82 +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:47 +#, python-format +msgid "Table |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:135 +msgid "quota_classes table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:142 +msgid "quota_usages table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:149 +msgid "reservations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:62 +msgid "Exception while creating table 'volume_glance_metedata'" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:77 +msgid "volume_glance_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:94 +msgid "backups table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:60 +msgid "snapshot_metadata table not dropped" +msgstr "" + +#: cinder/image/glance.py:151 +#, python-format +msgid "" +"Error contacting glance server '%(host)s:%(port)s' for '%(method)s', " +"%(extra)s." +msgstr "" + +#: cinder/image/image_utils.py:116 +msgid "Snapshot list encountered but no header found!" +msgstr "" + +#: cinder/image/image_utils.py:224 +msgid "'qemu-img info' parsing failed." +msgstr "" + +#: cinder/image/image_utils.py:231 +#, python-format +msgid "fmt=%(fmt)s backed by:%(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:248 +#, python-format +msgid "Converted to raw, but format is now %s" +msgstr "" + +#: cinder/image/image_utils.py:278 +#, python-format +msgid "Converted to %(f1)s, but format is now %(f2)s" +msgstr "" + +#: cinder/openstack/common/exception.py:104 +msgid "Uncaught exception" +msgstr "" + +#: cinder/openstack/common/excutils.py:48 +#, python-format +msgid "Original exception being dropped: %s" +msgstr "" + +#: cinder/openstack/common/lockutils.py:102 +#, python-format +msgid "Could not release the acquired lock `%s`" +msgstr "" + +#: cinder/openstack/common/lockutils.py:188 +#, python-format +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:199 +#, python-format +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:226 +#, python-format +msgid "Got file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:234 +#, python-format +msgid "Released file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." 
+msgstr "" + +#: cinder/openstack/common/log.py:226 +#, python-format +msgid "Deprecated: %s" +msgstr "" + +#: cinder/openstack/common/log.py:329 +#, python-format +msgid "Error loading logging config %(log_config)s: %(err_msg)s" +msgstr "" + +#: cinder/openstack/common/log.py:379 +#, python-format +msgid "syslog facility must be one of: %s" +msgstr "" + +#: cinder/openstack/common/log.py:537 +#, python-format +msgid "Fatal call to deprecated config: %(msg)s" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:84 +#, python-format +msgid "task run outlasted interval by %s sec" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:91 +msgid "in fixed duration looping call" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:131 +#, python-format +msgid "Dynamic looping call sleeping for %.02f seconds" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:138 +msgid "in dynamic looping call" +msgstr "" + +#: cinder/openstack/common/policy.py:149 +#, python-format +msgid "" +"Inheritance-based rules are deprecated; use the default brain instead of " +"%s." +msgstr "" + +#: cinder/openstack/common/policy.py:163 +#, python-format +msgid "Failed to understand rule %(match)r" +msgstr "" + +#: cinder/openstack/common/policy.py:173 +#, python-format +msgid "Inheritance-based rules are deprecated; update _check_%s" +msgstr "" + +#: cinder/openstack/common/policy.py:180 +#, python-format +msgid "No handler for matches of kind %s" +msgstr "" + +#: cinder/openstack/common/service.py:112 +#: cinder/openstack/common/service.py:275 +msgid "Full set of CONF:" +msgstr "" + +#: cinder/openstack/common/service.py:261 +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "" + +#: cinder/openstack/common/strutils.py:72 +#, python-format +msgid "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:125 +#, python-format +msgid "%s not in valid priorities" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:141 +#, python-format +msgid "" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:171 +#, python-format +msgid "Failed to load notifier %s. These notifications will not be sent." +msgstr "" + +#: cinder/openstack/common/notifier/rabbit_notifier.py:27 +msgid "The rabbit_notifier is now deprecated. Please use rpc_notifier instead." +msgstr "" + +#: cinder/openstack/common/notifier/rpc_notifier.py:45 +#: cinder/openstack/common/notifier/rpc_notifier2.py:51 +#, python-format +msgid "Could not send notification to %(topic)s. Payload=%(message)s" +msgstr "" + +#: cinder/openstack/common/rpc/__init__.py:106 +#, python-format +msgid "" +"A RPC is being made while holding a lock. The locks currently held are " +"%(locks)s. This is probably a bug. Please report it. Include the " +"following: [%(stack)s]." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:75 +msgid "Pool creating new connection" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:200 +#, python-format +msgid "no calling threads waiting for msg_id : %s, message : %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:208 +#, python-format +msgid "" +"Number of call waiters is greater than warning threshhold: %d. There " +"could be a MulticallProxyWaiter leak." 
+msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:291 +#, python-format +msgid "unpacked context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:337 +#, python-format +msgid "UNIQUE_ID is %s." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:406 +#, python-format +msgid "received %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:414 +#, python-format +msgid "no method for message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:415 +#, python-format +msgid "No method for message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:443 +#: cinder/openstack/common/rpc/impl_zmq.py:286 +#, python-format +msgid "Expected exception during message handling (%s)" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:451 +#: cinder/openstack/common/rpc/impl_zmq.py:292 +msgid "Exception during message handling" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:586 +#, python-format +msgid "Making synchronous call on %s ..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:589 +#, python-format +msgid "MSG_ID is %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:623 +#, python-format +msgid "Making asynchronous cast on %s..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:632 +msgid "Making asynchronous fanout cast..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:660 +#, python-format +msgid "Sending %(event_type)s on %(topic)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:74 +msgid "An unknown RPC related exception occurred." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:104 +#, python-format +msgid "" +"Remote error: %(exc_type)s %(value)s\n" +"%(traceback)s." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:121 +#, python-format +msgid "" +"Timeout while waiting on RPC response - topic: \"%(topic)s\", RPC method:" +" \"%(method)s\" info: \"%(info)s\"" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:137 +#: cinder/openstack/common/rpc/common.py:138 +#: cinder/openstack/common/rpc/common.py:139 +msgid "" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:143 +#, python-format +msgid "Found duplicate message(%(msg_id)s). Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:147 +msgid "Invalid reuse of an RPC connection." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:151 +#, python-format +msgid "Specified RPC version, %(version)s, not supported by this endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:156 +#, python-format +msgid "" +"Specified RPC envelope version, %(version)s, not supported by this " +"endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:280 +#, python-format +msgid "Failed to sanitize %(item)s. Key error %(err)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:302 +#, python-format +msgid "Returning exception %s to caller" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:169 +#: cinder/openstack/common/rpc/impl_qpid.py:133 +msgid "Failed to process message... skipping it." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:480 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:502 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:539 +#, python-format +msgid "" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:555 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:609 +#: cinder/openstack/common/rpc/impl_qpid.py:403 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:627 +#: cinder/openstack/common/rpc/impl_qpid.py:418 +#, python-format +msgid "Timed out waiting for RPC response: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:631 +#: cinder/openstack/common/rpc/impl_qpid.py:422 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:670 +#: cinder/openstack/common/rpc/impl_qpid.py:457 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:351 +#, python-format +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:357 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:370 +msgid "Re-established AMQP queues" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:430 +msgid "Error processing message. Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:97 +msgid "JSON serialization failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:104 +#, python-format +msgid "Deserializing: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:140 +#, python-format +msgid "Connecting to %(addr)s with %(type)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:141 +#, python-format +msgid "-> Subscribed to %(subscribe)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:142 +#, python-format +msgid "-> bind: %(bind)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:150 +msgid "Could not open socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:162 +#, python-format +msgid "Subscribing to %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:204 +msgid "You cannot recv on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:209 +msgid "You cannot send on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:273 +#, python-format +msgid "Running func with context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:311 +msgid "Sending reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:345 +msgid "RPC message did not include method." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:380 +msgid "Registering reactor" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:392 +msgid "In reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:407 +msgid "Out reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:411 +msgid "Consuming socket" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:453 +#, python-format +msgid "CONSUMER GOT %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:465 +#, python-format +msgid "Creating proxy for topic: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:471 +msgid "Topic contained dangerous characters." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:496 +#, python-format +msgid "ROUTER RELAY-OUT SUCCEEDED %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:505 +msgid "Topic socket file creation failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:510 +#, python-format +msgid "ROUTER RELAY-OUT QUEUED %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:513 +#, python-format +msgid "Local per-topic backlog buffer full for topic %(topic)s. Dropping message." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:532 +#, python-format +msgid "Could not create IPC directory %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:542 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:576 +#, python-format +msgid "CONSUMER RECEIVED DATA: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:578 +#, python-format +msgid "ROUTER RELAY-OUT %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:600 +msgid "ZMQ Envelope version unsupported or unknown." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:628 +msgid "Skipping topic registration. Already registered." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:635 +#, python-format +msgid "Consumer is a zmq.%s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:687 +msgid "Creating payload" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:700 +msgid "Creating queue socket for reply waiter" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:713 +msgid "Sending cast" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:716 +msgid "Cast sent; Waiting reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:719 +#, python-format +msgid "Received message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:720 +msgid "Unpacking response" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:729 +msgid "Unsupported or unknown ZMQ envelope returned." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:736 +msgid "RPC Message Invalid." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:760 +#, python-format +msgid "%(msg)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:763 +#, python-format +msgid "Sending message(s) to: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:767 +msgid "No matchmaker results. Not casting." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:770 +msgid "No match from matchmaker." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:53 +msgid "Match not found by MatchMaker." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:89 +msgid "Matchmaker does not implement registration or heartbeat." 
+msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:239 +#, python-format +msgid "Matchmaker unregistered: %s, %s" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:250 +msgid "Register before starting heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:343 +#: cinder/openstack/common/rpc/matchmaker.py:361 +#, python-format +msgid "No key defining hosts for topic '%s', see ringfile" +msgstr "" + +#: cinder/scheduler/chance.py:51 cinder/scheduler/simple.py:90 +msgid "Is the appropriate service running?" +msgstr "" + +#: cinder/scheduler/chance.py:56 +msgid "Could not find another host" +msgstr "" + +#: cinder/scheduler/driver.py:89 +msgid "Must implement a fallback schedule" +msgstr "" + +#: cinder/scheduler/driver.py:93 +msgid "Must implement schedule_create_volume" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:113 +msgid "Invalid value for 'scheduler_max_attempts', must be >=1" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:131 +#, python-format +msgid "" +"Error scheduling %(volume_id)s from last vol-service: %(last_host)s : " +"%(exc)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:160 +#, python-format +msgid "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:208 +#, python-format +msgid "Filtered %(hosts)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:214 +#, python-format +msgid "Choosing %(best_host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:240 +#, python-format +msgid "Ignoring %(service_name)s service update from %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:244 +#, python-format +msgid "Received %(service_name)s service update from %(host)s." +msgstr "" + +#: cinder/scheduler/host_manager.py:266 +msgid "service is down or disabled." +msgstr "" + +#: cinder/scheduler/manager.py:119 +#, python-format +msgid "Failed to schedule_%(method)s: %(ex)s" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:69 +#, python-format +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:78 +#, python-format +msgid "Could not decode scheduler options: '%(e)s'" +msgstr "" + +#: cinder/scheduler/simple.py:79 +msgid "Not enough allocatable volume gigabytes remaining" +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:37 +msgid "Free capacity not set: volume node info collection broken." +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:51 +#, python-format +msgid "" +"Insufficient free space for volume creation (requested / avail): " +"%(requested)s/%(available)s" +msgstr "" + +#: cinder/scheduler/filters/retry_filter.py:41 +#, python-format +msgid "Host %(host)s %(pass_msg)s. 
Previously tried hosts: %(hosts)s"
+msgstr ""
+
+#: cinder/tests/fake_driver.py:45 cinder/volume/driver.py:506
+#, python-format
+msgid "FAKE ISCSI: %s"
+msgstr ""
+
+#: cinder/tests/fake_driver.py:93 cinder/tests/fake_driver.py:98
+#, python-format
+msgid "LoggingVolumeDriver: %s"
+msgstr ""
+
+#: cinder/tests/fake_utils.py:72
+#, python-format
+msgid "Faking execution of cmd (subprocess): %s"
+msgstr ""
+
+#: cinder/tests/fake_utils.py:80
+#, python-format
+msgid "Faked command matched %s"
+msgstr ""
+
+#: cinder/tests/fake_utils.py:96
+#, python-format
+msgid "Faked command raised an exception %s"
+msgstr ""
+
+#: cinder/tests/fake_utils.py:101
+#, python-format
+msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'"
+msgstr ""
+
+#: cinder/tests/test_misc.py:59
+#, python-format
+msgid ""
+"The following migrations are missing a downgrade:\n"
+"\t%s"
+msgstr ""
+
+#: cinder/tests/test_storwize_svc.py:243
+#, python-format
+msgid "unrecognized argument %s"
+msgstr ""
+
+#: cinder/tests/test_storwize_svc.py:1217
+#, python-format
+msgid "Run CLI command: %s"
+msgstr ""
+
+#: cinder/tests/test_storwize_svc.py:1220
+#, python-format
+msgid ""
+"CLI output:\n"
+" stdout: %(stdout)s\n"
+" stderr: %(stderr)s"
+msgstr ""
+
+#: cinder/tests/test_storwize_svc.py:1225
+#, python-format
+msgid ""
+"CLI Exception output:\n"
+" stdout: %(out)s\n"
+" stderr: %(err)s"
+msgstr ""
+
+#: cinder/tests/test_volume_types.py:58
+#, python-format
+msgid "Given data: %s"
+msgstr ""
+
+#: cinder/tests/test_volume_types.py:59
+#, python-format
+msgid "Result data: %s"
+msgstr ""
+
+#: cinder/tests/api/contrib/test_backups.py:638
+msgid "Invalid input"
+msgstr ""
+
+#: cinder/tests/integrated/test_login.py:31
+#, python-format
+msgid "volume: %s"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:37
+#, python-format
+msgid ""
+"%(message)s\n"
+"Status Code: %(_status)s\n"
+"Body: %(_body)s"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:46
+msgid "Authentication error"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:54
+msgid "Authorization error"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:62
+msgid "Item not found"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:104
+#, python-format
+msgid "Doing %(method)s on %(relative_url)s"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:106
+#, python-format
+msgid "Body: %s"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:124
+#, python-format
+msgid "%(auth_uri)s => code %(http_status)s"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:150
+#, python-format
+msgid "%(relative_uri)s => code %(http_status)s"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:160
+msgid "Unexpected status code"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:167
+#, python-format
+msgid "Decoding JSON: %s"
+msgstr ""
+
+#: cinder/volume/api.py:96
+msgid "May specify only one of snapshot, imageRef or source volume"
+msgstr ""
+
+#: cinder/volume/api.py:103 cinder/volume/api.py:449
+#: cinder/volume/manager.py:554
+msgid "status must be available"
+msgstr ""
+
+#: cinder/volume/api.py:108
+msgid "Volume size cannot be less than the Snapshot size"
+msgstr ""
+
+#: cinder/volume/api.py:117
+msgid "Unable to clone volumes that are in an error state"
+msgstr ""
+
+#: cinder/volume/api.py:123
+msgid "Clones currently must be >= original volume size."
+msgstr "" + +#: cinder/volume/api.py:140 +#, python-format +msgid "Volume size '%s' must be an integer and greater than 0" +msgstr "" + +#: cinder/volume/api.py:150 +msgid "Size of specified image is larger than volume size." +msgstr "" + +#: cinder/volume/api.py:154 +msgid "Image minDisk size is larger than the volume size." +msgstr "" + +#: cinder/volume/api.py:168 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/api.py:177 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create volume (%(d_consumed)d " +"volumes already consumed)" +msgstr "" + +#: cinder/volume/api.py:308 +msgid "Failed to update quota for deleting volume" +msgstr "" + +#: cinder/volume/api.py:316 +#, fuzzy +msgid "Volume status must be available or error" +msgstr "Status volumena mora biti omogućen" + +#: cinder/volume/api.py:321 +#, python-format +msgid "Volume still has %d dependent snapshots" +msgstr "" + +#: cinder/volume/api.py:374 cinder/volume/api.py:432 +#: cinder/volume/volume_types.py:65 +#, python-format +msgid "Searching by: %s" +msgstr "" + +#: cinder/volume/api.py:452 +msgid "already attached" +msgstr "" + +#: cinder/volume/api.py:459 +msgid "already detached" +msgstr "" + +#: cinder/volume/api.py:470 +#, fuzzy +msgid "Volume status must be available to reserve" +msgstr "Status volumena mora biti omogućen" + +#: cinder/volume/api.py:519 +msgid "must be available" +msgstr "" + +#: cinder/volume/api.py:537 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG snapshot " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/api.py:546 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create snapshot (%(d_consumed)d " +"snapshots already consumed)" +msgstr "" + +#: cinder/volume/api.py:595 +#, fuzzy +msgid "Volume Snapshot status must be available or error" +msgstr "Status volumena mora biti omogućen" + +#: cinder/volume/api.py:623 +msgid "Metadata property key blank" +msgstr "" + +#: cinder/volume/api.py:627 +msgid "Metadata property key greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:631 +msgid "Metadata property value greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:716 +#, fuzzy +msgid "Volume status must be available/in-use." +msgstr "Status volumena mora biti omogućen" + +#: cinder/volume/api.py:719 +msgid "Volume status is in-use." +msgstr "" + +#: cinder/volume/driver.py:94 cinder/volume/drivers/netapp/nfs.py:253 +#, python-format +msgid "Recovering from a failed execute. Try number %s" +msgstr "" + +#: cinder/volume/driver.py:221 cinder/volume/drivers/emc/emc_smis_iscsi.py:119 +msgid "ISCSI provider_location not stored, using discovery" +msgstr "" + +#: cinder/volume/driver.py:269 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/driver.py:273 cinder/volume/drivers/emc/emc_smis_iscsi.py:162 +#, python-format +msgid "ISCSI Discovery: Found %s" +msgstr "" + +#: cinder/volume/driver.py:361 +#, python-format +msgid "copy_image_to_volume %s." +msgstr "" + +#: cinder/volume/driver.py:378 +#, python-format +msgid "copy_volume_to_image %s." +msgstr "" + +#: cinder/volume/driver.py:437 +#, python-format +msgid "iSCSI device not found at %s" +msgstr "" + +#: cinder/volume/driver.py:439 +#, python-format +msgid "" +"ISCSI volume not yet found at: %(host_device)s. Will rescan & retry. 
Try" +" number: %(tries)s" +msgstr "" + +#: cinder/volume/driver.py:451 +#, python-format +msgid "Found iSCSI node %(host_device)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/volume/driver.py:469 cinder/volume/manager.py:689 +#: cinder/volume/drivers/lvm.py:560 cinder/volume/drivers/lvm.py:676 +#: cinder/volume/drivers/storwize_svc.py:1344 +#: cinder/volume/drivers/emc/emc_smis_common.py:855 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:241 +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1503 +#: cinder/volume/drivers/netapp/iscsi.py:1114 +#: cinder/volume/drivers/netapp/iscsi.py:1484 +#: cinder/volume/drivers/netapp/iscsi.py:2236 +#: cinder/volume/drivers/netapp/iscsi.py:2515 +#: cinder/volume/drivers/nexenta/volume.py:317 +msgid "Updating volume status" +msgstr "" + +#: cinder/volume/driver.py:544 +msgid "Driver must implement initialize_connection" +msgstr "" + +#: cinder/volume/manager.py:122 +#, python-format +msgid "Driver path %s is deprecated, update your configuration to the new path." +msgstr "" + +#: cinder/volume/manager.py:144 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "" + +#: cinder/volume/manager.py:149 +#, python-format +msgid "volume %s stuck in a downloading state" +msgstr "" + +#: cinder/volume/manager.py:154 +#, python-format +msgid "volume %s: skipping export" +msgstr "" + +#: cinder/volume/manager.py:156 +msgid "Resuming any in progress delete operations" +msgstr "" + +#: cinder/volume/manager.py:159 +#, python-format +msgid "Resuming delete on volume: %s" +msgstr "" + +#: cinder/volume/manager.py:219 +#, python-format +msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +msgstr "" + +#: cinder/volume/manager.py:228 +#, python-format +msgid "volume %s: creating from snapshot" +msgstr "" + +#: cinder/volume/manager.py:232 +#, python-format +msgid "volume %s: creating from existing volume" +msgstr "" + +#: cinder/volume/manager.py:236 +#, python-format +msgid "volume %s: creating from image" +msgstr "" + +#: cinder/volume/manager.py:245 +#, python-format +msgid "volume %s: creating" +msgstr "" + +#: cinder/volume/manager.py:277 +#, python-format +msgid "volume %s: creating export" +msgstr "" + +#: cinder/volume/manager.py:286 +#, python-format +msgid "volume %s: create failed" +msgstr "" + +#: cinder/volume/manager.py:316 +#, python-format +msgid "volume %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:324 +#, python-format +msgid "Error: %s" +msgstr "" + +#: cinder/volume/manager.py:350 +#, python-format +msgid "volume %s: Error trying to reschedule create" +msgstr "" + +#: cinder/volume/manager.py:368 +msgid "Retry info not present, will not reschedule" +msgstr "" + +#: cinder/volume/manager.py:372 +msgid "No request spec, will not reschedule" +msgstr "" + +#: cinder/volume/manager.py:377 +#, python-format +msgid "volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d" +msgstr "" + +#: cinder/volume/manager.py:406 cinder/volume/manager.py:419 +#, python-format +msgid "volume %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:412 +msgid "volume is not local to this node" +msgstr "" + +#: cinder/volume/manager.py:417 +#, python-format +msgid "volume %s: removing export" +msgstr "" + +#: cinder/volume/manager.py:422 +#, python-format +msgid "volume %s: volume is busy" +msgstr "" + +#: cinder/volume/manager.py:441 +msgid "Failed to update usages deleting volume" +msgstr "" + +#: cinder/volume/manager.py:445 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "" + +#: 
cinder/volume/manager.py:460
+#, python-format
+msgid "snapshot %s: creating"
+msgstr ""
+
+#: cinder/volume/manager.py:466
+#, python-format
+msgid "snapshot %(snap_name)s: creating"
+msgstr ""
+
+#: cinder/volume/manager.py:484
+#, python-format
+msgid "snapshot %s: created successfully"
+msgstr ""
+
+#: cinder/volume/manager.py:492 cinder/volume/manager.py:502
+#, python-format
+msgid "snapshot %s: deleting"
+msgstr ""
+
+#: cinder/volume/manager.py:505
+#, python-format
+msgid "snapshot %s: snapshot is busy"
+msgstr ""
+
+#: cinder/volume/manager.py:530
+msgid "Failed to update usages deleting snapshot"
+msgstr ""
+
+#: cinder/volume/manager.py:533
+#, python-format
+msgid "snapshot %s: deleted successfully"
+msgstr ""
+
+#: cinder/volume/manager.py:551
+msgid "being attached by another instance"
+msgstr ""
+
+#: cinder/volume/manager.py:608
+#, python-format
+msgid "Downloaded image %(image_id)s to %(volume_id)s successfully"
+msgstr ""
+
+#: cinder/volume/manager.py:626
+#, python-format
+msgid "Uploaded volume %(volume_id)s to image (%(image_id)s) successfully"
+msgstr ""
+
+#: cinder/volume/manager.py:702
+msgid "Clear capabilities"
+msgstr ""
+
+#: cinder/volume/manager.py:706
+#, python-format
+msgid "Notification {%s} received"
+msgstr ""
+
+#: cinder/volume/volume_types.py:41
+#, python-format
+msgid "DB error: %s"
+msgstr ""
+
+#: cinder/volume/volume_types.py:50 cinder/volume/volume_types.py:97
+msgid "id cannot be None"
+msgstr ""
+
+#: cinder/volume/volume_types.py:109
+msgid "name cannot be None"
+msgstr ""
+
+#: cinder/volume/volume_types.py:128
+#, python-format
+msgid ""
+"Default volume type is not found, please check default_volume_type "
+"config: %s"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:91
+msgid "Running with CoraidDriver for ESM EtherCloud"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:102
+#, python-format
+msgid "Update session cookie %(session)s"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:108 cinder/volume/drivers/coraid.py:183
+#, python-format
+msgid "Message : %(message)s"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:125
+#, python-format
+msgid "Error while trying to set group: %(message)s"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:128
+#, python-format
+msgid "Unable to find group: %(group)s"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:161
+msgid "ESM urlOpen error"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:166
+msgid "JSON Error"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:170
+msgid "Request without URL"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:175
+#, python-format
+msgid "Configure data : %s"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:177
+#, python-format
+msgid "Configure response : %s"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:199
+#, python-format
+msgid "Unable to retrieve volume info for volume %(volname)s"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:289
+msgid "Cannot login on Coraid ESM"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:308
+#, python-format
+msgid "Failed to create volume %(volname)s"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:321
+#, python-format
+msgid "Failed to delete volume %(volname)s"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:335
+#, python-format
+msgid "Failed to Create Snapshot %(snapname)s"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:347
+#, python-format
+msgid "Failed to Delete Snapshot %(snapname)s"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:362
+#, python-format
+msgid "Failed to Create Volume from Snapshot 
%(snapname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:383 +#, python-format +msgid "" +"Failed to Initialize Connection. Volume Name: %(volname)s Shelf: " +"%(shelf)s, Lun: %(lun)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:65 +#, python-format +msgid "There's no Gluster config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:70 +#, python-format +msgid "Gluster config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:80 +msgid "mount.glusterfs is not installed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:98 cinder/volume/drivers/nfs.py:168 +#, python-format +msgid "casted to %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:108 cinder/volume/drivers/nfs.py:178 +#, python-format +msgid "Volume %s does not have provider_location specified, skipping" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:168 cinder/volume/drivers/nfs.py:237 +#, python-format +msgid "Exception during mounting %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:247 cinder/volume/drivers/nfs.py:321 +#, python-format +msgid "%s is already mounted" +msgstr "" + +#: cinder/volume/drivers/lvm.py:82 +#, python-format +msgid "volume group %s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/lvm.py:197 +#, python-format +msgid "Size for volume: %s not found, skipping secure delete." +msgstr "" + +#: cinder/volume/drivers/lvm.py:204 +#, python-format +msgid "Performing secure delete on volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:218 +#, python-format +msgid "Error unrecognized volume_clear option: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:238 +#, python-format +msgid "snapshot: %s not found, skipping delete operations" +msgstr "" + +#: cinder/volume/drivers/lvm.py:268 cinder/volume/drivers/lvm.py:656 +#, python-format +msgid "Creating clone of volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:345 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provision for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:364 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:378 +msgid "Detected inconsistency in provider_location id" +msgstr "" + +#: cinder/volume/drivers/lvm.py:430 +#, python-format +msgid "Symbolic link %s not found" +msgstr "" + +#: cinder/volume/drivers/lvm.py:510 cinder/volume/drivers/lvm.py:524 +#, python-format +msgid "Skipping remove_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:542 +#, python-format +msgid "" +"Skipping remove_export. 
No iscsi_target is presently exported for volume:" +" %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:583 +msgid "Error retrieving volume status: " +msgstr "" + +#: cinder/volume/drivers/nfs.py:141 +#, python-format +msgid "There's no NFS config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/nfs.py:146 +#, python-format +msgid "NFS config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/rbd.py:64 +#, python-format +msgid "rbd has no pool %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:87 cinder/volume/drivers/sheepdog.py:134 +msgid "error refreshing volume stats" +msgstr "" + +#: cinder/volume/drivers/rbd.py:220 +msgid "Not stored in rbd" +msgstr "" + +#: cinder/volume/drivers/rbd.py:224 +msgid "Blank components" +msgstr "" + +#: cinder/volume/drivers/rbd.py:227 +msgid "Not an rbd snapshot" +msgstr "" + +#: cinder/volume/drivers/rbd.py:242 +#, python-format +msgid "%s is in a different ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:253 +#, python-format +msgid "Unable to read image %s" +msgstr "" + +#: cinder/volume/drivers/scality.py:63 +msgid "Value required for 'scality_sofs_config'" +msgstr "" + +#: cinder/volume/drivers/scality.py:74 +#, python-format +msgid "Cannot access 'scality_sofs_config': %s" +msgstr "" + +#: cinder/volume/drivers/scality.py:80 +msgid "Cannot execute /sbin/mount.sofs" +msgstr "" + +#: cinder/volume/drivers/scality.py:101 +msgid "Cannot mount Scality SOFS, check syslog for errors" +msgstr "" + +#: cinder/volume/drivers/scality.py:135 +#, python-format +msgid "Cannot find volume dir for Scality SOFS at '%s'" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:47 +#, python-format +msgid "Sheepdog is not working: %s" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:52 +msgid "Sheepdog is not working" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:129 +#, python-format +msgid "Payload for SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:146 +#, python-format +msgid "Call to json.loads() raised an exception: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:152 +#, python-format +msgid "Results of SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:156 +#, python-format +msgid "Clone operation encountered: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:158 +#, python-format +msgid "Waiting for outstanding operation before retrying snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:164 +#, python-format +msgid "Detected xDBVersionMismatch, retry %s of 5" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:171 +#, python-format +msgid "API response: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:191 +#, python-format +msgid "Found solidfire account: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:220 +#, python-format +msgid "solidfire account: %s does not exist, creating it..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:281 +#, python-format +msgid "Failed to retrieve volume SolidFire-ID: %s in get_by_account!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:338 +msgid "Failed to get model update from clone" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:364 +#, python-format +msgid "More than one valid preset was detected, using %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:397 +#, python-format +msgid "Mapped SolidFire volumeID %(sfid)s to cinder ID %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:406 +#, python-format +msgid "Volume %s not found on SF Cluster."
+msgstr "" + +#: cinder/volume/drivers/solidfire.py:409 +#, python-format +msgid "Found %(count)s volumes mapped to id: %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:476 +msgid "Enter SolidFire delete_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:480 +#, python-format +msgid "Account for Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:482 +msgid "This usually means the volume was never succesfully created." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:497 +#, python-format +msgid "Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:500 +msgid "Leaving SolidFire delete_volume" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:504 +msgid "Executing SolidFire ensure_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:509 +msgid "Executing SolidFire create_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:558 +msgid "Updating cluster status info" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:566 +msgid "Failed to get updated stats" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:173 +#, python-format +msgid "WWPN on node %(node)s: %(wwpn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:179 +msgid "enter: do_setup" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:190 +#, python-format +msgid "pool %s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:206 +msgid "Failed to get license information." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:265 +msgid "do_setup: No configured nodes" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:267 +msgid "leave: do_setup" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:289 +msgid "enter: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:295 +#: cinder/volume/drivers/netapp/iscsi.py:169 +#: cinder/volume/drivers/netapp/iscsi.py:1182 +#: cinder/volume/drivers/netapp/iscsi.py:1535 +#: cinder/volume/drivers/netapp/nfs.py:109 +#: cinder/volume/drivers/netapp/nfs.py:328 +#: cinder/volume/drivers/netapp/nfs.py:379 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:118 +#, python-format +msgid "%s is not set" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:301 +msgid "" +"Password or SSH private key is required for authentication: set either " +"san_password or san_private_key option" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:309 +#, python-format +msgid "" +"Illegal value %d specified for storwize_svc_flashcopy_timeout: valid " +"values are between 0 and 600" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:317 +msgid "leave: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:331 +#, python-format +msgid "ensure_export: Volume %s not found on storage" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:356 +#, python-format +msgid "enter: _get_chap_secret_for_host: host name %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:392 +#, python-format +msgid "" +"leave: _get_chap_secret_for_host: host name %(host_name)s with secret " +"%(chap_secret)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:415 +msgid "_create_host: Cannot clean host name. 
Host name is not unicode or string" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:483 +#, python-format +msgid "enter: _get_host_from_connector: prefix %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:511 +#, python-format +msgid "leave: _get_host_from_connector: host %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:523 +#, python-format +msgid "enter: _create_host: host %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:538 +msgid "_create_host: No connector ports" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:551 +#, python-format +msgid "leave: _create_host: host %(host)s - %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:574 +#, python-format +msgid "enter: _map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:609 +msgid "" +"storwize_svc_multihostmap_enabled is set to False, multi-host mapping is " +"not allowed" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:619 +#, python-format +msgid "volume %s is mapped to multiple hosts" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:625 +#, python-format +msgid "" +"leave: _map_vol_to_host: LUN %(result_lun)s, volume %(volume_name)s, host" +" %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:635 +#, python-format +msgid "enter: _delete_host: host %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:643 +#, python-format +msgid "leave: _delete_host: host %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:675 +#, python-format +msgid "enter: initialize_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:691 +msgid "_create_host failed to return the host name." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:702 +#, python-format +msgid "initialize_connection: Failed to get attributes for volume %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:709 +#, python-format +msgid "Did not find expected column name in lsvdisk: %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:711 +#, python-format +msgid "initialize_connection: Missing volume attribute for volume %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:728 +#, python-format +msgid "" +"initialize_connection: No node found in I/O group %(gid)s for volume " +"%(vol)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:736 +#, python-format +msgid "initialize_connection: Did not find a preferred node for volume %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:769 +#, python-format +msgid "" +"initialize_connection: Failed to collect return properties for volume " +"%(vol)s and connector %(conn)s.\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:774 +#, python-format +msgid "" +"leave: initialize_connection:\n" +" volume: %(vol)s\n" +" connector %(conn)s\n" +" properties: %(prop)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:792 +#, python-format +msgid "enter: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:802 +msgid "_get_host_from_connector failed to return the host name for connector" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:817 +#, python-format +msgid "" +"terminate_connection: No mapping of volume %(vol_name)s to host " +"%(host_name)s found" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:825 +#, python-format +msgid "leave: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#:
cinder/volume/drivers/storwize_svc.py:883 +msgid "protocol must be specified as 'iSCSI' or 'FC'" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:907 +#, python-format +msgid "enter: _create_vdisk: vdisk %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:945 +#, python-format +msgid "" +"_create_vdisk %(name)s - did not find success message in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:950 +#, python-format +msgid "leave: _create_vdisk: volume %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:962 +#: cinder/volume/drivers/storwize_svc.py:976 +#, python-format +msgid "" +"create FC mapping from %(source)s to %(target)s - did not find success " +"message in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:988 +#: cinder/volume/drivers/storwize_svc.py:998 +#, python-format +msgid "" +"create FC mapping from %(source)s to %(target)s - did not find mapping id" +" in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1012 +#, python-format +msgid "" +"_prepare_fc_map: Failed to prepare FlashCopy from %(source)s to " +"%(target)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1039 +#, python-format +msgid "" +"Unexpected mapping status %(status)s for mapping %(id)s. Attributes: " +"%(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1050 +#, python-format +msgid "" +"Mapping %(id)s prepare failed to complete within the allotted %(to)d " +"seconds timeout. Terminating." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1055 +#, python-format +msgid "" +"_prepare_fc_map: Failed to start FlashCopy from %(source)s to %(target)s " +"with exception %(ex)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1062 +#, python-format +msgid "_prepare_fc_map: %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1069 +#, python-format +msgid "" +"_start_fc_map: Failed to start FlashCopy from %(source)s to %(target)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1080 +#, python-format +msgid "" +"enter: _run_flashcopy: execute FlashCopy from source %(source)s to target" +" %(target)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1092 +#, python-format +msgid "leave: _run_flashcopy: FlashCopy started from %(source)s to %(target)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1100 +#, python-format +msgid "enter: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1107 +#, python-format +msgid "_create_copy: Source vdisk %s does not exist" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1119 +#, python-format +msgid "" +"_create_copy: cannot get source vdisk %(src)s capacity from vdisk " +"attributes %(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1129 +#, python-format +msgid "leave: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1134 +#, python-format +msgid "enter: _get_flashcopy_mapping_attributes: mapping %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1156 +#, python-format +msgid "" +"leave: _get_flashcopy_mapping_attributes: mapping %(fc_map_id)s, " +"attributes %(attributes)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1164 +#, python-format +msgid
"enter: _is_vdisk_defined: vdisk %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1166 +#, python-format +msgid "leave: _is_vdisk_defined: vdisk %(vol)s with %(str)s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1194 +#, python-format +msgid "enter: _delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1199 +#, python-format +msgid "warning: Tried to delete vdisk %s but it does not exist." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1225 +#, python-format +msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1258 +#, python-format +msgid "leave: _delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1283 +msgid "create_volume_from_snapshot: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1297 +msgid "create_cloned_volume: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1361 +msgid "_update_volume_status: Could not get system name" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1373 +msgid "Could not get pool data from the storage" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1374 +msgid "_update_volume_status: Could not get storage pool data" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1412 +msgid "" +"Illegal value specified for storwize_svc_vol_rsize: set to either a " +"percentage (0-100) or -1" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1418 +msgid "" +"Illegal value specified for storwize_svc_vol_warning: set to a percentage" +" (0-100)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1425 +msgid "" +"Illegal value specified for storwize_svc_vol_grainsize: set to either 32," +" 64, 128, or 256" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1432 +msgid "System does not support compression" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1437 +msgid "If compression is set to True, rsize must also be set (not equal to -1)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1443 +#, python-format +msgid "" +"Illegal value %(prot)s specified for storwize_svc_connection_protocol: " +"valid values are %(enabled)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1452 +msgid "" +"Multipath is currently only supported for FC connections and not iSCSI. 
" +"(This is a Nova limitation.)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1464 +#, python-format +msgid "enter: _execute_command_and_parse_attributes: command %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1471 +#, python-format +msgid "" +"CLI Exception output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1488 +#, python-format +msgid "" +"leave: _execute_command_and_parse_attributes:\n" +"command: %(cmd)s\n" +"attributes: %(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1507 +#, python-format +msgid "" +"_get_hdr_dic: attribute headers and values do not match.\n" +" Headers: %(header)s\n" +" Values: %(row)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1517 +#, python-format +msgid "" +"%(fun)s: Failed with unexpected CLI output.\n" +" Command: %(cmd)s\n" +"stdout: %(out)s\n" +"stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1531 +#, python-format +msgid "" +"%(fun)s: Failed with unexpected CLI output.\n" +" Command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1539 +#, python-format +msgid "Did not find expected column in %(fun)s: %(hdr)s" +msgstr "" + +#: cinder/volume/drivers/windows.py:142 +#, python-format +msgid "Creating folder %s " +msgstr "" + +#: cinder/volume/drivers/windows.py:207 +#: cinder/volume/drivers/nexenta/volume.py:203 +#, python-format +msgid "Ignored target creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/windows.py:213 +#, python-format +msgid "Disk not found: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:218 +#, python-format +msgid "Sending %(method)s to %(url)s. Body \"%(body)s\"" +msgstr "" + +#: cinder/volume/drivers/zadara.py:240 +#, python-format +msgid "Operation completed. %(data)s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:350 +#, python-format +msgid "Volume %(name)s could not be found. It might be already deleted" +msgstr "" + +#: cinder/volume/drivers/zadara.py:447 +#, python-format +msgid "Attach properties: %(properties)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:42 +msgid "" +"Module PyWBEM not installed. Install PyWBEM using the python-pywbem " +"package." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:78 +msgid "Entering create_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:82 +#, python-format +msgid "Create Volume: %(volume)s Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:90 +#, python-format +msgid "Create Volume: %(volume)s Storage type: %(storage_type)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:97 +#, python-format +msgid "" +"Create Volume: %(volume)s Pool: %(pool)s Storage System: " +"%(storage_system)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:106 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Storage Configuration Service not " +"found for pool %(storage_type)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:114 +#, python-format +msgid "" +"Create Volume: %(name)s Method: CreateOrModifyElementFromStoragePool " +"ConfigServicie: %(service)s ElementName: %(name)s InPool: %(pool)s " +"ElementType: 5 Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:129 +#, python-format +msgid "Create Volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:136 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:143 +#, python-format +msgid "Leaving create_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:151 +msgid "Entering create_volume_from_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:156 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:166 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Snapshot Instance: %(snapshotinstance)s Storage " +"System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:176 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Create Volume from Snapshot is NOT supported on VMAX." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:187 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Cannot find Replication Service to create volume from " +"snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:196 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Method: CreateElementReplica ReplicationService: " +"%(service)s ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:217 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s " +"Snapshot:%(snapshotname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:229 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Successfully clone volume from snapshot. Finding the " +"clone relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:240 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Remove the clone relationship. Method: " +"ModifyReplicaSynchronization ReplicationService: %(service)s Operation: " +"8 Synchronization: %(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:256 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:265 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. 
Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:277 +#, python-format +msgid "" +"Leaving create_volume_from_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:286 +msgid "Entering create_cloned_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:291 +#, python-format +msgid "" +"Create a Clone from Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:301 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Source Instance: %(src_instance)s Storage System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:311 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Cannot find Replication Service to create cloned volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:320 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Method: CreateElementReplica ReplicationService: %(service)s " +"ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:341 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source " +"Volume:%(srcname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:353 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Successfully cloned volume from source volume. Finding the clone " +"relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:364 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Remove the clone relationship. Method: ModifyReplicaSynchronization " +"ReplicationService: %(service)s Operation: 8 Synchronization: " +"%(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:380 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:389 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:401 +#, python-format +msgid "" +"Leaving create_cloned_volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:410 +msgid "Entering delete_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:412 +#, python-format +msgid "Delete Volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:419 +#, python-format +msgid "Volume %(name)s not found on the array. No volume to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:429 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Storage Configuration Service not " +"found." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:437 +#, python-format +msgid "Delete Volume: %(name)s DeviceID: %(deviceid)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:441 +#, python-format +msgid "" +"Delete Volume: %(name)s Method: EMCReturnToStoragePool ConfigServic: " +"%(service)s TheElement: %(vol_instance)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:454 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:463 +#, python-format +msgid "Leaving delete_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:470 +msgid "Entering create_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:474 +#, python-format +msgid "Create snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:486 +#, python-format +msgid "Device ID: %(deviceid)s: Storage System: %(storagesystem)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:493 +#: cinder/volume/drivers/emc/emc_smis_common.py:495 +#: cinder/volume/drivers/emc/emc_smis_common.py:565 +#, python-format +msgid "Cannot find Replication Service to create snapshot for volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:500 +#, python-format +msgid "" +"Create Snapshot: Method: CreateElementReplica: Target: %(snapshot)s " +"Source: %(volume)s Replication Service: %(service)s ElementName: " +"%(elementname)s Sync Type: 7 SourceElement: %(sourceelement)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:516 +#, python-format +msgid "" +"Create Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:525 +#, python-format +msgid "Error Create Snapshot: (snapshot)s Volume: %(volume)s Error: %(errordesc)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:533 +#, python-format +msgid "" +"Leaving create_snapshot: Snapshot: %(snapshot)s Volume: %(volume)s " +"Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:539 +msgid "Entering delete_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:543 +#, python-format +msgid "Delete Snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:549 +#, python-format +msgid "" +"Delete Snapshot: %(snapshot)s: volume: %(volume)s. Finding " +"StorageSychronization_SV_SV." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:557 +#, python-format +msgid "" +"Snapshot: %(snapshot)s: volume: %(volume)s not found on the array. No " +"snapshot to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:572 +#, python-format +msgid "" +"Delete Snapshot: Target: %(snapshot)s Source: %(volume)s. Method: " +"ModifyReplicaSynchronization: Replication Service: %(service)s " +"Operation: 19 Synchronization: %(sync_name)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:588 +#, python-format +msgid "" +"Delete Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:597 +#, python-format +msgid "" +"Error Delete Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. 
Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:609 +#, python-format +msgid "" +"Leaving delete_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:619 +#, python-format +msgid "Create export: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:624 +#, python-format +msgid "create_export: Volume: %(volume)s Device ID: %(device_id)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:646 +#, python-format +msgid "" +"ExposePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(lun_name)s" +" InitiatorPortIDs: %(initiator)s DeviceAccesses: 2" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:661 +#, python-format +msgid "ExposePaths parameter LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:672 +#, python-format +msgid "Error mapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:676 +#, python-format +msgid "ExposePaths for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:692 +#, python-format +msgid "" +"HidePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(device_id)s " +"LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:705 +#, python-format +msgid "Error unmapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:709 +#, python-format +msgid "HidePaths for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:722 +#, python-format +msgid "" +"AddMembers: ConfigServicie: %(service)s MaskingGroup: %(masking_group)s" +" Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:735 +#, python-format +msgid "Error mapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:740 +#, python-format +msgid "AddMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:753 +#, python-format +msgid "" +"RemoveMembers: ConfigServicie: %(service)s MaskingGroup: " +"%(masking_group)s Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:766 +#, python-format +msgid "Error unmapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:771 +#, python-format +msgid "RemoveMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:777 +#, python-format +msgid "Map volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:786 +#: cinder/volume/drivers/emc/emc_smis_common.py:816 +#, python-format +msgid "Cannot find Controller Configuration Service for storage system %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:800 +#, python-format +msgid "Unmap volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:806 +#, python-format +msgid "Volume %s is not mapped. No volume to unmap." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:830 +#, python-format +msgid "Initialize connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:836 +#, python-format +msgid "Volume %s is already mapped." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:848 +#, python-format +msgid "Terminate connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:880 +#, python-format +msgid "Found Storage Type: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:883 +msgid "Storage type not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:899 +#, python-format +msgid "Found Masking View: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:902 +msgid "Masking View not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:924 +msgid "Ecom user not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:944 +#, python-format +msgid "Ecom IP: %(ecomIp)s Port: %(ecomPort)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:947 +msgid "Ecom server not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:954 +msgid "Cannot connect to ECOM server" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:966 +#, python-format +msgid "Found Replication Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:979 +#, python-format +msgid "Found Storage Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:992 +#, python-format +msgid "Found Controller Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1005 +#, python-format +msgid "Found Storage Hardware ID Management Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1049 +#, python-format +msgid "Pool %(storage_type)s is not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1055 +#, python-format +msgid "Storage system not found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1061 +#, python-format +msgid "Pool: %(pool)s SystemName: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1077 +#, python-format +msgid "Pool name: %(poolname)s System name: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1109 +#, python-format +msgid "Volume %(volumename)s not found on the array." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1112 +#, python-format +msgid "Volume name: %(volumename)s Volume instance: %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1125 +#, python-format +msgid "Source: %(volumename)s Target: %(snapshotname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1148 +#, python-format +msgid "" +"Source: %(volumename)s Target: %(snapshotname)s. Storage Synchronized " +"not found. " +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1153 +#, python-format +msgid "" +"Storage system: %(storage_system)s Storage Synchronized instance: " +"%(sync)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1179 +#, python-format +msgid "Error finding %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1183 +#, python-format +msgid "Found %(name)s: %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1243 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage_system)s " +"and initiator %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1282 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage volume %(vol)s and initiator" +" %(initiator)s is %(ctrl)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1295 +#, python-format +msgid "" +"Volume %(name)s not found on the array. Cannot determine if there are " +"volumes mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1307 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage)s and " +"%(connector)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1319 +#, python-format +msgid "" +"Found %(numVolumesMapped)d volumes on storage system %(storage)s mapped " +"to %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1354 +#, python-format +msgid "Available device number on %(storage)s: %(device)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1397 +#, python-format +msgid "Device number not found for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1402 +#, python-format +msgid "Found device number %(device)d for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1412 +#, python-format +msgid "Device info: %(data)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1434 +#, python-format +msgid "Masking view: %(view)s DeviceMaskingGroup: %(masking)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1456 +#, python-format +msgid "Found Storage Processor System: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1484 +#, python-format +msgid "" +"iSCSIProtocolEndpoint for storage system %(storage_system)s and SP %(sp)s" +" is %(endpoint)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1513 +msgid "Error finding Storage Hardware ID Service." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1519 +#, python-format +msgid "" +"EMCGetTargetEndpoints: Service: %(service)s Storage HardwareIDs: " +"%(hardwareids)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1531 +msgid "Error finding Target WWNs." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1541 +#, python-format +msgid "Add target WWN: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1543 +#, python-format +msgid "Target WWNs: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1559 +#, python-format +msgid "Storage Hardware IDs for %(wwpns)s is %(foundInstances)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:158 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:167 +#, python-format +msgid "Cannot find device number for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:197 +#, python-format +msgid "Found iSCSI endpoint: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:204 +#, python-format +msgid "ISCSI endpoint not found for SP %(sp)s on storage system %(storage)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:221 +#, python-format +msgid "ISCSI properties: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:113 +msgid "read timed out" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:138 +msgid "do_setup." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:144 +msgid "check_for_setup_error." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:149 +msgid "check_for_setup_error: Can not get device type." 
+msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:153 +#, python-format +msgid "check_for_setup_error: Device type is:%(type)s, version is:%(version)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:160 +msgid "" +"check_for_setup_error: Product version not right. Please make sure the " +"product version is V1." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:171 +msgid "_get_device_type: Storage Pool must be configured." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:180 +#, python-format +msgid "create_volume:volume name: %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:194 +#, python-format +msgid "delete_volume: volume name: %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:201 +#, python-format +msgid "delete_volume:No need to delete volume. Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:210 +#, python-format +msgid "create_export: volume name:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:214 +#, python-format +msgid "create_export:Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:234 +#, python-format +msgid "initialize_connection: volume name: %(volume)s. initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:249 +#, python-format +msgid "" +"initialize_connection:Failed to find target ip for " +"initiator:%(initiatorname)s, please check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:259 +#, python-format +msgid "initialize_connection:Failed to find target iSCSI iqn. Target IP:%(ip)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:297 +#, python-format +msgid "" +"initialize_connection:host name: %(host)s, initiator name: %(ini)s, " +"hostport name: %(port)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:307 +#, python-format +msgid "" +"initialize_connection:Failed to find the given volume. volume " +"name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:359 +#, python-format +msgid "terminate_connection:volume name: %(volume)s, initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:368 +#, python-format +msgid "terminate_connection:Host does not exist. Host name:%(host)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:377 +#, python-format +msgid "terminate_connection:volume does not exist. volume name:%(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:396 +#, python-format +msgid "" +"terminate_connection:No map between host and volume. Host " +"name:%(hostname)s, volume name:%(volumename)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:414 +#, python-format +msgid "" +"terminate_connection:No initiator is added to the host. Host " +"name:%(hostname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:427 +#, python-format +msgid "create_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:434 +msgid "create_snapshot:Device does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:440 +msgid "create_snapshot:Resource pool needs 1GB valid size at least." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:447 +#, python-format +msgid "create_snapshot:Volume does not exist. 
Volume name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:456 +#, python-format +msgid "create_snapshot:Snapshot does not exist. Snapshot name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:468 +#, python-format +msgid "delete_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:475 +msgid "delete_snapshot:Device does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:484 +#, python-format +msgid "delete_snapshot:Snapshot does not exist. snapshot name:%(snap)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:497 +#, python-format +msgid "" +"create_volume_from_snapshot:snapshot name:%(snapshot)s, volume " +"name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:505 +#, python-format +msgid "" +"create_volume_from_snapshot:Device does not support create volume from " +"snapshot. Volume name:%(volume)s, snapshot name:%(snapshot)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:516 +#, python-format +msgid "" +"create_volume_from_snapshot:Snapshot does not exist. Snapshot " +"name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:567 +msgid "Config file is wrong. Controler IP, UserName and UserPassword must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:573 +#, python-format +msgid "_check_conf_file: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:584 +#, python-format +msgid "_read_xml:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:612 +#, python-format +msgid "Write login information to xml error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:616 +#, python-format +msgid "_get_login_info error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:638 +#, python-format +msgid "" +"Config file is wrong. LUNType must be \"Thin\" or \"Thick\". " +"LUNType:%(type)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:665 +msgid "_get_lun_set_info:Use default prefetch type. Prefetch type:Intelligent." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:676 +msgid "" +"_get_lun_set_info:No available pools! Please check whether storage pool " +"is created." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:686 +#, python-format +msgid "_get_lun_set_info:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:736 +msgid "" +"_get_maximum_pool:maxpoolid is None. Please check config file and make " +"sure the \"Name\" in \"StoragePool\" is right." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:754 +#, python-format +msgid "_get_iscsi_info:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:764 +#, python-format +msgid "CLI command:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:831 +#, python-format +msgid "_execute_cli:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:838 +#, python-format +msgid "" +"_name_translate:Name in cinder: %(old)s, new name in storage system: " +"%(new)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:872 +#, python-format +msgid "" +"_create_hostgroup:Failed to Create hostgroup. Hostgroup name: %(name)s. " +"out:%(out)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:901 +#, python-format +msgid "" +"_add_host:Failed to add host to hostgroup. 
host name:%(host)s hostgroup " +"id:%(hostgroup)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:927 +#, python-format +msgid "" +"_add_initiator:Failed to add initiator. initiator name:%(name)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:941 +#, python-format +msgid "" +"_delete_initiator:ERROR:Failed to delete initiator. initiator " +"name:%(name)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:991 +#, python-format +msgid "" +"_add_hostport:Failed to add hostport. port name:%(port)s port " +"information:%(info)s host id:%(host)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1009 +#, python-format +msgid "_delete_hostport:Failed to delete host port. port id:%(portid)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1016 +#, python-format +msgid "_get_tgt_iqn:iSCSI IP is %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1049 +#, python-format +msgid "_get_tgt_iqn:iSCSI target iqn is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1086 +#, python-format +msgid "" +"_map_lun:Failed to add hostmap. hostid:%(host)s lunid:%(lun)s " +"hostlunid:%(hostlunid)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1122 +#, python-format +msgid "" +"_delete_map:There are IOs accessing the system. Retrying to delete host map." +" map id:%(mapid)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1128 +#, python-format +msgid "_delete_map:Failed to delete host map. mapid:%(mapid)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1142 +#, python-format +msgid "_delete_host: Failed to delete host. host id:%(hostid)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1223 +#, python-format +msgid "" +"_active_snapshot:Failed to activate snapshot. snapshot id:%(name)s. " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1237 +#, python-format +msgid "" +"_disable_snapshot:Failed to disable snapshot. snapshot id:%(id)s. " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1251 +#, python-format +msgid "" +"_delete_snapshot:Failed to delete snapshot. snapshot id:%(id)s. " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1308 +#, python-format +msgid "_create_volume:Failed to Create volume. volume name:%(name)s. out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1321 +#, python-format +msgid "_delete_volume:Failed to delete volume. Volume name:%(name)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1338 +#, python-format +msgid "" +"_create_luncopy:Failed to Create LUNcopy. LUNcopy name:%(name)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1352 +#, python-format +msgid "" +"_start_luncopy:Failed to start LUNcopy. LUNcopy id:%(luncopyid)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1382 +#, python-format +msgid "" +"_wait_for_luncopy:LUNcopy status is not normal. LUNcopy " +"name:%(luncopyname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1415 +#, python-format +msgid "" +"_delete_luncopy:Failed to delete LUNcopy. LUNcopy id:%(luncopyid)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1430 +#, python-format +msgid "" +"_create_snapshot:Failed to Create snapshot.
Snapshot name:%(name)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1468 +#, python-format +msgid "" +"_change_lun_controller:Failed to change lun owning controller. lun " +"id:%(lunid)s. new controller:%(controller)s. out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1484 +msgid "_is_resource_pool_enough:Resource pool for snapshot has not been added." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:123 +#, python-format +msgid "API %(name)s failed: %(reason)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:134 +#: cinder/volume/drivers/netapp/iscsi.py:1167 +#, python-format +msgid "Using WSDL: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:144 +#, python-format +msgid "Using DFM server: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:149 +#, python-format +msgid "Using storage service: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:154 +#, python-format +msgid "Using storage service prefix: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:160 +#, python-format +msgid "Using vfiler: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:173 +msgid "Either netapp_storage_service or netapp_storage_service_prefix must be set" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:204 +msgid "Connected to DFM server" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:282 +#, python-format +msgid "Discovered %(dataset_count)s datasets and %(lun_count)s LUNs" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:317 +#, python-format +msgid "Job failed: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:346 +msgid "" +"Attempt to use volume_type without specifying " +"netapp_storage_service_prefix flag." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:350 +msgid "" +"You must set the netapp_storage_service flag in order to create volumes " +"with no volume_type." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:420 +msgid "Failed to provision dataset member" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:435 +msgid "No LUN was created by the provision job" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:463 +#: cinder/volume/drivers/netapp/iscsi.py:1250 +#, python-format +msgid "No entry in LUN table for volume %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:481 +msgid "Failed to remove and delete dataset LUN member" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:497 +msgid "Failed to remove and delete dataset Qtree member" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:538 +#, python-format +msgid "No entry in LUN table for volume %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:558 +#, python-format +msgid "Failed to get LUN details for LUN ID %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:575 +#, python-format +msgid "Failed to get host details for host ID %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:812 +#: cinder/volume/drivers/netapp/iscsi.py:859 +#, python-format +msgid "No LUN ID for volume %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:821 +#, python-format +msgid "Failed to get target portal for filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:826 +#, python-format +msgid "Failed to get target IQN for filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:930 +msgid "" +"Failed to get LUN list. Is the DFM host time-synchronized with Cinder " +"host?"
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1042 +#: cinder/volume/drivers/netapp/iscsi.py:1381 +#: cinder/volume/drivers/netapp/iscsi.py:1699 +#: cinder/volume/drivers/netapp/nfs.py:74 +#, python-format +msgid "" +"Cannot create volume of size %(vol_size)s from snapshot of size " +"%(snap_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1054 +#, python-format +msgid "" +"Cannot create volume of type %(new_type)s from snapshot of type " +"%(old_type)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1075 +#: cinder/volume/drivers/netapp/nfs.py:273 +#, python-format +msgid "" +"Cannot create clone of size %(vol_size)s from volume of size " +"%(src_vol_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1086 +#, python-format +msgid "Cannot create clone of type %(new_type)s from volume of type %(old_type)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1143 +#, python-format +msgid "No metadata property %(prop)s defined for the LUN %(name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1216 +#: cinder/volume/drivers/netapp/iscsi.py:1561 +msgid "Success getting LUN list from server" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1238 +#: cinder/volume/drivers/netapp/iscsi.py:1576 +#, python-format +msgid "Created LUN with name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1254 +#: cinder/volume/drivers/netapp/iscsi.py:1369 +#: cinder/volume/drivers/netapp/iscsi.py:1593 +#, python-format +msgid "Destroyed LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1291 +#, python-format +msgid "Mapped LUN %(handle)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1298 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(handle)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1303 +#: cinder/volume/drivers/netapp/iscsi.py:1637 +#, python-format +msgid "Failed to get LUN target details for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1307 +#: cinder/volume/drivers/netapp/iscsi.py:1648 +#, python-format +msgid "Failed to get target portal for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1311 +#: cinder/volume/drivers/netapp/iscsi.py:1651 +#, python-format +msgid "Failed to get target IQN for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1344 +#, python-format +msgid "Unmapped LUN %(handle)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1365 +#, python-format +msgid "No entry in LUN table for snapshot %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1409 +#: cinder/volume/drivers/netapp/iscsi.py:1907 +msgid "Object is not a NetApp LUN." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1419 +#: cinder/volume/drivers/netapp/iscsi.py:2188 +#, python-format +msgid "Cloned LUN with new name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1439 +#, python-format +msgid "Could not find handle for LUN named %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1458 +#: cinder/volume/drivers/netapp/iscsi.py:1936 +#, python-format +msgid "" +"Cannot clone volume of size %(vol_size)s from src volume of size " +"%(src_vol_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1518 +#, python-format +msgid "Using NetApp filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1585 +#, python-format +msgid "No entry in LUN table for volume/snapshot %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1628 +#, python-format +msgid "Mapped LUN %(name)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1632 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(name)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1688 +#, python-format +msgid "Snapshot %s deletion successful" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1717 +#, python-format +msgid "Unmapped LUN %(name)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1735 +#, python-format +msgid "Failed to get vol with required size for volume: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1811 +#, python-format +msgid "Error mapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1829 +#, python-format +msgid "Error unmapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1923 +#, python-format +msgid "Could not find attribute for LUN named %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2057 +#, python-format +msgid "No iscsi service found for vserver %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2192 +#, python-format +msgid "No clonned lun named %s found on the filer" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2377 +#, python-format +msgid "Error finding luns for volume %(vol)s. Verify volume exists." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2481 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s completed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2484 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s failed" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:312 +#, python-format +msgid "" +"Cloning with params ip %(host_ip)s, exp_path\n" +" %(export_path)s, vol %(volume_name)s,\n" +" clone_name %(clone_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:461 +#, python-format +msgid "No interface found on cluster for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:487 +#, python-format +msgid "" +"No volume on cluster with vserver\n" +" %(vserver)s and junction path " +"%(junction)s\n" +" " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:493 +#, python-format +msgid "" +"Cloning with params volume %(volume)s,src %(src_path)s,\n" +" dest %(dest_path)s, vserver %(vserver)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:547 +#, python-format +msgid "No storage path found for export path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:555 +#, python-format +msgid "Cloning with src %(src_path)s, dest %(dest_path)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/__init__.py:27 +msgid "Nexenta SA returned the error" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:64 +#, python-format +msgid "Sending JSON data: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:69 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:75 +msgid "No headers in server response" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:76 +msgid "Bad response from server" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:79 +#, python-format +msgid "Got response: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:99 +#, python-format +msgid "Volume %s does not exist in Nexenta SA" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:211 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:220 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:228 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:238 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:274 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:282 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:74 +#, python-format +msgid "CLIQ command returned %s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:80 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:88 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. 
Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:118 +#, python-format +msgid "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. " +"Result=%(_xml)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:171 +#, python-format +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:235 +msgid "local_path not supported" +msgstr "" + +#: cinder/volume/drivers/san/san.py:148 +#, fuzzy, python-format +msgid "Error running SSH command: %s" +msgstr "Neočekivana greška prilikom pokretanja komande." + +#: cinder/volume/drivers/san/san.py:169 +msgid "Specify san_password or san_private_key" +msgstr "" + +#: cinder/volume/drivers/san/san.py:173 +msgid "san_ip must be set" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:80 +#, python-format +msgid "Cannot parse list-view output: %s" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:174 +#, python-format +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:257 +#, python-format +msgid "SSH Command failed after '%(total_attempts)r' attempts: '%(command)s'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:261 +#, fuzzy, python-format +msgid "Error running ssh command: %s" +msgstr "Neočekivana greška prilikom pokretanja komande." + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:446 +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:104 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:103 +#, python-format +msgid "CPG (%s) doesn't exist on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:493 +#, python-format +msgid "Must specify a valid persona %(valid)s, value '%(persona)s' is invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:532 +#, python-format +msgid "" +"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " +"invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:569 +#, python-format +msgid "Volume (%s) already exists on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:622 +#, fuzzy, python-format +msgid "Unexpected state while cloning %s" +msgstr "Neočekivana greška prilikom pokretanja komande." + +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:97 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:96 +msgid "Login to 3PAR array invalid" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:218 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:237 +#, python-format +msgid "The hostname must be called '%s'" +msgstr "" + diff --git a/cinder/locale/cinder.pot b/cinder/locale/cinder.pot new file mode 100644 index 0000000000..1ab42f6799 --- /dev/null +++ b/cinder/locale/cinder.pot @@ -0,0 +1,5573 @@ +# Translations template for cinder. +# Copyright (C) 2013 ORGANIZATION +# This file is distributed under the same license as the cinder project. +# FIRST AUTHOR , 2013. 
+# +#, fuzzy +msgid "" +msgstr "" +"Project-Id-Version: cinder jenkins.cinder.propose.translation.update.5\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2013-05-21 18:10+0000\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n" +"Language-Team: LANGUAGE <LL@li.org>\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 0.9.6\n" + +#: cinder/context.py:61 +#, python-format +msgid "Arguments dropped when creating context: %s" +msgstr "" + +#: cinder/context.py:92 +#, python-format +msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" +msgstr "" + +#: cinder/exception.py:63 +msgid "Unexpected error while running command." +msgstr "" + +#: cinder/exception.py:66 +#, python-format +msgid "" +"%(description)s\n" +"Command: %(cmd)s\n" +"Exit code: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" +msgstr "" + +#: cinder/exception.py:90 +msgid "DB exception wrapped." +msgstr "" + +#: cinder/exception.py:104 +msgid "An unknown exception occurred." +msgstr "" + +#: cinder/exception.py:125 cinder/openstack/common/rpc/common.py:86 +msgid "Exception in string format operation" +msgstr "" + +#: cinder/exception.py:138 +msgid "Connection to glance failed" +msgstr "" + +#: cinder/exception.py:142 +msgid "Not authorized." +msgstr "" + +#: cinder/exception.py:147 +msgid "User does not have admin privileges" +msgstr "" + +#: cinder/exception.py:151 +#, python-format +msgid "Policy doesn't allow %(action)s to be performed." +msgstr "" + +#: cinder/exception.py:155 +#, python-format +msgid "Not authorized for image %(image_id)s." +msgstr "" + +#: cinder/exception.py:159 +msgid "Unacceptable parameters." +msgstr "" + +#: cinder/exception.py:164 +msgid "Invalid snapshot" +msgstr "" + +#: cinder/exception.py:168 +#, python-format +msgid "Volume %(volume_id)s is still attached, detach volume first." +msgstr "" + +#: cinder/exception.py:172 +msgid "Failed to load data into json format" +msgstr "" + +#: cinder/exception.py:176 +msgid "The request is invalid." +msgstr "" + +#: cinder/exception.py:180 +msgid "The results are invalid." +msgstr "" + +#: cinder/exception.py:184 +msgid "Invalid input received" +msgstr "" + +#: cinder/exception.py:188 +msgid "Invalid volume type" +msgstr "" + +#: cinder/exception.py:192 +msgid "Invalid volume" +msgstr "" + +#: cinder/exception.py:196 +#, python-format +msgid "Invalid content type %(content_type)s." +msgstr "" + +#: cinder/exception.py:200 +msgid "Invalid Parameter: Unicode is not supported by the current database." +msgstr "" + +#: cinder/exception.py:207 +#, python-format +msgid "%(err)s" +msgstr "" + +#: cinder/exception.py:211 +msgid "Service is unavailable at this time." +msgstr "" + +#: cinder/exception.py:215 +#, python-format +msgid "Image %(image_id)s is unacceptable: %(reason)s" +msgstr "" + +#: cinder/exception.py:219 +msgid "Expected a uuid but received %(uuid)s." +msgstr "" + +#: cinder/exception.py:223 +msgid "Resource could not be found." +msgstr "" + +#: cinder/exception.py:229 +#, python-format +msgid "Volume %(volume_id)s persistence file could not be found." +msgstr "" + +#: cinder/exception.py:233 +#, python-format +msgid "Volume %(volume_id)s could not be found." +msgstr "" + +#: cinder/exception.py:237 +#, python-format +msgid "Unable to locate account %(account_name)s on Solidfire device" +msgstr "" + +#: cinder/exception.py:242 +#, python-format +msgid "Volume not found for instance %(instance_id)s."
+msgstr "" + +#: cinder/exception.py:246 +#, python-format +msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:251 cinder/exception.py:264 +msgid "Invalid metadata" +msgstr "" + +#: cinder/exception.py:255 cinder/exception.py:268 +msgid "Invalid metadata size" +msgstr "" + +#: cinder/exception.py:259 +#, python-format +msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:272 +#, python-format +msgid "Volume type %(volume_type_id)s could not be found." +msgstr "" + +#: cinder/exception.py:276 +#, python-format +msgid "Volume type with name %(volume_type_name)s could not be found." +msgstr "" + +#: cinder/exception.py:281 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s has no extra specs with key " +"%(extra_specs_key)s." +msgstr "" + +#: cinder/exception.py:286 +#, python-format +msgid "Snapshot %(snapshot_id)s could not be found." +msgstr "" + +#: cinder/exception.py:290 +#, python-format +msgid "deleting volume %(volume_name)s that has snapshot" +msgstr "" + +#: cinder/exception.py:294 +#, python-format +msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" +msgstr "" + +#: cinder/exception.py:299 +#, python-format +msgid "No target id found for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:303 +#, python-format +msgid "Failed to create iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:307 +#, python-format +msgid "Failed to attach iSCSI target for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:311 +#, python-format +msgid "Failed to remove iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:315 +#, python-format +msgid "No disk at %(location)s" +msgstr "" + +#: cinder/exception.py:319 +#, python-format +msgid "Invalid image href %(image_href)s." +msgstr "" + +#: cinder/exception.py:323 +#, python-format +msgid "Image %(image_id)s could not be found." +msgstr "" + +#: cinder/exception.py:327 +#, python-format +msgid "Service %(service_id)s could not be found." +msgstr "" + +#: cinder/exception.py:331 +#, python-format +msgid "Host %(host)s could not be found." +msgstr "" + +#: cinder/exception.py:335 +#, python-format +msgid "Scheduler Host Filter %(filter_name)s could not be found." +msgstr "" + +#: cinder/exception.py:339 +#, python-format +msgid "Scheduler Host Weigher %(weigher_name)s could not be found." +msgstr "" + +#: cinder/exception.py:343 +#, python-format +msgid "Could not find binary %(binary)s on host %(host)s." +msgstr "" + +#: cinder/exception.py:347 +#, python-format +msgid "Invalid reservation expiration %(expire)s." +msgstr "" + +#: cinder/exception.py:351 +#, python-format +msgid "" +"Change would make usage less than 0 for the following resources: " +"%(unders)s" +msgstr "" + +#: cinder/exception.py:356 +msgid "Quota could not be found" +msgstr "" + +#: cinder/exception.py:360 +#, python-format +msgid "Unknown quota resources %(unknown)s." +msgstr "" + +#: cinder/exception.py:364 +#, python-format +msgid "Quota for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:368 +#, python-format +msgid "Quota class %(class_name)s could not be found." +msgstr "" + +#: cinder/exception.py:372 +#, python-format +msgid "Quota usage for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:376 +#, python-format +msgid "Quota reservation %(uuid)s could not be found." 
+msgstr "" + +#: cinder/exception.py:380 +#, python-format +msgid "Quota exceeded for resources: %(overs)s" +msgstr "" + +#: cinder/exception.py:384 +#, python-format +msgid "Migration %(migration_id)s could not be found." +msgstr "" + +#: cinder/exception.py:388 +#, python-format +msgid "Migration not found for instance %(instance_id)s with status %(status)s." +msgstr "" + +#: cinder/exception.py:393 +#, python-format +msgid "File %(file_path)s could not be found." +msgstr "" + +#: cinder/exception.py:397 +#, python-format +msgid "Class %(class_name)s could not be found: %(exception)s" +msgstr "" + +#: cinder/exception.py:401 +msgid "Action not allowed." +msgstr "" + +#: cinder/exception.py:410 +#, python-format +msgid "Key pair %(key_name)s already exists." +msgstr "" + +#: cinder/exception.py:414 +#, python-format +msgid "Volume Type %(id)s already exists." +msgstr "" + +#: cinder/exception.py:418 +msgid "Migration error" +msgstr "" + +#: cinder/exception.py:422 +#, python-format +msgid "Malformed message body: %(reason)s" +msgstr "" + +#: cinder/exception.py:426 +#, python-format +msgid "Could not find config at %(path)s" +msgstr "" + +#: cinder/exception.py:430 +#, python-format +msgid "Could not load paste app '%(name)s' from %(path)s" +msgstr "" + +#: cinder/exception.py:434 +#, python-format +msgid "No valid host was found. %(reason)s" +msgstr "" + +#: cinder/exception.py:438 +#, python-format +msgid "Host %(host)s is not up or doesn't exist." +msgstr "" + +#: cinder/exception.py:442 +msgid "Quota exceeded" +msgstr "" + +#: cinder/exception.py:449 +msgid "Requested volume or snapshot exceeds allowed Gigabytes quota" +msgstr "" + +#: cinder/exception.py:454 +msgid "Maximum volume/snapshot size exceeded" +msgstr "" + +#: cinder/exception.py:458 +#, python-format +msgid "Maximum number of volumes allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:462 +#, python-format +msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:466 +#, python-format +msgid "Detected more than one volume with name %(vol_name)s" +msgstr "" + +#: cinder/exception.py:470 +#, python-format +msgid "3PAR Host already exists: %(err)s. %(info)s" +msgstr "" + +#: cinder/exception.py:474 +#, python-format +msgid "Invalid 3PAR Domain: %(err)s" +msgstr "" + +#: cinder/exception.py:478 +#, python-format +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +msgstr "" + +#: cinder/exception.py:483 +msgid "Bad response from SolidFire API" +msgstr "" + +#: cinder/exception.py:487 +#, python-format +msgid "Error in SolidFire API response: data=%(data)s" +msgstr "" + +#: cinder/exception.py:491 +#, python-format +msgid "Unknown or unsupported command %(cmd)s" +msgstr "" + +#: cinder/exception.py:495 +#, python-format +msgid "Malformed response to command %(cmd)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:499 +#, python-format +msgid "Bad HTTP response status %(status)s" +msgstr "" + +#: cinder/exception.py:503 +#, python-format +msgid "Operation failed with status=%(status)s. 
Full dump: %(data)s" +msgstr "" + +#: cinder/exception.py:507 +#, python-format +msgid "Unable to create server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:511 +#, python-format +msgid "Unable to find server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:515 +msgid "Unable to find any active VPSA controller" +msgstr "" + +#: cinder/exception.py:519 +#, python-format +msgid "Failed to retrieve attachments for volume %(name)s" +msgstr "" + +#: cinder/exception.py:523 +#, python-format +msgid "Invalid attachment info for volume %(name)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:527 +#, python-format +msgid "Instance %(instance_id)s could not be found." +msgstr "" + +#: cinder/exception.py:531 +#, python-format +msgid "Bad or unexpected response from the storage volume backend API: %(data)s" +msgstr "" + +#: cinder/exception.py:536 +msgid "Unknown NFS exception" +msgstr "" + +#: cinder/exception.py:540 +msgid "No mounted NFS shares found" +msgstr "" + +#: cinder/exception.py:544 cinder/exception.py:556 +#, python-format +msgid "There is no share which can host %(volume_size)sG" +msgstr "" + +#: cinder/exception.py:548 +msgid "Unknown Gluster exception" +msgstr "" + +#: cinder/exception.py:552 +msgid "No mounted Gluster shares found" +msgstr "" + +#: cinder/exception.py:560 +#, python-format +msgid "" +"Glance metadata cannot be updated, key %(key)s exists for volume id " +"%(volume_id)s" +msgstr "" + +#: cinder/exception.py:565 +msgid "Failed to copy image to volume" +msgstr "" + +#: cinder/exception.py:569 +#, python-format +msgid "Backup %(backup_id)s could not be found." +msgstr "" + +#: cinder/exception.py:573 +#, python-format +msgid "Invalid backup: %(reason)s" +msgstr "" + +#: cinder/exception.py:577 +msgid "Connection to swift failed" +msgstr "" + +#: cinder/flags.py:145 +msgid "Deploy v1 of the Cinder API. " +msgstr "" + +#: cinder/flags.py:148 +msgid "Deploy v2 of the Cinder API. " +msgstr "" + +#: cinder/manager.py:158 +#, python-format +msgid "Skipping %(full_task_name)s, %(ticks_to_skip)s ticks left until next run" +msgstr "" + +#: cinder/manager.py:164 +#, python-format +msgid "Running periodic task %(full_task_name)s" +msgstr "" + +#: cinder/manager.py:171 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "" + +#: cinder/manager.py:216 +msgid "Notifying Schedulers of capabilities ..." 
+msgstr "" + +#: cinder/policy.py:30 +msgid "JSON file representing policy" +msgstr "" + +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" +msgstr "" + +#: cinder/quota.py:704 +#, python-format +msgid "Created reservations %(reservations)s" +msgstr "" + +#: cinder/quota.py:726 +#, python-format +msgid "Failed to commit reservations %(reservations)s" +msgstr "" + +#: cinder/quota.py:747 +#, python-format +msgid "Failed to roll back reservations %(reservations)s" +msgstr "" + +#: cinder/service.py:124 +msgid "SIGTERM received" +msgstr "" + +#: cinder/service.py:174 cinder/openstack/common/service.py:164 +msgid "Parent process has died unexpectedly, exiting" +msgstr "" + +#: cinder/service.py:213 cinder/openstack/common/service.py:200 +msgid "Forking too fast, sleeping" +msgstr "" + +#: cinder/service.py:231 cinder/openstack/common/service.py:121 +#: cinder/openstack/common/service.py:218 +#, python-format +msgid "Caught %s, exiting" +msgstr "" + +#: cinder/service.py:236 cinder/openstack/common/service.py:223 +msgid "Unhandled exception" +msgstr "" + +#: cinder/service.py:243 cinder/openstack/common/service.py:230 +#, python-format +msgid "Started child %d" +msgstr "" + +#: cinder/service.py:253 cinder/openstack/common/service.py:240 +#, python-format +msgid "Starting %d workers" +msgstr "" + +#: cinder/service.py:272 cinder/openstack/common/service.py:257 +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "" + +#: cinder/service.py:275 +#, python-format +msgid "Child %(pid)d exited with status %(code)d" +msgstr "" + +#: cinder/service.py:278 cinder/openstack/common/service.py:265 +#, python-format +msgid "pid %d not in child list" +msgstr "" + +#: cinder/service.py:286 +#, python-format +msgid "_wait_child %d" +msgstr "" + +#: cinder/service.py:302 +#, python-format +msgid "wait wrap.failed %s" +msgstr "" + +#: cinder/service.py:310 cinder/openstack/common/service.py:293 +#, python-format +msgid "Caught %s, stopping children" +msgstr "" + +#: cinder/service.py:321 cinder/openstack/common/service.py:304 +#, python-format +msgid "Waiting on %d children to exit" +msgstr "" + +#: cinder/service.py:353 +#, python-format +msgid "Starting %(topic)s node (version %(version_string)s)" +msgstr "" + +#: cinder/service.py:367 cinder/openstack/common/rpc/service.py:47 +#, python-format +msgid "Creating Consumer connection for Service %s" +msgstr "" + +#: cinder/service.py:458 +msgid "Service killed that has no database entry" +msgstr "" + +#: cinder/service.py:495 +msgid "The service database object disappeared, Recreating it." +msgstr "" + +#: cinder/service.py:510 +msgid "Recovered model server connection!" +msgstr "" + +#: cinder/service.py:516 +msgid "model server went away" +msgstr "" + +#: cinder/service.py:608 +msgid "Full set of FLAGS:" +msgstr "" + +#: cinder/service.py:615 +#, python-format +msgid "%(flag)s : FLAG SET " +msgstr "" + +#: cinder/utils.py:96 +#, python-format +msgid "Fetching %s" +msgstr "" + +#: cinder/utils.py:147 cinder/openstack/common/processutils.py:122 +#, python-format +msgid "Got unknown keyword args to utils.execute: %r" +msgstr "" + +#: cinder/utils.py:153 +msgid "" +"The root_helper option (which lets you specify a root wrapper different " +"from cinder-rootwrap, and defaults to using sudo) is now deprecated. You " +"should use the rootwrap_config option instead." 
+msgstr "" + +#: cinder/utils.py:169 cinder/openstack/common/processutils.py:137 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "" + +#: cinder/utils.py:186 cinder/utils.py:267 +#: cinder/openstack/common/processutils.py:162 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:224 +#, python-format +msgid "Result was %s" +msgstr "" + +#: cinder/utils.py:199 cinder/openstack/common/processutils.py:174 +#, python-format +msgid "%r failed. Retrying." +msgstr "" + +#: cinder/utils.py:241 cinder/volume/drivers/san/hp/hp_3par_common.py:197 +#, python-format +msgid "Running cmd (SSH): %s" +msgstr "" + +#: cinder/utils.py:243 +msgid "Environment not supported over SSH" +msgstr "" + +#: cinder/utils.py:247 +msgid "process_input not supported over SSH" +msgstr "" + +#: cinder/utils.py:316 cinder/volume/drivers/huawei/huawei_iscsi.py:91 +msgid "Specify a password or private_key" +msgstr "" + +#: cinder/utils.py:332 cinder/volume/drivers/huawei/huawei_iscsi.py:99 +#, python-format +msgid "Error connecting via ssh: %s" +msgstr "" + +#: cinder/utils.py:373 +#, python-format +msgid "debug in callback: %s" +msgstr "" + +#: cinder/utils.py:532 +#, python-format +msgid "Link Local address is not found.:%s" +msgstr "" + +#: cinder/utils.py:535 +#, python-format +msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +msgstr "" + +#: cinder/utils.py:569 +#, python-format +msgid "Invalid backend: %s" +msgstr "" + +#: cinder/utils.py:580 +#, python-format +msgid "backend %s" +msgstr "" + +#: cinder/utils.py:630 +msgid "in looping call" +msgstr "" + +#: cinder/utils.py:813 +#, python-format +msgid "Expected object of type: %s" +msgstr "" + +#: cinder/utils.py:903 +#, python-format +msgid "timefunc: '%(name)s' took %(total_time).2f secs" +msgstr "" + +#: cinder/utils.py:1105 +#, python-format +msgid "Could not remove tmpdir: %s" +msgstr "" + +#: cinder/utils.py:1213 +#, python-format +msgid "Unknown byte multiplier: %s" +msgstr "" + +#: cinder/wsgi.py:121 +#, python-format +msgid "Unable to find cert_file : %s" +msgstr "" + +#: cinder/wsgi.py:124 +#, python-format +msgid "Unable to find ca_file : %s" +msgstr "" + +#: cinder/wsgi.py:127 +#, python-format +msgid "Unable to find key_file : %s" +msgstr "" + +#: cinder/wsgi.py:130 +msgid "" +"When running server in SSL mode, you must specify both a cert_file and " +"key_file option value in your configuration file" +msgstr "" + +#: cinder/wsgi.py:163 +#, python-format +msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" +msgstr "" + +#: cinder/wsgi.py:207 +#, python-format +msgid "Started %(name)s on %(_host)s:%(_port)s" +msgstr "" + +#: cinder/wsgi.py:226 +msgid "Stopping WSGI server." +msgstr "" + +#: cinder/wsgi.py:240 +msgid "WSGI server has stopped." +msgstr "" + +#: cinder/wsgi.py:309 +msgid "You must implement __call__" +msgstr "" + +#: cinder/api/auth.py:26 +msgid "" +"cinder.api.auth:CinderKeystoneContext is deprecated. Please use " +"cinder.api.middleware.auth:CinderKeystoneContext instead." +msgstr "" + +#: cinder/api/auth.py:34 +msgid "" +"cinder.api.auth:pipeline_factory is deprecated. Please use " +"cinder.api.middleware.auth:pipeline_factory instead." 
+msgstr "" + +#: cinder/api/common.py:63 cinder/api/common.py:97 cinder/volume/api.py:359 +msgid "limit param must be an integer" +msgstr "" + +#: cinder/api/common.py:66 cinder/api/common.py:101 cinder/volume/api.py:356 +msgid "limit param must be positive" +msgstr "" + +#: cinder/api/common.py:91 +msgid "offset param must be an integer" +msgstr "" + +#: cinder/api/common.py:105 +msgid "offset param must be positive" +msgstr "" + +#: cinder/api/common.py:133 +#, python-format +msgid "marker [%s] not found" +msgstr "" + +#: cinder/api/common.py:160 +#, python-format +msgid "href %s does not contain version" +msgstr "" + +#: cinder/api/extensions.py:184 +msgid "Initializing extension manager." +msgstr "" + +#: cinder/api/extensions.py:199 +#, python-format +msgid "Loaded extension: %s" +msgstr "" + +#: cinder/api/extensions.py:237 +#, python-format +msgid "Ext name: %s" +msgstr "" + +#: cinder/api/extensions.py:238 +#, python-format +msgid "Ext alias: %s" +msgstr "" + +#: cinder/api/extensions.py:239 +#, python-format +msgid "Ext description: %s" +msgstr "" + +#: cinder/api/extensions.py:241 +#, python-format +msgid "Ext namespace: %s" +msgstr "" + +#: cinder/api/extensions.py:242 +#, python-format +msgid "Ext updated: %s" +msgstr "" + +#: cinder/api/extensions.py:244 +#, python-format +msgid "Exception loading extension: %s" +msgstr "" + +#: cinder/api/extensions.py:258 +#, python-format +msgid "Loading extension %s" +msgstr "" + +#: cinder/api/extensions.py:264 +#, python-format +msgid "Calling extension factory %s" +msgstr "" + +#: cinder/api/extensions.py:278 +#, python-format +msgid "osapi_volume_extension is set to deprecated path: %s" +msgstr "" + +#: cinder/api/extensions.py:280 +#, python-format +msgid "" +"Please set your flag or cinder.conf settings for osapi_volume_extension " +"to: %s" +msgstr "" + +#: cinder/api/extensions.py:289 +#, python-format +msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:357 +#, python-format +msgid "Failed to load extension %(classpath)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:381 +#, python-format +msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgstr "" + +#: cinder/api/sizelimit.py:25 +msgid "" +"cinder.api.sizelimit:RequestBodySizeLimiter is deprecated. Please use " +"cinder.api.middleware.sizelimit:RequestBodySizeLimiter instead" +msgstr "" + +#: cinder/api/xmlutil.py:268 +msgid "element is not a child" +msgstr "" + +#: cinder/api/xmlutil.py:417 +msgid "root element selecting a list" +msgstr "" + +#: cinder/api/xmlutil.py:742 +#, python-format +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgstr "" + +#: cinder/api/xmlutil.py:861 +msgid "subclasses must implement construct()!" 
+msgstr "" + +#: cinder/api/contrib/admin_actions.py:80 +#, python-format +msgid "Updating %(resource)s '%(id)s' with '%(update)r'" +msgstr "" + +#: cinder/api/contrib/backups.py:128 +#, python-format +msgid "show called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:140 +#, python-format +msgid "delete called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:143 +#, python-format +msgid "Delete backup with id: %s" +msgstr "" + +#: cinder/api/contrib/backups.py:185 +#, python-format +msgid "Creating new backup %s" +msgstr "" + +#: cinder/api/contrib/backups.py:195 cinder/api/contrib/backups.py:230 +msgid "Incorrect request body format" +msgstr "" + +#: cinder/api/contrib/backups.py:201 +#, python-format +msgid "Creating backup of volume %(volume_id)s in container %(container)s" +msgstr "" + +#: cinder/api/contrib/backups.py:221 +#, python-format +msgid "Restoring backup %(backup_id)s (%(body)s)" +msgstr "" + +#: cinder/api/contrib/backups.py:234 +#, python-format +msgid "Restoring backup %(backup_id)s to volume %(volume_id)s" +msgstr "" + +#: cinder/api/contrib/extended_snapshot_attributes.py:61 +msgid "Snapshot not found." +msgstr "" + +#: cinder/api/contrib/hosts.py:83 cinder/api/openstack/wsgi.py:158 +msgid "cannot understand XML" +msgstr "" + +#: cinder/api/contrib/hosts.py:133 +#, python-format +msgid "Host '%s' could not be found." +msgstr "" + +#: cinder/api/contrib/hosts.py:162 +#, python-format +msgid "Invalid status: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:165 +#, python-format +msgid "Invalid update setting: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:177 +#, python-format +msgid "Setting host %(host)s to %(state)s." +msgstr "" + +#: cinder/api/contrib/hosts.py:202 +msgid "Describe-resource is admin only functionality" +msgstr "" + +#: cinder/api/contrib/hosts.py:210 +msgid "Host not found" +msgstr "" + +#: cinder/api/contrib/quotas.py:63 +msgid "Quota limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:102 +msgid "Request body empty" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:106 +#: cinder/api/v1/snapshot_metadata.py:77 cinder/api/v1/volume_metadata.py:77 +#: cinder/api/v2/snapshot_metadata.py:77 +msgid "Request body and URI mismatch" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:109 +#: cinder/api/v1/snapshot_metadata.py:81 cinder/api/v1/volume_metadata.py:81 +#: cinder/api/v2/snapshot_metadata.py:81 +msgid "Request body contains too many items" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:159 +msgid "Invalid request body" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:163 +msgid "No image_name was specified in request." +msgstr "" + +#: cinder/api/middleware/fault.py:45 +#, python-format +msgid "Caught error: %s" +msgstr "" + +#: cinder/api/middleware/fault.py:54 cinder/api/openstack/wsgi.py:886 +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "" + +#: cinder/api/middleware/sizelimit.py:54 cinder/api/middleware/sizelimit.py:63 +#: cinder/api/middleware/sizelimit.py:77 +msgid "Request is too large." 
+msgstr "" + +#: cinder/api/openstack/__init__.py:74 +msgid "Must specify an ExtensionManager class" +msgstr "" + +#: cinder/api/openstack/__init__.py:85 +#, python-format +msgid "Extended resource: %s" +msgstr "" + +#: cinder/api/openstack/__init__.py:110 +#, python-format +msgid "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" +msgstr "" + +#: cinder/api/openstack/__init__.py:115 +#, python-format +msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgstr "" + +#: cinder/api/openstack/__init__.py:128 +msgid "" +"cinder.api.openstack:FaultWrapper is deprecated. Please use " +"cinder.api.middleware.fault:FaultWrapper instead." +msgstr "" + +#: cinder/api/openstack/urlmap.py:25 +msgid "" +"cinder.api.openstack.urlmap:urlmap_factory is deprecated. Please use " +"cinder.api.urlmap:urlmap_factory instead." +msgstr "" + +#: cinder/api/openstack/wsgi.py:134 cinder/api/openstack/wsgi.py:537 +msgid "cannot understand JSON" +msgstr "" + +#: cinder/api/openstack/wsgi.py:542 +msgid "too many body keys" +msgstr "" + +#: cinder/api/openstack/wsgi.py:581 +#, python-format +msgid "Exception handling resource: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:586 +#, python-format +msgid "Fault thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:589 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:697 +msgid "Unrecognized Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:701 +msgid "No Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:705 +msgid "Empty body provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:816 +#, python-format +msgid "There is no such action: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:819 cinder/api/openstack/wsgi.py:832 +#: cinder/api/v1/snapshot_metadata.py:55 cinder/api/v1/snapshot_metadata.py:73 +#: cinder/api/v1/snapshot_metadata.py:98 cinder/api/v1/snapshot_metadata.py:123 +#: cinder/api/v1/volume_metadata.py:55 cinder/api/v1/volume_metadata.py:73 +#: cinder/api/v1/volume_metadata.py:98 cinder/api/v1/volume_metadata.py:123 +#: cinder/api/v2/snapshot_metadata.py:55 cinder/api/v2/snapshot_metadata.py:73 +#: cinder/api/v2/snapshot_metadata.py:98 cinder/api/v2/snapshot_metadata.py:123 +msgid "Malformed request body" +msgstr "" + +#: cinder/api/openstack/wsgi.py:829 +msgid "Unsupported Content-Type" +msgstr "" + +#: cinder/api/openstack/wsgi.py:841 +msgid "Malformed request url" +msgstr "" + +#: cinder/api/openstack/wsgi.py:889 +#, python-format +msgid "%(url)s returned a fault: %(e)s" +msgstr "" + +#: cinder/api/openstack/volume/__init__.py:25 +msgid "" +"cinder.api.openstack.volume:APIRouter is deprecated. Please use " +"cinder.api.v1.router:APIRouter instead." +msgstr "" + +#: cinder/api/openstack/volume/versions.py:26 +msgid "" +"cinder.api.openstack.volume.versions.Versions is deprecated. Please use " +"cinder.api.versions.Versions instead." +msgstr "" + +#: cinder/api/v1/limits.py:144 cinder/api/v2/limits.py:144 +#, python-format +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." +msgstr "" + +#: cinder/api/v1/limits.py:270 cinder/api/v2/limits.py:270 +msgid "This request was rate-limited." 
+msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:39 cinder/api/v1/snapshot_metadata.py:119 +#: cinder/api/v1/snapshot_metadata.py:158 cinder/api/v2/snapshot_metadata.py:39 +#: cinder/api/v2/snapshot_metadata.py:119 +#: cinder/api/v2/snapshot_metadata.py:158 +msgid "snapshot does not exist" +msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:141 +#: cinder/api/v1/snapshot_metadata.py:151 cinder/api/v1/volume_metadata.py:141 +#: cinder/api/v1/volume_metadata.py:151 cinder/api/v2/snapshot_metadata.py:141 +#: cinder/api/v2/snapshot_metadata.py:151 +msgid "Metadata item was not found" +msgstr "" + +#: cinder/api/v1/snapshots.py:123 cinder/api/v2/snapshots.py:123 +#, python-format +msgid "Delete snapshot with id: %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:173 cinder/api/v2/snapshots.py:179 +#, python-format +msgid "Create snapshot from volume %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:177 cinder/api/v2/snapshots.py:188 +#, python-format +msgid "Invalid value '%s' for force. " +msgstr "" + +#: cinder/api/v1/volume_metadata.py:39 cinder/api/v1/volume_metadata.py:119 +#: cinder/api/v1/volume_metadata.py:158 +msgid "volume does not exist" +msgstr "" + +#: cinder/api/v1/volumes.py:106 +#, python-format +msgid "vol=%s" +msgstr "" + +#: cinder/api/v1/volumes.py:236 cinder/api/v2/volumes.py:153 +#, python-format +msgid "Delete volume with id: %s" +msgstr "" + +#: cinder/api/v1/volumes.py:278 cinder/api/v1/volumes.py:282 +#: cinder/api/v2/volumes.py:209 cinder/api/v2/volumes.py:213 +msgid "Invalid imageRef provided." +msgstr "" + +#: cinder/api/v1/volumes.py:339 cinder/api/v2/volumes.py:272 +#, python-format +msgid "Create volume of %s GB" +msgstr "" + +#: cinder/api/v1/volumes.py:418 +#, python-format +msgid "Removing options '%(bad_options)s' from query" +msgstr "" + +#: cinder/api/v2/volumes.py:359 +#, python-format +msgid "Removing options '%s' from query" +msgstr "" + +#: cinder/backup/api.py:53 +msgid "Backup status must be available or error" +msgstr "" + +#: cinder/backup/api.py:78 +msgid "Volume to be backed up must be available" +msgstr "" + +#: cinder/backup/api.py:112 +msgid "Backup status must be available" +msgstr "" + +#: cinder/backup/api.py:117 +msgid "Backup to be restored has invalid size" +msgstr "" + +#: cinder/backup/api.py:126 +#, python-format +msgid "Creating volume of %(size)s GB for restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/api.py:140 +#, python-format +msgid "" +"volume size %(volume_size)d is too small to restore backup of size " +"%(size)d." 
+msgstr "" + +#: cinder/backup/api.py:145 +msgid "Volume to be restored to must be available" +msgstr "" + +#: cinder/backup/api.py:151 +msgid "Volume to be restored to is smaller than the backup to be restored" +msgstr "" + +#: cinder/backup/api.py:155 +#, python-format +msgid "Overwriting volume %(volume_id)s with restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/manager.py:79 +msgid "Cleaning up incomplete backup operations" +msgstr "" + +#: cinder/backup/manager.py:83 +#, python-format +msgid "Resetting volume %s to available (was backing-up)" +msgstr "" + +#: cinder/backup/manager.py:87 +#, python-format +msgid "Resetting volume %s to error_restoring (was restoring-backup)" +msgstr "" + +#: cinder/backup/manager.py:98 +#, python-format +msgid "Resetting backup %s to error (was creating)" +msgstr "" + +#: cinder/backup/manager.py:104 +#, python-format +msgid "Resetting backup %s to available (was restoring)" +msgstr "" + +#: cinder/backup/manager.py:109 +#, python-format +msgid "Resuming delete on backup: %s" +msgstr "" + +#: cinder/backup/manager.py:119 +#, python-format +msgid "create_backup started, backup: %(backup_id)s for volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:128 +#, python-format +msgid "" +"create_backup aborted, expected volume status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:137 +#, python-format +msgid "" +"create_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:160 +#, python-format +msgid "create_backup finished. backup: %s" +msgstr "" + +#: cinder/backup/manager.py:166 +#, python-format +msgid "" +"restore_backup started, restoring backup: %(backup_id)s to volume: " +"%(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:175 +#, python-format +msgid "" +"restore_backup aborted, expected volume status %(expected_status)s but " +"got %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:183 +#, python-format +msgid "" +"restore_backup aborted, expected backup status %(expected_status)s but " +"got %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:199 +#, python-format +msgid "" +"restore_backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]" +msgstr "" + +#: cinder/backup/manager.py:220 +#, python-format +msgid "" +"restore_backup finished, backup: %(backup_id)s restored to volume: " +"%(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:228 +#, python-format +msgid "delete_backup started, backup: %s" +msgstr "" + +#: cinder/backup/manager.py:234 +#, python-format +msgid "" +"delete_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:244 +#, python-format +msgid "" +"delete_backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]" +msgstr "" + +#: cinder/backup/manager.py:264 +#, python-format +msgid "delete_backup finished, backup %s deleted" +msgstr "" + +#: cinder/backup/services/swift.py:96 +#, python-format +msgid "unsupported compression algorithm: %s" +msgstr "" + +#: cinder/backup/services/swift.py:117 +#, python-format +msgid "_check_container_exists: container: %s" +msgstr "" + +#: cinder/backup/services/swift.py:122 +#, python-format +msgid "container %s does 
not exist" +msgstr "" + +#: cinder/backup/services/swift.py:127 +#, python-format +msgid "container %s exists" +msgstr "" + +#: cinder/backup/services/swift.py:133 +#, python-format +msgid "_create_container started, container: %(container)s,backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:148 +#, python-format +msgid "_generate_swift_object_name_prefix: %s" +msgstr "" + +#: cinder/backup/services/swift.py:159 +#, python-format +msgid "generated object list: %s" +msgstr "" + +#: cinder/backup/services/swift.py:169 +#, python-format +msgid "" +"_write_metadata started, container name: %(container)s, metadata " +"filename: %(filename)s" +msgstr "" + +#: cinder/backup/services/swift.py:184 +#, python-format +msgid "" +"error writing metadata file to swift, MD5 of metadata file in swift " +"[%(etag)s] is not the same as MD5 of metadata file sent to swift " +"[%(md5)s]" +msgstr "" + +#: cinder/backup/services/swift.py:188 +msgid "_write_metadata finished" +msgstr "" + +#: cinder/backup/services/swift.py:193 +#, python-format +msgid "" +"_read_metadata started, container name: %(container)s, metadata filename:" +" %(filename)s" +msgstr "" + +#: cinder/backup/services/swift.py:197 +#, python-format +msgid "_read_metadata finished (%s)" +msgstr "" + +#: cinder/backup/services/swift.py:207 +#, python-format +msgid "volume size %d is invalid." +msgstr "" + +#: cinder/backup/services/swift.py:221 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to swift, volume size: " +"%(volume_size_bytes)d, swift object names prefix %(object_prefix)s, " +"availability zone: %(availability_zone)s" +msgstr "" + +#: cinder/backup/services/swift.py:237 +msgid "reading chunk of data from volume" +msgstr "" + +#: cinder/backup/services/swift.py:244 +#, python-format +msgid "" +"compressed %(data_size_bytes)d bytes of data to %(comp_size_bytes)d bytes" +" using %(algorithm)s" +msgstr "" + +#: cinder/backup/services/swift.py:248 +msgid "not compressing data" +msgstr "" + +#: cinder/backup/services/swift.py:252 +msgid "About to put_object" +msgstr "" + +#: cinder/backup/services/swift.py:257 +#, python-format +msgid "swift MD5 for %(object_name)s: %(etag)s" +msgstr "" + +#: cinder/backup/services/swift.py:260 +#, python-format +msgid "backup MD5 for %(object_name)s: %(md5)s" +msgstr "" + +#: cinder/backup/services/swift.py:262 +#, python-format +msgid "" +"error writing object to swift, MD5 of object in swift %(etag)s is not the" +" same as MD5 of object sent to swift %(md5)s" +msgstr "" + +#: cinder/backup/services/swift.py:268 +msgid "Calling eventlet.sleep(0)" +msgstr "" + +#: cinder/backup/services/swift.py:276 +#, python-format +msgid "backup %s finished." +msgstr "" + +#: cinder/backup/services/swift.py:281 +#, python-format +msgid "v1 swift volume backup restore of %s started" +msgstr "" + +#: cinder/backup/services/swift.py:287 +#, python-format +msgid "metadata_object_names = %s" +msgstr "" + +#: cinder/backup/services/swift.py:293 +msgid "" +"restore_backup aborted, actual swift object list in swift does not match " +"object list stored in metadata" +msgstr "" + +#: cinder/backup/services/swift.py:299 +#, python-format +msgid "" +"restoring object from swift. 
backup: %(backup_id)s, container: " +"%(container)s, swift object name: %(object_name)s, volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:309 +#, python-format +msgid "decompressing data using %s algorithm" +msgstr "" + +#: cinder/backup/services/swift.py:323 +#, python-format +msgid "v1 swift volume backup restore of %s finished" +msgstr "" + +#: cinder/backup/services/swift.py:331 +#, python-format +msgid "" +"starting restore of backup %(object_prefix)s from swift container: " +"%(container)s, to volume %(volume_id)s, backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:339 +#, python-format +msgid "Restoring swift backup version %s" +msgstr "" + +#: cinder/backup/services/swift.py:344 +#, python-format +msgid "No support to restore swift backup version %s" +msgstr "" + +#: cinder/backup/services/swift.py:348 +#, python-format +msgid "restore %(backup_id)s to %(volume_id)s finished." +msgstr "" + +#: cinder/backup/services/swift.py:362 +msgid "swift error while listing objects, continuing with delete" +msgstr "" + +#: cinder/backup/services/swift.py:371 +#, python-format +msgid "swift error while deleting object %s, continuing with delete" +msgstr "" + +#: cinder/backup/services/swift.py:374 +#, python-format +msgid "deleted swift object: %(swift_object_name)s in container: %(container)s" +msgstr "" + +#: cinder/backup/services/swift.py:380 +#, python-format +msgid "delete %s finished" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:152 +#, python-format +msgid "Creating iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:171 cinder/brick/iscsi/iscsi.py:264 +#: cinder/brick/iscsi/iscsi.py:400 cinder/brick/iscsi/iscsi.py:409 +#, python-format +msgid "Failed to create iscsi target for volume id:%(vol_id)s." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:181 +#, python-format +msgid "" +"Failed to create iscsi target for volume id:%(vol_id)s. Please ensure " +"your tgtd config file contains 'include %(volumes_dir)s/*'" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:192 +#, python-format +msgid "Removing iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:209 cinder/brick/iscsi/iscsi.py:426 +#, python-format +msgid "Failed to remove iscsi target for volume id:%(vol_id)s." 
+msgstr "" + +#: cinder/brick/iscsi/iscsi.py:218 cinder/brick/iscsi/iscsi.py:434 +msgid "valid iqn needed for show_target" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:270 +#, python-format +msgid "Removing iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:356 +msgid "rtstool is not installed correctly" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:376 +#, python-format +msgid "Creating iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:416 +#, python-format +msgid "Removing iscsi_target: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:455 +#, python-format +msgid "Failed to add initiator iqn %s to target" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:35 +#, python-format +msgid "Unable to find Volume Group: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:41 +#, python-format +msgid "Failed to create Volume Group: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:75 +msgid "Error creating Volume Group" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:76 +#, python-format +msgid "Cmd :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:77 +#, python-format +msgid "StdOut :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:78 +#, python-format +msgid "StdErr :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:82 +#, python-format +msgid "Unable to locate Volume Group %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:265 +#, python-format +msgid "Unable to find VG: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:284 +msgid "" +"Requested to setup thin provisioning, however current LVM version does " +"not support it." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:337 +#, python-format +msgid "Unable to find LV: %s" +msgstr "" + +#: cinder/common/sqlalchemyutils.py:68 +msgid "Id not in sort_keys; is sort_keys unique?" +msgstr "" + +#: cinder/common/sqlalchemyutils.py:116 +msgid "Unknown sort direction, must be 'desc' or 'asc'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:51 +msgid "Use of empty request context is deprecated" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:178 +#, python-format +msgid "Unrecognized read_deleted value '%s'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:786 +#, python-format +msgid "Change will make usage less than 0 for the following resources: %(unders)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1797 cinder/db/sqlalchemy/api.py:1823 +#, python-format +msgid "No backend config with id %(sm_backend_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1880 +#, python-format +msgid "No sm_flavor called %(sm_flavor)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1924 +#, python-format +msgid "No sm_volume with id %(volume_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1987 +#, python-format +msgid "No backup with id %(backup_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:74 +msgid "version should be an integer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:101 +msgid "Upgrade DB using Essex release first." +msgstr "" + +#: cinder/db/sqlalchemy/session.py:71 +#, python-format +msgid "Got mysql server has gone away: %s" +msgstr "" + +#: cinder/db/sqlalchemy/session.py:132 +#, python-format +msgid "SQL connection failed. %s attempts left." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:243 +msgid "Exception while creating table." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:272 +msgid "Downgrade from initial Cinder install is unsupported." 
+msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:85 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:123 +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:82 +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:47 +#, python-format +msgid "Table |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:135 +msgid "quota_classes table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:142 +msgid "quota_usages table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:149 +msgid "reservations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:62 +msgid "Exception while creating table 'volume_glance_metedata'" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:77 +msgid "volume_glance_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:94 +msgid "backups table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:60 +msgid "snapshot_metadata table not dropped" +msgstr "" + +#: cinder/image/glance.py:151 +#, python-format +msgid "" +"Error contacting glance server '%(host)s:%(port)s' for '%(method)s', " +"%(extra)s." +msgstr "" + +#: cinder/image/image_utils.py:116 +msgid "Snapshot list encountered but no header found!" +msgstr "" + +#: cinder/image/image_utils.py:224 +msgid "'qemu-img info' parsing failed." +msgstr "" + +#: cinder/image/image_utils.py:231 +#, python-format +msgid "fmt=%(fmt)s backed by:%(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:248 +#, python-format +msgid "Converted to raw, but format is now %s" +msgstr "" + +#: cinder/image/image_utils.py:278 +#, python-format +msgid "Converted to %(f1)s, but format is now %(f2)s" +msgstr "" + +#: cinder/openstack/common/exception.py:104 +msgid "Uncaught exception" +msgstr "" + +#: cinder/openstack/common/excutils.py:48 +#, python-format +msgid "Original exception being dropped: %s" +msgstr "" + +#: cinder/openstack/common/lockutils.py:102 +#, python-format +msgid "Could not release the acquired lock `%s`" +msgstr "" + +#: cinder/openstack/common/lockutils.py:188 +#, python-format +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:199 +#, python-format +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:226 +#, python-format +msgid "Got file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:234 +#, python-format +msgid "Released file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." 
+msgstr "" + +#: cinder/openstack/common/log.py:226 +#, python-format +msgid "Deprecated: %s" +msgstr "" + +#: cinder/openstack/common/log.py:329 +#, python-format +msgid "Error loading logging config %(log_config)s: %(err_msg)s" +msgstr "" + +#: cinder/openstack/common/log.py:379 +#, python-format +msgid "syslog facility must be one of: %s" +msgstr "" + +#: cinder/openstack/common/log.py:537 +#, python-format +msgid "Fatal call to deprecated config: %(msg)s" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:84 +#, python-format +msgid "task run outlasted interval by %s sec" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:91 +msgid "in fixed duration looping call" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:131 +#, python-format +msgid "Dynamic looping call sleeping for %.02f seconds" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:138 +msgid "in dynamic looping call" +msgstr "" + +#: cinder/openstack/common/policy.py:149 +#, python-format +msgid "" +"Inheritance-based rules are deprecated; use the default brain instead of " +"%s." +msgstr "" + +#: cinder/openstack/common/policy.py:163 +#, python-format +msgid "Failed to understand rule %(match)r" +msgstr "" + +#: cinder/openstack/common/policy.py:173 +#, python-format +msgid "Inheritance-based rules are deprecated; update _check_%s" +msgstr "" + +#: cinder/openstack/common/policy.py:180 +#, python-format +msgid "No handler for matches of kind %s" +msgstr "" + +#: cinder/openstack/common/service.py:112 +#: cinder/openstack/common/service.py:275 +msgid "Full set of CONF:" +msgstr "" + +#: cinder/openstack/common/service.py:261 +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "" + +#: cinder/openstack/common/strutils.py:72 +#, python-format +msgid "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:125 +#, python-format +msgid "%s not in valid priorities" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:141 +#, python-format +msgid "" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:171 +#, python-format +msgid "Failed to load notifier %s. These notifications will not be sent." +msgstr "" + +#: cinder/openstack/common/notifier/rabbit_notifier.py:27 +msgid "The rabbit_notifier is now deprecated. Please use rpc_notifier instead." +msgstr "" + +#: cinder/openstack/common/notifier/rpc_notifier.py:45 +#: cinder/openstack/common/notifier/rpc_notifier2.py:51 +#, python-format +msgid "Could not send notification to %(topic)s. Payload=%(message)s" +msgstr "" + +#: cinder/openstack/common/rpc/__init__.py:106 +#, python-format +msgid "" +"A RPC is being made while holding a lock. The locks currently held are " +"%(locks)s. This is probably a bug. Please report it. Include the " +"following: [%(stack)s]." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:75 +msgid "Pool creating new connection" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:200 +#, python-format +msgid "no calling threads waiting for msg_id : %s, message : %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:208 +#, python-format +msgid "" +"Number of call waiters is greater than warning threshhold: %d. There " +"could be a MulticallProxyWaiter leak." 
+msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:291 +#, python-format +msgid "unpacked context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:337 +#, python-format +msgid "UNIQUE_ID is %s." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:406 +#, python-format +msgid "received %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:414 +#, python-format +msgid "no method for message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:415 +#, python-format +msgid "No method for message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:443 +#: cinder/openstack/common/rpc/impl_zmq.py:286 +#, python-format +msgid "Expected exception during message handling (%s)" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:451 +#: cinder/openstack/common/rpc/impl_zmq.py:292 +msgid "Exception during message handling" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:586 +#, python-format +msgid "Making synchronous call on %s ..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:589 +#, python-format +msgid "MSG_ID is %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:623 +#, python-format +msgid "Making asynchronous cast on %s..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:632 +msgid "Making asynchronous fanout cast..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:660 +#, python-format +msgid "Sending %(event_type)s on %(topic)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:74 +msgid "An unknown RPC related exception occurred." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:104 +#, python-format +msgid "" +"Remote error: %(exc_type)s %(value)s\n" +"%(traceback)s." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:121 +#, python-format +msgid "" +"Timeout while waiting on RPC response - topic: \"%(topic)s\", RPC method:" +" \"%(method)s\" info: \"%(info)s\"" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:137 +#: cinder/openstack/common/rpc/common.py:138 +#: cinder/openstack/common/rpc/common.py:139 +msgid "" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:143 +#, python-format +msgid "Found duplicate message(%(msg_id)s). Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:147 +msgid "Invalid reuse of an RPC connection." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:151 +#, python-format +msgid "Specified RPC version, %(version)s, not supported by this endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:156 +#, python-format +msgid "" +"Specified RPC envelope version, %(version)s, not supported by this " +"endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:280 +#, python-format +msgid "Failed to sanitize %(item)s. Key error %(err)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:302 +#, python-format +msgid "Returning exception %s to caller" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:169 +#: cinder/openstack/common/rpc/impl_qpid.py:133 +msgid "Failed to process message... skipping it." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:480 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:502 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:539 +#, python-format +msgid "" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:555 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:609 +#: cinder/openstack/common/rpc/impl_qpid.py:403 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:627 +#: cinder/openstack/common/rpc/impl_qpid.py:418 +#, python-format +msgid "Timed out waiting for RPC response: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:631 +#: cinder/openstack/common/rpc/impl_qpid.py:422 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:670 +#: cinder/openstack/common/rpc/impl_qpid.py:457 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:351 +#, python-format +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:357 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:370 +msgid "Re-established AMQP queues" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:430 +msgid "Error processing message. Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:97 +msgid "JSON serialization failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:104 +#, python-format +msgid "Deserializing: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:140 +#, python-format +msgid "Connecting to %(addr)s with %(type)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:141 +#, python-format +msgid "-> Subscribed to %(subscribe)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:142 +#, python-format +msgid "-> bind: %(bind)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:150 +msgid "Could not open socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:162 +#, python-format +msgid "Subscribing to %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:204 +msgid "You cannot recv on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:209 +msgid "You cannot send on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:273 +#, python-format +msgid "Running func with context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:311 +msgid "Sending reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:345 +msgid "RPC message did not include method." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:380 +msgid "Registering reactor" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:392 +msgid "In reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:407 +msgid "Out reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:411 +msgid "Consuming socket" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:453 +#, python-format +msgid "CONSUMER GOT %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:465 +#, python-format +msgid "Creating proxy for topic: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:471 +msgid "Topic contained dangerous characters." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:496 +#, python-format +msgid "ROUTER RELAY-OUT SUCCEEDED %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:505 +msgid "Topic socket file creation failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:510 +#, python-format +msgid "ROUTER RELAY-OUT QUEUED %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:513 +#, python-format +msgid "Local per-topic backlog buffer full for topic %(topic)s. Dropping message." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:532 +#, python-format +msgid "Could not create IPC directory %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:542 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:576 +#, python-format +msgid "CONSUMER RECEIVED DATA: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:578 +#, python-format +msgid "ROUTER RELAY-OUT %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:600 +msgid "ZMQ Envelope version unsupported or unknown." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:628 +msgid "Skipping topic registration. Already registered." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:635 +#, python-format +msgid "Consumer is a zmq.%s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:687 +msgid "Creating payload" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:700 +msgid "Creating queue socket for reply waiter" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:713 +msgid "Sending cast" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:716 +msgid "Cast sent; Waiting reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:719 +#, python-format +msgid "Received message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:720 +msgid "Unpacking response" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:729 +msgid "Unsupported or unknown ZMQ envelope returned." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:736 +msgid "RPC Message Invalid." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:760 +#, python-format +msgid "%(msg)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:763 +#, python-format +msgid "Sending message(s) to: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:767 +msgid "No matchmaker results. Not casting." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:770 +msgid "No match from matchmaker." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:53 +msgid "Match not found by MatchMaker." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:89 +msgid "Matchmaker does not implement registration or heartbeat." 
+msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:239 +#, python-format +msgid "Matchmaker unregistered: %s, %s" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:250 +msgid "Register before starting heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:343 +#: cinder/openstack/common/rpc/matchmaker.py:361 +#, python-format +msgid "No key defining hosts for topic '%s', see ringfile" +msgstr "" + +#: cinder/scheduler/chance.py:51 cinder/scheduler/simple.py:90 +msgid "Is the appropriate service running?" +msgstr "" + +#: cinder/scheduler/chance.py:56 +msgid "Could not find another host" +msgstr "" + +#: cinder/scheduler/driver.py:89 +msgid "Must implement a fallback schedule" +msgstr "" + +#: cinder/scheduler/driver.py:93 +msgid "Must implement schedule_create_volume" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:113 +msgid "Invalid value for 'scheduler_max_attempts', must be >=1" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:131 +#, python-format +msgid "" +"Error scheduling %(volume_id)s from last vol-service: %(last_host)s : " +"%(exc)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:160 +#, python-format +msgid "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:208 +#, python-format +msgid "Filtered %(hosts)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:214 +#, python-format +msgid "Choosing %(best_host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:240 +#, python-format +msgid "Ignoring %(service_name)s service update from %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:244 +#, python-format +msgid "Received %(service_name)s service update from %(host)s." +msgstr "" + +#: cinder/scheduler/host_manager.py:266 +msgid "service is down or disabled." +msgstr "" + +#: cinder/scheduler/manager.py:119 +#, python-format +msgid "Failed to schedule_%(method)s: %(ex)s" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:69 +#, python-format +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:78 +#, python-format +msgid "Could not decode scheduler options: '%(e)s'" +msgstr "" + +#: cinder/scheduler/simple.py:79 +msgid "Not enough allocatable volume gigabytes remaining" +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:37 +msgid "Free capacity not set: volume node info collection broken." +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:51 +#, python-format +msgid "" +"Insufficient free space for volume creation (requested / avail): " +"%(requested)s/%(available)s" +msgstr "" + +#: cinder/scheduler/filters/retry_filter.py:41 +#, python-format +msgid "Host %(host)s %(pass_msg)s. 
Previously tried hosts: %(hosts)s" +msgstr "" + +#: cinder/tests/fake_driver.py:45 cinder/volume/driver.py:506 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "" + +#: cinder/tests/fake_driver.py:93 cinder/tests/fake_driver.py:98 +#, python-format +msgid "LoggingVolumeDriver: %s" +msgstr "" + +#: cinder/tests/fake_utils.py:72 +#, python-format +msgid "Faking execution of cmd (subprocess): %s" +msgstr "" + +#: cinder/tests/fake_utils.py:80 +#, python-format +msgid "Faked command matched %s" +msgstr "" + +#: cinder/tests/fake_utils.py:96 +#, python-format +msgid "Faked command raised an exception %s" +msgstr "" + +#: cinder/tests/fake_utils.py:101 +#, python-format +msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" +msgstr "" + +#: cinder/tests/test_misc.py:59 +#, python-format +msgid "" +"The following migrations are missing a downgrade:\n" +"\t%s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:243 +#, python-format +msgid "unrecognized argument %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1217 +#, python-format +msgid "Run CLI command: %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1220 +#, python-format +msgid "" +"CLI output:\n" +" stdout: %(stdout)s\n" +" stderr: %(stderr)s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1225 +#, python-format +msgid "" +"CLI Exception output:\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/tests/test_volume_types.py:58 +#, python-format +msgid "Given data: %s" +msgstr "" + +#: cinder/tests/test_volume_types.py:59 +#, python-format +msgid "Result data: %s" +msgstr "" + +#: cinder/tests/api/contrib/test_backups.py:638 +msgid "Invalid input" +msgstr "" + +#: cinder/tests/integrated/test_login.py:31 +#, python-format +msgid "volume: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:37 +#, python-format +msgid "" +"%(message)s\n" +"Status Code: %(_status)s\n" +"Body: %(_body)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:46 +msgid "Authentication error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:54 +msgid "Authorization error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:62 +msgid "Item not found" +msgstr "" + +#: cinder/tests/integrated/api/client.py:104 +#, python-format +msgid "Doing %(method)s on %(relative_url)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:106 +#, python-format +msgid "Body: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:124 +#, python-format +msgid "%(auth_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:150 +#, python-format +msgid "%(relative_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:160 +msgid "Unexpected status code" +msgstr "" + +#: cinder/tests/integrated/api/client.py:167 +#, python-format +msgid "Decoding JSON: %s" +msgstr "" + +#: cinder/volume/api.py:96 +msgid "May specify only one of snapshot, imageRef or source volume" +msgstr "" + +#: cinder/volume/api.py:103 cinder/volume/api.py:449 +#: cinder/volume/manager.py:554 +msgid "status must be available" +msgstr "" + +#: cinder/volume/api.py:108 +msgid "Volume size cannot be less than the snapshot size" +msgstr "" + +#: cinder/volume/api.py:117 +msgid "Unable to clone volumes that are in an error state" +msgstr "" + +#: cinder/volume/api.py:123 +msgid "Clones currently must be >= original volume size." 
+msgstr "" + +#: cinder/volume/api.py:140 +#, python-format +msgid "Volume size '%s' must be an integer and greater than 0" +msgstr "" + +#: cinder/volume/api.py:150 +msgid "Size of specified image is larger than volume size." +msgstr "" + +#: cinder/volume/api.py:154 +msgid "Image minDisk size is larger than the volume size." +msgstr "" + +#: cinder/volume/api.py:168 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/api.py:177 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create volume (%(d_consumed)d " +"volumes already consumed)" +msgstr "" + +#: cinder/volume/api.py:308 +msgid "Failed to update quota for deleting volume" +msgstr "" + +#: cinder/volume/api.py:316 +msgid "Volume status must be available or error" +msgstr "" + +#: cinder/volume/api.py:321 +#, python-format +msgid "Volume still has %d dependent snapshots" +msgstr "" + +#: cinder/volume/api.py:374 cinder/volume/api.py:432 +#: cinder/volume/volume_types.py:65 +#, python-format +msgid "Searching by: %s" +msgstr "" + +#: cinder/volume/api.py:452 +msgid "already attached" +msgstr "" + +#: cinder/volume/api.py:459 +msgid "already detached" +msgstr "" + +#: cinder/volume/api.py:470 +msgid "Volume status must be available to reserve" +msgstr "" + +#: cinder/volume/api.py:519 +msgid "must be available" +msgstr "" + +#: cinder/volume/api.py:537 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG snapshot " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/api.py:546 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create snapshot (%(d_consumed)d " +"snapshots already consumed)" +msgstr "" + +#: cinder/volume/api.py:595 +msgid "Volume Snapshot status must be available or error" +msgstr "" + +#: cinder/volume/api.py:623 +msgid "Metadata property key blank" +msgstr "" + +#: cinder/volume/api.py:627 +msgid "Metadata property key greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:631 +msgid "Metadata property value greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:716 +msgid "Volume status must be available/in-use." +msgstr "" + +#: cinder/volume/api.py:719 +msgid "Volume status is in-use." +msgstr "" + +#: cinder/volume/driver.py:94 cinder/volume/drivers/netapp/nfs.py:253 +#, python-format +msgid "Recovering from a failed execute. Try number %s" +msgstr "" + +#: cinder/volume/driver.py:221 cinder/volume/drivers/emc/emc_smis_iscsi.py:119 +msgid "ISCSI provider_location not stored, using discovery" +msgstr "" + +#: cinder/volume/driver.py:269 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/driver.py:273 cinder/volume/drivers/emc/emc_smis_iscsi.py:162 +#, python-format +msgid "ISCSI Discovery: Found %s" +msgstr "" + +#: cinder/volume/driver.py:361 +#, python-format +msgid "copy_image_to_volume %s." +msgstr "" + +#: cinder/volume/driver.py:378 +#, python-format +msgid "copy_volume_to_image %s." +msgstr "" + +#: cinder/volume/driver.py:437 +#, python-format +msgid "iSCSI device not found at %s" +msgstr "" + +#: cinder/volume/driver.py:439 +#, python-format +msgid "" +"ISCSI volume not yet found at: %(host_device)s. Will rescan & retry. 
Try" +" number: %(tries)s" +msgstr "" + +#: cinder/volume/driver.py:451 +#, python-format +msgid "Found iSCSI node %(host_device)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/volume/driver.py:469 cinder/volume/manager.py:689 +#: cinder/volume/drivers/lvm.py:560 cinder/volume/drivers/lvm.py:676 +#: cinder/volume/drivers/storwize_svc.py:1344 +#: cinder/volume/drivers/emc/emc_smis_common.py:855 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:241 +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1503 +#: cinder/volume/drivers/netapp/iscsi.py:1114 +#: cinder/volume/drivers/netapp/iscsi.py:1484 +#: cinder/volume/drivers/netapp/iscsi.py:2236 +#: cinder/volume/drivers/netapp/iscsi.py:2515 +#: cinder/volume/drivers/nexenta/volume.py:317 +msgid "Updating volume status" +msgstr "" + +#: cinder/volume/driver.py:544 +msgid "Driver must implement initialize_connection" +msgstr "" + +#: cinder/volume/manager.py:122 +#, python-format +msgid "Driver path %s is deprecated, update your configuration to the new path." +msgstr "" + +#: cinder/volume/manager.py:144 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "" + +#: cinder/volume/manager.py:149 +#, python-format +msgid "volume %s stuck in a downloading state" +msgstr "" + +#: cinder/volume/manager.py:154 +#, python-format +msgid "volume %s: skipping export" +msgstr "" + +#: cinder/volume/manager.py:156 +msgid "Resuming any in progress delete operations" +msgstr "" + +#: cinder/volume/manager.py:159 +#, python-format +msgid "Resuming delete on volume: %s" +msgstr "" + +#: cinder/volume/manager.py:219 +#, python-format +msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +msgstr "" + +#: cinder/volume/manager.py:228 +#, python-format +msgid "volume %s: creating from snapshot" +msgstr "" + +#: cinder/volume/manager.py:232 +#, python-format +msgid "volume %s: creating from existing volume" +msgstr "" + +#: cinder/volume/manager.py:236 +#, python-format +msgid "volume %s: creating from image" +msgstr "" + +#: cinder/volume/manager.py:245 +#, python-format +msgid "volume %s: creating" +msgstr "" + +#: cinder/volume/manager.py:277 +#, python-format +msgid "volume %s: creating export" +msgstr "" + +#: cinder/volume/manager.py:286 +#, python-format +msgid "volume %s: create failed" +msgstr "" + +#: cinder/volume/manager.py:316 +#, python-format +msgid "volume %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:324 +#, python-format +msgid "Error: %s" +msgstr "" + +#: cinder/volume/manager.py:350 +#, python-format +msgid "volume %s: Error trying to reschedule create" +msgstr "" + +#: cinder/volume/manager.py:368 +msgid "Retry info not present, will not reschedule" +msgstr "" + +#: cinder/volume/manager.py:372 +msgid "No request spec, will not reschedule" +msgstr "" + +#: cinder/volume/manager.py:377 +#, python-format +msgid "volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d" +msgstr "" + +#: cinder/volume/manager.py:406 cinder/volume/manager.py:419 +#, python-format +msgid "volume %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:412 +msgid "volume is not local to this node" +msgstr "" + +#: cinder/volume/manager.py:417 +#, python-format +msgid "volume %s: removing export" +msgstr "" + +#: cinder/volume/manager.py:422 +#, python-format +msgid "volume %s: volume is busy" +msgstr "" + +#: cinder/volume/manager.py:441 +msgid "Failed to update usages deleting volume" +msgstr "" + +#: cinder/volume/manager.py:445 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "" + +#: 
cinder/volume/manager.py:460 +#, python-format +msgid "snapshot %s: creating" +msgstr "" + +#: cinder/volume/manager.py:466 +#, python-format +msgid "snapshot %(snap_name)s: creating" +msgstr "" + +#: cinder/volume/manager.py:484 +#, python-format +msgid "snapshot %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:492 cinder/volume/manager.py:502 +#, python-format +msgid "snapshot %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:505 +#, python-format +msgid "snapshot %s: snapshot is busy" +msgstr "" + +#: cinder/volume/manager.py:530 +msgid "Failed to update usages deleting snapshot" +msgstr "" + +#: cinder/volume/manager.py:533 +#, python-format +msgid "snapshot %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:551 +msgid "being attached by another instance" +msgstr "" + +#: cinder/volume/manager.py:608 +#, python-format +msgid "Downloaded image %(image_id)s to %(volume_id)s successfully" +msgstr "" + +#: cinder/volume/manager.py:626 +#, python-format +msgid "Uploaded volume %(volume_id)s to image (%(image_id)s) successfully" +msgstr "" + +#: cinder/volume/manager.py:702 +msgid "Clear capabilities" +msgstr "" + +#: cinder/volume/manager.py:706 +#, python-format +msgid "Notification {%s} received" +msgstr "" + +#: cinder/volume/volume_types.py:41 +#, python-format +msgid "DB error: %s" +msgstr "" + +#: cinder/volume/volume_types.py:50 cinder/volume/volume_types.py:97 +msgid "id cannot be None" +msgstr "" + +#: cinder/volume/volume_types.py:109 +msgid "name cannot be None" +msgstr "" + +#: cinder/volume/volume_types.py:128 +#, python-format +msgid "" +"Default volume type is not found, please check default_volume_type " +"config: %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:91 +msgid "Running with CoraidDriver for ESM EtherCloud" +msgstr "" + +#: cinder/volume/drivers/coraid.py:102 +#, python-format +msgid "Update session cookie %(session)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:108 cinder/volume/drivers/coraid.py:183 +#, python-format +msgid "Message: %(message)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:125 +#, python-format +msgid "Error while trying to set group: %(message)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:128 +#, python-format +msgid "Unable to find group: %(group)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:161 +msgid "ESM urlOpen error" +msgstr "" + +#: cinder/volume/drivers/coraid.py:166 +msgid "JSON Error" +msgstr "" + +#: cinder/volume/drivers/coraid.py:170 +msgid "Request without URL" +msgstr "" + +#: cinder/volume/drivers/coraid.py:175 +#, python-format +msgid "Configure data: %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:177 +#, python-format +msgid "Configure response: %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:199 +#, python-format +msgid "Unable to retrieve volume info for volume %(volname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:289 +msgid "Cannot log in to Coraid ESM" +msgstr "" + +#: cinder/volume/drivers/coraid.py:308 +#, python-format +msgid "Failed to create volume %(volname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:321 +#, python-format +msgid "Failed to delete volume %(volname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:335 +#, python-format +msgid "Failed to Create Snapshot %(snapname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:347 +#, python-format +msgid "Failed to Delete Snapshot %(snapname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:362 +#, python-format +msgid "Failed to Create Volume from Snapshot 
%(snapname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:383 +#, python-format +msgid "" +"Failed to Initialize Connection. Volume Name: %(volname)s Shelf: " +"%(shelf)s, Lun: %(lun)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:65 +#, python-format +msgid "There's no Gluster config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:70 +#, python-format +msgid "Gluster config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:80 +msgid "mount.glusterfs is not installed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:98 cinder/volume/drivers/nfs.py:168 +#, python-format +msgid "casted to %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:108 cinder/volume/drivers/nfs.py:178 +#, python-format +msgid "Volume %s does not have provider_location specified, skipping" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:168 cinder/volume/drivers/nfs.py:237 +#, python-format +msgid "Exception during mounting %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:247 cinder/volume/drivers/nfs.py:321 +#, python-format +msgid "%s is already mounted" +msgstr "" + +#: cinder/volume/drivers/lvm.py:82 +#, python-format +msgid "volume group %s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/lvm.py:197 +#, python-format +msgid "Size for volume: %s not found, skipping secure delete." +msgstr "" + +#: cinder/volume/drivers/lvm.py:204 +#, python-format +msgid "Performing secure delete on volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:218 +#, python-format +msgid "Error unrecognized volume_clear option: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:238 +#, python-format +msgid "snapshot: %s not found, skipping delete operations" +msgstr "" + +#: cinder/volume/drivers/lvm.py:268 cinder/volume/drivers/lvm.py:656 +#, python-format +msgid "Creating clone of volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:345 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provision for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:364 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:378 +msgid "Detected inconsistency in provider_location id" +msgstr "" + +#: cinder/volume/drivers/lvm.py:430 +#, python-format +msgid "Symbolic link %s not found" +msgstr "" + +#: cinder/volume/drivers/lvm.py:510 cinder/volume/drivers/lvm.py:524 +#, python-format +msgid "Skipping remove_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:542 +#, python-format +msgid "" +"Skipping remove_export. 
No iscsi_target is presently exported for volume:" +" %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:583 +msgid "Error retrieving volume status: " +msgstr "" + +#: cinder/volume/drivers/nfs.py:141 +#, python-format +msgid "There's no NFS config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/nfs.py:146 +#, python-format +msgid "NFS config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/rbd.py:64 +#, python-format +msgid "rbd has no pool %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:87 cinder/volume/drivers/sheepdog.py:134 +msgid "error refreshing volume stats" +msgstr "" + +#: cinder/volume/drivers/rbd.py:220 +msgid "Not stored in rbd" +msgstr "" + +#: cinder/volume/drivers/rbd.py:224 +msgid "Blank components" +msgstr "" + +#: cinder/volume/drivers/rbd.py:227 +msgid "Not an rbd snapshot" +msgstr "" + +#: cinder/volume/drivers/rbd.py:242 +#, python-format +msgid "%s is in a different ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:253 +#, python-format +msgid "Unable to read image %s" +msgstr "" + +#: cinder/volume/drivers/scality.py:63 +msgid "Value required for 'scality_sofs_config'" +msgstr "" + +#: cinder/volume/drivers/scality.py:74 +#, python-format +msgid "Cannot access 'scality_sofs_config': %s" +msgstr "" + +#: cinder/volume/drivers/scality.py:80 +msgid "Cannot execute /sbin/mount.sofs" +msgstr "" + +#: cinder/volume/drivers/scality.py:101 +msgid "Cannot mount Scality SOFS, check syslog for errors" +msgstr "" + +#: cinder/volume/drivers/scality.py:135 +#, python-format +msgid "Cannot find volume dir for Scality SOFS at '%s'" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:47 +#, python-format +msgid "Sheepdog is not working: %s" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:52 +msgid "Sheepdog is not working" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:129 +#, python-format +msgid "Payload for SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:146 +#, python-format +msgid "Call to json.loads() raised an exception: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:152 +#, python-format +msgid "Results of SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:156 +#, python-format +msgid "Clone operation encountered: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:158 +#, python-format +msgid "Waiting for outstanding operation before retrying snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:164 +#, python-format +msgid "Detected xDBVersionMismatch, retry %s of 5" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:171 +#, python-format +msgid "API response: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:191 +#, python-format +msgid "Found solidfire account: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:220 +#, python-format +msgid "solidfire account: %s does not exist, create it..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:281 +#, python-format +msgid "Failed to retrieve volume SolidFire-ID: %s in get_by_account!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:338 +msgid "Failed to get model update from clone" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:364 +#, python-format +msgid "More than one valid preset was detected, using %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:397 +#, python-format +msgid "Mapped SolidFire volumeID %(sfid)s to cinder ID %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:406 +#, python-format +msgid "Volume %s, not found on SF Cluster." 
+msgstr "" + +#: cinder/volume/drivers/solidfire.py:409 +#, python-format +msgid "Found %(count)s volumes mapped to id: %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:476 +msgid "Enter SolidFire delete_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:480 +#, python-format +msgid "Account for Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:482 +msgid "This usually means the volume was never succesfully created." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:497 +#, python-format +msgid "Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:500 +msgid "Leaving SolidFire delete_volume" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:504 +msgid "Executing SolidFire ensure_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:509 +msgid "Executing SolidFire create_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:558 +msgid "Updating cluster status info" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:566 +msgid "Failed to get updated stats" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:173 +#, python-format +msgid "WWPN on node %(node)s: %(wwpn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:179 +msgid "enter: do_setup" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:190 +#, python-format +msgid "pool %s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:206 +msgid "Failed to get license information." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:265 +msgid "do_setup: No configured nodes" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:267 +msgid "leave: do_setup" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:289 +msgid "enter: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:295 +#: cinder/volume/drivers/netapp/iscsi.py:169 +#: cinder/volume/drivers/netapp/iscsi.py:1182 +#: cinder/volume/drivers/netapp/iscsi.py:1535 +#: cinder/volume/drivers/netapp/nfs.py:109 +#: cinder/volume/drivers/netapp/nfs.py:328 +#: cinder/volume/drivers/netapp/nfs.py:379 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:118 +#, python-format +msgid "%s is not set" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:301 +msgid "" +"Password or SSH private key is required for authentication: set either " +"san_password or san_private_key option" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:309 +#, python-format +msgid "" +"Illegal value %d specified for storwize_svc_flashcopy_timeout: valid " +"values are between 0 and 600" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:317 +msgid "leave: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:331 +#, python-format +msgid "ensure_export: Volume %s not found on storage" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:356 +#, python-format +msgid "enter: _get_chap_secret_for_host: host name %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:392 +#, python-format +msgid "" +"leave: _get_chap_secret_for_host: host name %(host_name)s with secret " +"%(chap_secret)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:415 +msgid "_create_host: Cannot clean host name. 
Host name is not unicode or string" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:483 +#, python-format +msgid "enter: _get_host_from_connector: prefix %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:511 +#, python-format +msgid "leave: _get_host_from_connector: host %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:523 +#, python-format +msgid "enter: _create_host: host %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:538 +msgid "_create_host: No connector ports" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:551 +#, python-format +msgid "leave: _create_host: host %(host)s - %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:574 +#, python-format +msgid "enter: _map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:609 +msgid "" +"storwize_svc_multihostmap_enabled is set to False, multi host mapping is " +"not allowed" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:619 +#, python-format +msgid "volume %s is mapped to multiple hosts" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:625 +#, python-format +msgid "" +"leave: _map_vol_to_host: LUN %(result_lun)s, volume %(volume_name)s, host" +" %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:635 +#, python-format +msgid "enter: _delete_host: host %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:643 +#, python-format +msgid "leave: _delete_host: host %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:675 +#, python-format +msgid "enter: initialize_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:691 +msgid "_create_host failed to return the host name." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:702 +#, python-format +msgid "initialize_connection: Failed to get attributes for volume %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:709 +#, python-format +msgid "Did not find expected column name in lsvdisk: %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:711 +#, python-format +msgid "initialize_connection: Missing volume attribute for volume %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:728 +#, python-format +msgid "" +"initialize_connection: No node found in I/O group %(gid)s for volume " +"%(vol)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:736 +#, python-format +msgid "initialize_connection: Did not find a preferred node for volume %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:769 +#, python-format +msgid "" +"initialize_connection: Failed to collect return properties for volume " +"%(vol)s and connector %(conn)s.\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:774 +#, python-format +msgid "" +"leave: initialize_connection:\n" +" volume: %(vol)s\n" +" connector %(conn)s\n" +" properties: %(prop)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:792 +#, python-format +msgid "enter: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:802 +msgid "_get_host_from_connector failed to return the host name for connector" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:817 +#, python-format +msgid "" +"terminate_connection: No mapping of volume %(vol_name)s to host " +"%(host_name)s found" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:825 +#, python-format +msgid "leave: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: 
cinder/volume/drivers/storwize_svc.py:883 +msgid "protocol must be specified as 'iSCSI' or 'FC'" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:907 +#, python-format +msgid "enter: _create_vdisk: vdisk %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:945 +#, python-format +msgid "" +"_create_vdisk %(name)s - did not find success message in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:950 +#, python-format +msgid "leave: _create_vdisk: volume %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:962 +#: cinder/volume/drivers/storwize_svc.py:976 +#, python-format +msgid "" +"create FC mapping from %(source)s to %(target)s - did not find success " +"message in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:988 +#: cinder/volume/drivers/storwize_svc.py:998 +#, python-format +msgid "" +"create FC mapping from %(source)s to %(target)s - did not find mapping id" +" in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1012 +#, python-format +msgid "" +"_prepare_fc_map: Failed to prepare FlashCopy from %(source)s to " +"%(target)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1039 +#, python-format +msgid "" +"Unexpected mapping status %(status)s for mapping %(id)s. Attributes: " +"%(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1050 +#, python-format +msgid "" +"Mapping %(id)s prepare failed to complete within the allotted %(to)d " +"seconds timeout. Terminating." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1055 +#, python-format +msgid "" +"_prepare_fc_map: Failed to start FlashCopy from %(source)s to %(target)s " +"with exception %(ex)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1062 +#, python-format +msgid "_prepare_fc_map: %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1069 +#, python-format +msgid "" +"_start_fc_map: Failed to start FlashCopy from %(source)s to %(target)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1080 +#, python-format +msgid "" +"enter: _run_flashcopy: execute FlashCopy from source %(source)s to target" +" %(target)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1092 +#, python-format +msgid "leave: _run_flashcopy: FlashCopy started from %(source)s to %(target)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1100 +#, python-format +msgid "enter: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1107 +#, python-format +msgid "_create_copy: Source vdisk %s does not exist" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1119 +#, python-format +msgid "" +"_create_copy: cannot get source vdisk %(src)s capacity from vdisk " +"attributes %(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1129 +#, python-format +msgid "leave: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1134 +#, python-format +msgid "enter: _get_flashcopy_mapping_attributes: mapping %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1156 +#, python-format +msgid "" +"leave: _get_flashcopy_mapping_attributes: mapping %(fc_map_id)s, " +"attributes %(attributes)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1164 +#, python-format +msgid 
"enter: _is_vdisk_defined: vdisk %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1166 +#, python-format +msgid "leave: _is_vdisk_defined: vdisk %(vol)s with %(str)s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1194 +#, python-format +msgid "enter: _delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1199 +#, python-format +msgid "warning: Tried to delete vdisk %s but it does not exist." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1225 +#, python-format +msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1258 +#, python-format +msgid "leave: _delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1283 +msgid "create_volume_from_snapshot: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1297 +msgid "create_cloned_volume: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1361 +msgid "_update_volume_status: Could not get system name" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1373 +msgid "Could not get pool data from the storage" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1374 +msgid "_update_volume_status: Could not get storage pool data" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1412 +msgid "" +"Illegal value specified for storwize_svc_vol_rsize: set to either a " +"percentage (0-100) or -1" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1418 +msgid "" +"Illegal value specified for storwize_svc_vol_warning: set to a percentage" +" (0-100)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1425 +msgid "" +"Illegal value specified for storwize_svc_vol_grainsize: set to either 32," +" 64, 128, or 256" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1432 +msgid "System does not support compression" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1437 +msgid "If compression is set to True, rsize must also be set (not equal to -1)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1443 +#, python-format +msgid "" +"Illegal value %(prot)s specified for storwize_svc_connection_protocol: " +"valid values are %(enabled)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1452 +msgid "" +"Multipath is currently only supported for FC connections and not iSCSI. 
" +"(This is a Nova limitation.)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1464 +#, python-format +msgid "enter: _execute_command_and_parse_attributes: command %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1471 +#, python-format +msgid "" +"CLI Exception output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1488 +#, python-format +msgid "" +"leave: _execute_command_and_parse_attributes:\n" +"command: %(cmd)s\n" +"attributes: %(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1507 +#, python-format +msgid "" +"_get_hdr_dic: attribute headers and values do not match.\n" +" Headers: %(header)s\n" +" Values: %(row)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1517 +#, python-format +msgid "" +"%(fun)s: Failed with unexpected CLI output.\n" +" Command: %(cmd)s\n" +"stdout: %(out)s\n" +"stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1531 +#, python-format +msgid "" +"%(fun)s: Failed with unexpected CLI output.\n" +" Command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1539 +#, python-format +msgid "Did not find expected column in %(fun)s: %(hdr)s" +msgstr "" + +#: cinder/volume/drivers/windows.py:142 +#, python-format +msgid "Creating folder %s " +msgstr "" + +#: cinder/volume/drivers/windows.py:207 +#: cinder/volume/drivers/nexenta/volume.py:203 +#, python-format +msgid "Ignored target creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/windows.py:213 +#, python-format +msgid "Disk not found: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:218 +#, python-format +msgid "Sending %(method)s to %(url)s. Body \"%(body)s\"" +msgstr "" + +#: cinder/volume/drivers/zadara.py:240 +#, python-format +msgid "Operation completed. %(data)s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:350 +#, python-format +msgid "Volume %(name)s could not be found. It might be already deleted" +msgstr "" + +#: cinder/volume/drivers/zadara.py:447 +#, python-format +msgid "Attach properties: %(properties)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:42 +msgid "" +"Module PyWBEM not installed. Install PyWBEM using the python-pywbem " +"package." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:78 +msgid "Entering create_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:82 +#, python-format +msgid "Create Volume: %(volume)s Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:90 +#, python-format +msgid "Create Volume: %(volume)s Storage type: %(storage_type)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:97 +#, python-format +msgid "" +"Create Volume: %(volume)s Pool: %(pool)s Storage System: " +"%(storage_system)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:106 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Storage Configuration Service not " +"found for pool %(storage_type)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:114 +#, python-format +msgid "" +"Create Volume: %(name)s Method: CreateOrModifyElementFromStoragePool " +"ConfigServicie: %(service)s ElementName: %(name)s InPool: %(pool)s " +"ElementType: 5 Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:129 +#, python-format +msgid "Create Volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:136 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:143 +#, python-format +msgid "Leaving create_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:151 +msgid "Entering create_volume_from_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:156 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:166 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Snapshot Instance: %(snapshotinstance)s Storage " +"System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:176 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Create Volume from Snapshot is NOT supported on VMAX." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:187 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Cannot find Replication Service to create volume from " +"snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:196 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Method: CreateElementReplica ReplicationService: " +"%(service)s ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:217 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s " +"Snapshot:%(snapshotname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:229 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Successfully clone volume from snapshot. Finding the " +"clone relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:240 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Remove the clone relationship. Method: " +"ModifyReplicaSynchronization ReplicationService: %(service)s Operation: " +"8 Synchronization: %(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:256 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:265 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. 
Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:277 +#, python-format +msgid "" +"Leaving create_volume_from_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:286 +msgid "Entering create_cloned_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:291 +#, python-format +msgid "" +"Create a Clone from Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:301 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Source Instance: %(src_instance)s Storage System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:311 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Cannot find Replication Service to create cloned volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:320 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Method: CreateElementReplica ReplicationService: %(service)s " +"ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:341 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source " +"Volume:%(srcname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:353 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Successfully cloned volume from source volume. Finding the clone " +"relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:364 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Remove the clone relationship. Method: ModifyReplicaSynchronization " +"ReplicationService: %(service)s Operation: 8 Synchronization: " +"%(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:380 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:389 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:401 +#, python-format +msgid "" +"Leaving create_cloned_volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:410 +msgid "Entering delete_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:412 +#, python-format +msgid "Delete Volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:419 +#, python-format +msgid "Volume %(name)s not found on the array. No volume to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:429 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Storage Configuration Service not " +"found." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:437 +#, python-format +msgid "Delete Volume: %(name)s DeviceID: %(deviceid)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:441 +#, python-format +msgid "" +"Delete Volume: %(name)s Method: EMCReturnToStoragePool ConfigServic: " +"%(service)s TheElement: %(vol_instance)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:454 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:463 +#, python-format +msgid "Leaving delete_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:470 +msgid "Entering create_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:474 +#, python-format +msgid "Create snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:486 +#, python-format +msgid "Device ID: %(deviceid)s: Storage System: %(storagesystem)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:493 +#: cinder/volume/drivers/emc/emc_smis_common.py:495 +#: cinder/volume/drivers/emc/emc_smis_common.py:565 +#, python-format +msgid "Cannot find Replication Service to create snapshot for volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:500 +#, python-format +msgid "" +"Create Snapshot: Method: CreateElementReplica: Target: %(snapshot)s " +"Source: %(volume)s Replication Service: %(service)s ElementName: " +"%(elementname)s Sync Type: 7 SourceElement: %(sourceelement)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:516 +#, python-format +msgid "" +"Create Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:525 +#, python-format +msgid "Error Create Snapshot: (snapshot)s Volume: %(volume)s Error: %(errordesc)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:533 +#, python-format +msgid "" +"Leaving create_snapshot: Snapshot: %(snapshot)s Volume: %(volume)s " +"Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:539 +msgid "Entering delete_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:543 +#, python-format +msgid "Delete Snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:549 +#, python-format +msgid "" +"Delete Snapshot: %(snapshot)s: volume: %(volume)s. Finding " +"StorageSychronization_SV_SV." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:557 +#, python-format +msgid "" +"Snapshot: %(snapshot)s: volume: %(volume)s not found on the array. No " +"snapshot to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:572 +#, python-format +msgid "" +"Delete Snapshot: Target: %(snapshot)s Source: %(volume)s. Method: " +"ModifyReplicaSynchronization: Replication Service: %(service)s " +"Operation: 19 Synchronization: %(sync_name)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:588 +#, python-format +msgid "" +"Delete Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:597 +#, python-format +msgid "" +"Error Delete Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. 
Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:609 +#, python-format +msgid "" +"Leaving delete_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:619 +#, python-format +msgid "Create export: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:624 +#, python-format +msgid "create_export: Volume: %(volume)s Device ID: %(device_id)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:646 +#, python-format +msgid "" +"ExposePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(lun_name)s" +" InitiatorPortIDs: %(initiator)s DeviceAccesses: 2" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:661 +#, python-format +msgid "ExposePaths parameter LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:672 +#, python-format +msgid "Error mapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:676 +#, python-format +msgid "ExposePaths for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:692 +#, python-format +msgid "" +"HidePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(device_id)s " +"LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:705 +#, python-format +msgid "Error unmapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:709 +#, python-format +msgid "HidePaths for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:722 +#, python-format +msgid "" +"AddMembers: ConfigServicie: %(service)s MaskingGroup: %(masking_group)s" +" Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:735 +#, python-format +msgid "Error mapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:740 +#, python-format +msgid "AddMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:753 +#, python-format +msgid "" +"RemoveMembers: ConfigServicie: %(service)s MaskingGroup: " +"%(masking_group)s Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:766 +#, python-format +msgid "Error unmapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:771 +#, python-format +msgid "RemoveMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:777 +#, python-format +msgid "Map volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:786 +#: cinder/volume/drivers/emc/emc_smis_common.py:816 +#, python-format +msgid "Cannot find Controller Configuration Service for storage system %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:800 +#, python-format +msgid "Unmap volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:806 +#, python-format +msgid "Volume %s is not mapped. No volume to unmap." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:830 +#, python-format +msgid "Initialize connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:836 +#, python-format +msgid "Volume %s is already mapped." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:848 +#, python-format +msgid "Terminate connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:880 +#, python-format +msgid "Found Storage Type: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:883 +msgid "Storage type not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:899 +#, python-format +msgid "Found Masking View: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:902 +msgid "Masking View not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:924 +msgid "Ecom user not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:944 +#, python-format +msgid "Ecom IP: %(ecomIp)s Port: %(ecomPort)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:947 +msgid "Ecom server not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:954 +msgid "Cannot connect to ECOM server" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:966 +#, python-format +msgid "Found Replication Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:979 +#, python-format +msgid "Found Storage Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:992 +#, python-format +msgid "Found Controller Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1005 +#, python-format +msgid "Found Storage Hardware ID Management Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1049 +#, python-format +msgid "Pool %(storage_type)s is not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1055 +#, python-format +msgid "Storage system not found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1061 +#, python-format +msgid "Pool: %(pool)s SystemName: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1077 +#, python-format +msgid "Pool name: %(poolname)s System name: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1109 +#, python-format +msgid "Volume %(volumename)s not found on the array." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1112 +#, python-format +msgid "Volume name: %(volumename)s Volume instance: %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1125 +#, python-format +msgid "Source: %(volumename)s Target: %(snapshotname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1148 +#, python-format +msgid "" +"Source: %(volumename)s Target: %(snapshotname)s. Storage Synchronized " +"not found. " +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1153 +#, python-format +msgid "" +"Storage system: %(storage_system)s Storage Synchronized instance: " +"%(sync)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1179 +#, python-format +msgid "Error finding %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1183 +#, python-format +msgid "Found %(name)s: %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1243 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage_system)s " +"and initiator %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1282 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage volume %(vol)s and initiator" +" %(initiator)s is %(ctrl)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1295 +#, python-format +msgid "" +"Volume %(name)s not found on the array. Cannot determine if there are " +"volumes mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1307 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage)s and " +"%(connector)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1319 +#, python-format +msgid "" +"Found %(numVolumesMapped)d volumes on storage system %(storage)s mapped " +"to %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1354 +#, python-format +msgid "Available device number on %(storage)s: %(device)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1397 +#, python-format +msgid "Device number not found for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1402 +#, python-format +msgid "Found device number %(device)d for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1412 +#, python-format +msgid "Device info: %(data)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1434 +#, python-format +msgid "Masking view: %(view)s DeviceMaskingGroup: %(masking)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1456 +#, python-format +msgid "Found Storage Processor System: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1484 +#, python-format +msgid "" +"iSCSIProtocolEndpoint for storage system %(storage_system)s and SP %(sp)s" +" is %(endpoint)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1513 +msgid "Error finding Storage Hardware ID Service." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1519 +#, python-format +msgid "" +"EMCGetTargetEndpoints: Service: %(service)s Storage HardwareIDs: " +"%(hardwareids)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1531 +msgid "Error finding Target WWNs." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1541 +#, python-format +msgid "Add target WWN: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1543 +#, python-format +msgid "Target WWNs: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1559 +#, python-format +msgid "Storage Hardware IDs for %(wwpns)s is %(foundInstances)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:158 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:167 +#, python-format +msgid "Cannot find device number for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:197 +#, python-format +msgid "Found iSCSI endpoint: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:204 +#, python-format +msgid "ISCSI endpoint not found for SP %(sp)s on storage system %(storage)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:221 +#, python-format +msgid "ISCSI properties: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:113 +msgid "read timed out" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:138 +msgid "do_setup." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:144 +msgid "check_for_setup_error." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:149 +msgid "check_for_setup_error: Can not get device type." 
+msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:153 +#, python-format +msgid "check_for_setup_error: Device type is:%(type)s, version is:%(version)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:160 +msgid "" +"check_for_setup_error: Product version not right. Please make sure the " +"product version is V1." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:171 +msgid "_get_device_type: Storage Pool must be configured." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:180 +#, python-format +msgid "create_volume:volume name: %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:194 +#, python-format +msgid "delete_volume: volume name: %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:201 +#, python-format +msgid "delete_volume:No need to delete volume. Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:210 +#, python-format +msgid "create_export: volume name:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:214 +#, python-format +msgid "create_export:Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:234 +#, python-format +msgid "initialize_connection: volume name: %(volume)s. initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:249 +#, python-format +msgid "" +"initialize_connection:Failed to find target ip for " +"initiator:%(initiatorname)s, please check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:259 +#, python-format +msgid "initialize_connection:Failed to find target iSCSI iqn. Target IP:%(ip)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:297 +#, python-format +msgid "" +"initialize_connection:host name: %(host)s, initiator name: %(ini)s, " +"hostport name: %(port)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:307 +#, python-format +msgid "" +"initialize_connection:Failed to find the given volume. volume " +"name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:359 +#, python-format +msgid "terminate_connection:volume name: %(volume)s, initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:368 +#, python-format +msgid "terminate_connection:Host does not exist. Host name:%(host)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:377 +#, python-format +msgid "terminate_connection:volume does not exist. volume name:%(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:396 +#, python-format +msgid "" +"terminate_connection:No map between host and volume. Host " +"name:%(hostname)s, volume name:%(volumename)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:414 +#, python-format +msgid "" +"terminate_connection:No initiator is added to the host. Host " +"name:%(hostname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:427 +#, python-format +msgid "create_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:434 +msgid "create_snapshot:Device does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:440 +msgid "create_snapshot:Resource pool needs 1GB valid size at least." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:447 +#, python-format +msgid "create_snapshot:Volume does not exist. 
Volume name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:456 +#, python-format +msgid "create_snapshot:Snapshot does not exist. Snapshot name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:468 +#, python-format +msgid "delete_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:475 +msgid "delete_snapshot:Device does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:484 +#, python-format +msgid "delete_snapshot:Snapshot does not exist. snapshot name:%(snap)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:497 +#, python-format +msgid "" +"create_volume_from_snapshot:snapshot name:%(snapshot)s, volume " +"name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:505 +#, python-format +msgid "" +"create_volume_from_snapshot:Device does not support create volume from " +"snapshot. Volume name:%(volume)s, snapshot name:%(snapshot)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:516 +#, python-format +msgid "" +"create_volume_from_snapshot:Snapshot does not exist. Snapshot " +"name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:567 +msgid "Config file is wrong. Controler IP, UserName and UserPassword must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:573 +#, python-format +msgid "_check_conf_file: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:584 +#, python-format +msgid "_read_xml:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:612 +#, python-format +msgid "Write login information to xml error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:616 +#, python-format +msgid "_get_login_info error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:638 +#, python-format +msgid "" +"Config file is wrong. LUNType must be \"Thin\" or \"Thick\". " +"LUNType:%(type)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:665 +msgid "_get_lun_set_info:Use default prefetch type. Prefetch type:Intelligent." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:676 +msgid "" +"_get_lun_set_info:No available pools! Please check whether storage pool " +"is created." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:686 +#, python-format +msgid "_get_lun_set_info:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:736 +msgid "" +"_get_maximum_pool:maxpoolid is None. Please check config file and make " +"sure the \"Name\" in \"StoragePool\" is right." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:754 +#, python-format +msgid "_get_iscsi_info:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:764 +#, python-format +msgid "CLI command:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:831 +#, python-format +msgid "_execute_cli:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:838 +#, python-format +msgid "" +"_name_translate:Name in cinder: %(old)s, new name in storage system: " +"%(new)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:872 +#, python-format +msgid "" +"_create_hostgroup:Failed to Create hostgroup. Hostgroup name: %(name)s. " +"out:%(out)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:901 +#, python-format +msgid "" +"_add_host:Failed to add host to hostgroup. 
host name:%(host)s hostgroup " +"id:%(hostgroup)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:927 +#, python-format +msgid "" +"_add_initiator:Failed to add initiator. initiator name:%(name)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:941 +#, python-format +msgid "" +"_delete_initiator:ERROE:Failed to delete initiator. initiator " +"name:%(name)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:991 +#, python-format +msgid "" +"_add_hostport:Failed to add hostport. port name:%(port)s port " +"information:%(info)s host id:%(host)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1009 +#, python-format +msgid "_delete_hostport:Failed to delete host port. port id:%(portid)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1016 +#, python-format +msgid "_get_tgt_iqn:iSCSI IP is %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1049 +#, python-format +msgid "_get_tgt_iqn:iSCSI target iqn is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1086 +#, python-format +msgid "" +"_map_lun:Failed to add hostmap. hostid:%(host)s lunid:%(lun)s " +"hostlunid:%(hostlunid)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1122 +#, python-format +msgid "" +"_delete_map:There are IOs accessing the system. Retry to delete host map." +" map id:%(mapid)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1128 +#, python-format +msgid "_delete_map:Failed to delete host map. mapid:%(mapid)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1142 +#, python-format +msgid "_delete_host: Failed delete host. host id:%(hostid)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1223 +#, python-format +msgid "" +"_active_snapshot:Failed to active snapshot. snapshot id:%(name)s. " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1237 +#, python-format +msgid "" +"_disable_snapshot:Failed to disable snapshot. snapshot id:%(id)s. " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1251 +#, python-format +msgid "" +"_delete_snapshot:Failed to delete snapshot. snapshot id:%(id)s. " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1308 +#, python-format +msgid "_create_volume:Failed to Create volume. volume name:%(name)s. out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1321 +#, python-format +msgid "_delete_volume:Failed to delete volume. Volume name:%(name)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1338 +#, python-format +msgid "" +"_create_luncopy:Failed to Create LUNcopy. LUNcopy name:%(name)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1352 +#, python-format +msgid "" +"_start_luncopy:Failed to start LUNcopy. LUNcopy id:%(luncopyid)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1382 +#, python-format +msgid "" +"_wait_for_luncopy:LUNcopy status is not normal. LUNcopy " +"name:%(luncopyname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1415 +#, python-format +msgid "" +"_delete_luncopy:Failed to delete LUNcopy. LUNcopy id:%(luncopyid)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1430 +#, python-format +msgid "" +"_create_snapshot:Failed to Create snapshot. 
Snapshot name:%(name)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1468 +#, python-format +msgid "" +"_change_lun_controller:Failed to change lun owning controller. lun " +"id:%(lunid)s. new controller:%(controller)s. out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1484 +msgid "_is_resource_pool_enough:Resource pool for snapshot not be added." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:123 +#, python-format +msgid "API %(name)s failed: %(reason)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:134 +#: cinder/volume/drivers/netapp/iscsi.py:1167 +#, python-format +msgid "Using WSDL: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:144 +#, python-format +msgid "Using DFM server: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:149 +#, python-format +msgid "Using storage service: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:154 +#, python-format +msgid "Using storage service prefix: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:160 +#, python-format +msgid "Using vfiler: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:173 +msgid "Either netapp_storage_service or netapp_storage_service_prefix must be set" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:204 +msgid "Connected to DFM server" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:282 +#, python-format +msgid "Discovered %(dataset_count)s datasets and %(lun_count)s LUNs" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:317 +#, python-format +msgid "Job failed: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:346 +msgid "" +"Attempt to use volume_type without specifying " +"netapp_storage_service_prefix flag." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:350 +msgid "" +"You must set the netapp_storage_service flag in order to create volumes " +"with no volume_type." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:420 +msgid "Failed to provision dataset member" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:435 +msgid "No LUN was created by the provision job" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:463 +#: cinder/volume/drivers/netapp/iscsi.py:1250 +#, python-format +msgid "No entry in LUN table for volume %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:481 +msgid "Failed to remove and delete dataset LUN member" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:497 +msgid "Failed to remove and delete dataset Qtree member" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:538 +#, python-format +msgid "No entry in LUN table for volume %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:558 +#, python-format +msgid "Failed to get LUN details for LUN ID %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:575 +#, python-format +msgid "Failed to get host details for host ID %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:812 +#: cinder/volume/drivers/netapp/iscsi.py:859 +#, python-format +msgid "No LUN ID for volume %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:821 +#, python-format +msgid "Failed to get target portal for filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:826 +#, python-format +msgid "Failed to get target IQN for filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:930 +msgid "" +"Failed to get LUN list. Is the DFM host time-synchronized with Cinder " +"host?" 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1042 +#: cinder/volume/drivers/netapp/iscsi.py:1381 +#: cinder/volume/drivers/netapp/iscsi.py:1699 +#: cinder/volume/drivers/netapp/nfs.py:74 +#, python-format +msgid "" +"Cannot create volume of size %(vol_size)s from snapshot of size " +"%(snap_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1054 +#, python-format +msgid "" +"Cannot create volume of type %(new_type)s from snapshot of type " +"%(old_type)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1075 +#: cinder/volume/drivers/netapp/nfs.py:273 +#, python-format +msgid "" +"Cannot create clone of size %(vol_size)s from volume of size " +"%(src_vol_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1086 +#, python-format +msgid "Cannot create clone of type %(new_type)s from volume of type %(old_type)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1143 +#, python-format +msgid "No metadata property %(prop)s defined for the LUN %(name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1216 +#: cinder/volume/drivers/netapp/iscsi.py:1561 +msgid "Success getting LUN list from server" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1238 +#: cinder/volume/drivers/netapp/iscsi.py:1576 +#, python-format +msgid "Created LUN with name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1254 +#: cinder/volume/drivers/netapp/iscsi.py:1369 +#: cinder/volume/drivers/netapp/iscsi.py:1593 +#, python-format +msgid "Destroyed LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1291 +#, python-format +msgid "Mapped LUN %(handle)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1298 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(handle)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1303 +#: cinder/volume/drivers/netapp/iscsi.py:1637 +#, python-format +msgid "Failed to get LUN target details for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1307 +#: cinder/volume/drivers/netapp/iscsi.py:1648 +#, python-format +msgid "Failed to get target portal for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1311 +#: cinder/volume/drivers/netapp/iscsi.py:1651 +#, python-format +msgid "Failed to get target IQN for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1344 +#, python-format +msgid "Unmapped LUN %(handle)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1365 +#, python-format +msgid "No entry in LUN table for snapshot %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1409 +#: cinder/volume/drivers/netapp/iscsi.py:1907 +msgid "Object is not a NetApp LUN." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1419 +#: cinder/volume/drivers/netapp/iscsi.py:2188 +#, python-format +msgid "Cloned LUN with new name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1439 +#, python-format +msgid "Could not find handle for LUN named %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1458 +#: cinder/volume/drivers/netapp/iscsi.py:1936 +#, python-format +msgid "" +"Cannot clone volume of size %(vol_size)s from src volume of size " +"%(src_vol_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1518 +#, python-format +msgid "Using NetApp filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1585 +#, python-format +msgid "No entry in LUN table for volume/snapshot %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1628 +#, python-format +msgid "Mapped LUN %(name)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1632 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(name)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1688 +#, python-format +msgid "Snapshot %s deletion successful" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1717 +#, python-format +msgid "Unmapped LUN %(name)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1735 +#, python-format +msgid "Failed to get vol with required size for volume: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1811 +#, python-format +msgid "Error mapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1829 +#, python-format +msgid "Error unmapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1923 +#, python-format +msgid "Could not find attribute for LUN named %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2057 +#, python-format +msgid "No iscsi service found for vserver %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2192 +#, python-format +msgid "No clonned lun named %s found on the filer" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2377 +#, python-format +msgid "Error finding luns for volume %(vol)s. Verify volume exists." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2481 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s completed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2484 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s failed" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:312 +#, python-format +msgid "" +"Cloning with params ip %(host_ip)s, exp_path\n" +" %(export_path)s, vol %(volume_name)s,\n" +" clone_name %(clone_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:461 +#, python-format +msgid "No interface found on cluster for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:487 +#, python-format +msgid "" +"No volume on cluster with vserver\n" +" %(vserver)s and junction path " +"%(junction)s\n" +" " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:493 +#, python-format +msgid "" +"Cloning with params volume %(volume)s,src %(src_path)s,\n" +" dest %(dest_path)s, vserver %(vserver)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:547 +#, python-format +msgid "No storage path found for export path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:555 +#, python-format +msgid "Cloning with src %(src_path)s, dest %(dest_path)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/__init__.py:27 +msgid "Nexenta SA returned the error" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:64 +#, python-format +msgid "Sending JSON data: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:69 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:75 +msgid "No headers in server response" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:76 +msgid "Bad response from server" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:79 +#, python-format +msgid "Got response: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:99 +#, python-format +msgid "Volume %s does not exist in Nexenta SA" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:211 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:220 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:228 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:238 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:274 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:282 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:74 +#, python-format +msgid "CLIQ command returned %s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:80 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:88 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. 
Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:118 +#, python-format +msgid "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. " +"Result=%(_xml)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:171 +#, python-format +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:235 +msgid "local_path not supported" +msgstr "" + +#: cinder/volume/drivers/san/san.py:148 +#, python-format +msgid "Error running SSH command: %s" +msgstr "" + +#: cinder/volume/drivers/san/san.py:169 +msgid "Specify san_password or san_private_key" +msgstr "" + +#: cinder/volume/drivers/san/san.py:173 +msgid "san_ip must be set" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:80 +#, python-format +msgid "Cannot parse list-view output: %s" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:174 +#, python-format +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:257 +#, python-format +msgid "SSH Command failed after '%(total_attempts)r' attempts: '%(command)s'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:261 +#, python-format +msgid "Error running ssh command: %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:446 +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:104 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:103 +#, python-format +msgid "CPG (%s) doesn't exist on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:493 +#, python-format +msgid "Must specify a valid persona %(valid)s, value '%(persona)s' is invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:532 +#, python-format +msgid "" +"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " +"invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:569 +#, python-format +msgid "Volume (%s) already exists on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:622 +#, python-format +msgid "Unexpected state while cloning %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:97 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:96 +msgid "Login to 3PAR array invalid" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:218 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:237 +#, python-format +msgid "The hostname must be called '%s'" +msgstr "" + diff --git a/cinder/locale/cs/LC_MESSAGES/cinder.po b/cinder/locale/cs/LC_MESSAGES/cinder.po new file mode 100644 index 0000000000..f733b85c1f --- /dev/null +++ b/cinder/locale/cs/LC_MESSAGES/cinder.po @@ -0,0 +1,5631 @@ +# Czech translation for cinder +# Copyright (c) 2011 Rosetta Contributors and Canonical Ltd 2011 +# This file is distributed under the same license as the cinder package. +# FIRST AUTHOR , 2011. +# +msgid "" +msgstr "" +"Project-Id-Version: cinder\n" +"Report-Msgid-Bugs-To: FULL NAME \n" +"POT-Creation-Date: 2013-05-21 18:10+0000\n" +"PO-Revision-Date: 2012-04-04 20:28+0000\n" +"Last-Translator: Zbyněk Schwarz \n" +"Language-Team: Czech \n" +"Plural-Forms: nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && " +"n%10<=4 && (n%100<10 || n%100>=20) ? 
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=utf-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Generated-By: Babel 0.9.6\n"
+
+#: cinder/context.py:61
+#, python-format
+msgid "Arguments dropped when creating context: %s"
+msgstr ""
+
+#: cinder/context.py:92
+#, python-format
+msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r"
+msgstr ""
+
+#: cinder/exception.py:63
+msgid "Unexpected error while running command."
+msgstr "Při spuštění příkazu došlo k nečekané chybě."
+
+#: cinder/exception.py:66
+#, python-format
+msgid ""
+"%(description)s\n"
+"Command: %(cmd)s\n"
+"Exit code: %(exit_code)s\n"
+"Stdout: %(stdout)r\n"
+"Stderr: %(stderr)r"
+msgstr ""
+"%(description)s\n"
+"Příkaz: %(cmd)s\n"
+"Kód ukončení: %(exit_code)s\n"
+"Stdout: %(stdout)r\n"
+"Stderr: %(stderr)r"
+
+#: cinder/exception.py:90
+msgid "DB exception wrapped."
+msgstr "Výjimka DB zabalena."
+
+#: cinder/exception.py:104
+msgid "An unknown exception occurred."
+msgstr "Vyskytla se neznámá výjimka."
+
+#: cinder/exception.py:125 cinder/openstack/common/rpc/common.py:86
+msgid "Exception in string format operation"
+msgstr ""
+
+#: cinder/exception.py:138
+msgid "Connection to glance failed"
+msgstr "Připojení k glance selhalo"
+
+#: cinder/exception.py:142
+msgid "Not authorized."
+msgstr "Neschváleno."
+
+#: cinder/exception.py:147
+msgid "User does not have admin privileges"
+msgstr "Uživatel nemá správcovská oprávnění"
+
+#: cinder/exception.py:151
+#, python-format
+msgid "Policy doesn't allow %(action)s to be performed."
+msgstr "Pravidla nedovolují, aby bylo %(action)s provedeno."
+
+#: cinder/exception.py:155
+#, fuzzy, python-format
+msgid "Not authorized for image %(image_id)s."
+msgstr "Neschváleno pro obraz %(image_id)s."
+
+#: cinder/exception.py:159
+msgid "Unacceptable parameters."
+msgstr "Nepřijatelné parametry."
+
+#: cinder/exception.py:164
+msgid "Invalid snapshot"
+msgstr "Neplatný snímek"
+
+#: cinder/exception.py:168
+#, fuzzy, python-format
+msgid "Volume %(volume_id)s is still attached, detach volume first."
+msgstr "Svazek %(volume_id)s je stále připojen, nejprve jej odpojte."
+
+#: cinder/exception.py:172
+msgid "Failed to load data into json format"
+msgstr "Nelze načíst data do formátu json"
+
+#: cinder/exception.py:176
+msgid "The request is invalid."
+msgstr "Požadavek je neplatný."
+
+#: cinder/exception.py:180
+#, fuzzy
+msgid "The results are invalid."
+msgstr "Výsledky jsou neplatné."
+
+#: cinder/exception.py:184
+msgid "Invalid input received"
+msgstr "Obdržen neplatný vstup"
+
+#: cinder/exception.py:188
+msgid "Invalid volume type"
+msgstr "Neplatný typ svazku"
+
+#: cinder/exception.py:192
+msgid "Invalid volume"
+msgstr "Neplatný svazek"
+
+#: cinder/exception.py:196
+#, python-format
+msgid "Invalid content type %(content_type)s."
+msgstr "Neplatný typ obsahu %(content_type)s."
+
+#: cinder/exception.py:200
+msgid "Invalid Parameter: Unicode is not supported by the current database."
+msgstr ""
+
+#: cinder/exception.py:207
+#, python-format
+msgid "%(err)s"
+msgstr "%(err)s"
+
+#: cinder/exception.py:211
+msgid "Service is unavailable at this time."
+msgstr "Služba je v tuto chvíli nedostupná."
+
+#: cinder/exception.py:215
+#, python-format
+msgid "Image %(image_id)s is unacceptable: %(reason)s"
+msgstr "Obraz %(image_id)s je nepřijatelný: %(reason)s"
+
+#: cinder/exception.py:219
+msgid "Expected a uuid but received %(uuid)s."
+msgstr ""
+
+#: cinder/exception.py:223
+msgid "Resource could not be found."
+msgstr "Zdroj nemohl být nalezen."
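+
+# Editorial note: the "Plural-Forms" header above picks one of three Czech
+# plural variants for each ngettext call. A minimal Python sketch of what the
+# C-style expression in the header computes (hypothetical helper, shown only
+# for illustration; gettext evaluates the header expression itself):
+#
+#     def czech_plural_index(n):
+#         if n % 10 == 1 and n % 100 != 11:      # 1, 21, 31, ... -> form 0
+#             return 0
+#         if 2 <= n % 10 <= 4 and (n % 100 < 10 or n % 100 >= 20):
+#             return 1                           # 2-4, 22-24, ... -> form 1
+#         return 2                               # everything else -> form 2
+#
+#     assert [czech_plural_index(n) for n in (1, 2, 5, 12, 21)] == [0, 1, 2, 2, 0]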
+
+#: cinder/exception.py:229
+#, fuzzy, python-format
+msgid "Volume %(volume_id)s persistence file could not be found."
+msgstr "Soubor perzistence svazku %(volume_id)s nemohl být nalezen."
+
+#: cinder/exception.py:233
+#, python-format
+msgid "Volume %(volume_id)s could not be found."
+msgstr "Svazek %(volume_id)s nemohl být nalezen."
+
+#: cinder/exception.py:237
+#, fuzzy, python-format
+msgid "Unable to locate account %(account_name)s on Solidfire device"
+msgstr "Nelze nalézt účet %(account_name)s na zařízení Solidfire"
+
+#: cinder/exception.py:242
+#, python-format
+msgid "Volume not found for instance %(instance_id)s."
+msgstr "Svazek není nalezen v instanci %(instance_id)s."
+
+#: cinder/exception.py:246
+#, python-format
+msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s."
+msgstr "Svazek %(volume_id)s nemá žádná metadata s klíčem %(metadata_key)s."
+
+#: cinder/exception.py:251 cinder/exception.py:264
+msgid "Invalid metadata"
+msgstr "Neplatná metadata"
+
+#: cinder/exception.py:255 cinder/exception.py:268
+#, fuzzy
+msgid "Invalid metadata size"
+msgstr "Neplatná velikost metadat"
+
+#: cinder/exception.py:259
+#, fuzzy, python-format
+msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s."
+msgstr "Snímek %(snapshot_id)s nemá žádná metadata s klíčem %(metadata_key)s."
+
+#: cinder/exception.py:272
+#, python-format
+msgid "Volume type %(volume_type_id)s could not be found."
+msgstr "Typ svazku %(volume_type_id)s nemohl být nalezen."
+
+#: cinder/exception.py:276
+#, python-format
+msgid "Volume type with name %(volume_type_name)s could not be found."
+msgstr "Typ svazku s názvem %(volume_type_name)s nemohl být nalezen."
+
+#: cinder/exception.py:281
+#, python-format
+msgid ""
+"Volume Type %(volume_type_id)s has no extra specs with key "
+"%(extra_specs_key)s."
+msgstr ""
+"Typ svazku %(volume_type_id)s nemá žádné dodatečné parametry s klíčem "
+"%(extra_specs_key)s."
+
+#: cinder/exception.py:286
+#, python-format
+msgid "Snapshot %(snapshot_id)s could not be found."
+msgstr "Snímek %(snapshot_id)s nemohl být nalezen."
+
+#: cinder/exception.py:290
+#, python-format
+msgid "deleting volume %(volume_name)s that has snapshot"
+msgstr "mazání svazku %(volume_name)s, který má snímek"
+
+#: cinder/exception.py:294
+#, python-format
+msgid "deleting snapshot %(snapshot_name)s that has dependent volumes"
+msgstr ""
+
+#: cinder/exception.py:299
+#, python-format
+msgid "No target id found for volume %(volume_id)s."
+msgstr "Nenalezeno žádné cílové id ve svazku %(volume_id)s."
+
+#: cinder/exception.py:303
+#, fuzzy, python-format
+msgid "Failed to create iscsi target for volume %(volume_id)s."
+msgstr "Nelze vytvořit iscsi cíl pro svazek %(volume_id)s."
+
+#: cinder/exception.py:307
+#, fuzzy, python-format
+msgid "Failed to attach iSCSI target for volume %(volume_id)s."
+msgstr "Nelze připojit iSCSI cíl pro svazek %(volume_id)s."
+
+#: cinder/exception.py:311
+#, fuzzy, python-format
+msgid "Failed to remove iscsi target for volume %(volume_id)s."
+msgstr "Nelze odstranit iscsi cíl pro svazek %(volume_id)s."
+
+#: cinder/exception.py:315
+#, python-format
+msgid "No disk at %(location)s"
+msgstr "Žádný disk ve %(location)s"
+
+#: cinder/exception.py:319
+#, python-format
+msgid "Invalid image href %(image_href)s."
+msgstr "Neplatný href %(image_href)s obrazu."
+
+#: cinder/exception.py:323
+#, python-format
+msgid "Image %(image_id)s could not be found."
+msgstr "Obraz %(image_id)s nemohl být nalezen."
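+
+# Editorial note: entries flagged "#, python-format" are interpolated with a
+# dict of named values, so every msgstr must keep exactly the placeholders of
+# its msgid. A minimal sketch of the consuming pattern (the "_" function here
+# stands in for whatever gettext installation cinder performs; names
+# illustrative):
+#
+#     from gettext import gettext as _
+#     msg = _("Volume %(volume_id)s could not be found.") % {'volume_id': 'vol-42'}
+#
+# A msgstr placeholder such as "%(account_name)" (missing the trailing "s"
+# conversion) raises ValueError at interpolation time, which is why such
+# placeholder typos are corrected in the entries above.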
+
+#: cinder/exception.py:327
+#, python-format
+msgid "Service %(service_id)s could not be found."
+msgstr "Služba %(service_id)s nemohla být nalezena."
+
+#: cinder/exception.py:331
+#, python-format
+msgid "Host %(host)s could not be found."
+msgstr "Hostitel %(host)s nemohl být nalezen."
+
+#: cinder/exception.py:335
+#, python-format
+msgid "Scheduler Host Filter %(filter_name)s could not be found."
+msgstr "Filtr hostitelů plánovače %(filter_name)s nemohl být nalezen."
+
+#: cinder/exception.py:339
+#, fuzzy, python-format
+msgid "Scheduler Host Weigher %(weigher_name)s could not be found."
+msgstr "Filtr hostitelů plánovače %(weigher_name)s nemohl být nalezen."
+
+#: cinder/exception.py:343
+#, python-format
+msgid "Could not find binary %(binary)s on host %(host)s."
+msgstr "Nelze najít binární soubor %(binary)s v hostiteli %(host)s."
+
+#: cinder/exception.py:347
+#, python-format
+msgid "Invalid reservation expiration %(expire)s."
+msgstr ""
+
+#: cinder/exception.py:351
+#, python-format
+msgid ""
+"Change would make usage less than 0 for the following resources: "
+"%(unders)s"
+msgstr ""
+
+#: cinder/exception.py:356
+msgid "Quota could not be found"
+msgstr "Kvóta nemohla být nalezena."
+
+#: cinder/exception.py:360
+#, python-format
+msgid "Unknown quota resources %(unknown)s."
+msgstr ""
+
+#: cinder/exception.py:364
+#, python-format
+msgid "Quota for project %(project_id)s could not be found."
+msgstr "Kvóta pro projekt %(project_id)s nemohla být nalezena."
+
+#: cinder/exception.py:368
+#, fuzzy, python-format
+msgid "Quota class %(class_name)s could not be found."
+msgstr "Třída kvóty %(class_name)s nemohla být nalezena."
+
+#: cinder/exception.py:372
+#, fuzzy, python-format
+msgid "Quota usage for project %(project_id)s could not be found."
+msgstr "Využití kvóty pro projekt %(project_id)s nemohlo být nalezeno."
+
+#: cinder/exception.py:376
+#, fuzzy, python-format
+msgid "Quota reservation %(uuid)s could not be found."
+msgstr "Rezervace kvóty %(uuid)s nemohla být nalezena."
+
+#: cinder/exception.py:380
+#, fuzzy, python-format
+msgid "Quota exceeded for resources: %(overs)s"
+msgstr "Kvóta překročena pro zdroje: %(overs)s"
+
+#: cinder/exception.py:384
+#, python-format
+msgid "Migration %(migration_id)s could not be found."
+msgstr "Přesun %(migration_id)s nemohl být nalezen."
+
+#: cinder/exception.py:388
+#, python-format
+msgid "Migration not found for instance %(instance_id)s with status %(status)s."
+msgstr "Přesun nenalezen v instanci %(instance_id)s se stavem %(status)s."
+
+#: cinder/exception.py:393
+#, python-format
+msgid "File %(file_path)s could not be found."
+msgstr "Soubor %(file_path)s nemohl být nalezen."
+
+#: cinder/exception.py:397
+#, python-format
+msgid "Class %(class_name)s could not be found: %(exception)s"
+msgstr "Třída %(class_name)s nemohla být nalezena: %(exception)s"
+
+#: cinder/exception.py:401
+msgid "Action not allowed."
+msgstr "Činnost není povolena."
+
+#: cinder/exception.py:410
+#, python-format
+msgid "Key pair %(key_name)s already exists."
+msgstr "Dvojice klíčů %(key_name)s již existuje."
+
+#: cinder/exception.py:414
+#, fuzzy, python-format
+msgid "Volume Type %(id)s already exists."
+msgstr "Typ svazku %(id)s již existuje."
+
+#: cinder/exception.py:418
+msgid "Migration error"
+msgstr "Chyba přesunu"
+
+#: cinder/exception.py:422
+#, python-format
+msgid "Malformed message body: %(reason)s"
+msgstr "Poškozené tělo zprávy: %(reason)s"
+
+#: cinder/exception.py:426
+#, python-format
+msgid "Could not find config at %(path)s"
+msgstr "Nelze najít nastavení v %(path)s"
+
+#: cinder/exception.py:430
+#, python-format
+msgid "Could not load paste app '%(name)s' from %(path)s"
+msgstr "Nelze načíst aplikaci vložení '%(name)s' z %(path)s"
+
+#: cinder/exception.py:434
+#, python-format
+msgid "No valid host was found. %(reason)s"
+msgstr "Nebyl nalezen žádný platný hostitel. %(reason)s"
+
+#: cinder/exception.py:438
+#, python-format
+msgid "Host %(host)s is not up or doesn't exist."
+msgstr "Hostitel %(host)s není dostupný nebo neexistuje."
+
+#: cinder/exception.py:442
+msgid "Quota exceeded"
+msgstr "Kvóta překročena"
+
+#: cinder/exception.py:449
+msgid "Requested volume or snapshot exceeds allowed Gigabytes quota"
+msgstr ""
+
+#: cinder/exception.py:454
+msgid "Maximum volume/snapshot size exceeded"
+msgstr ""
+
+#: cinder/exception.py:458
+#, python-format
+msgid "Maximum number of volumes allowed (%(allowed)d) exceeded"
+msgstr ""
+
+#: cinder/exception.py:462
+#, python-format
+msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded"
+msgstr ""
+
+#: cinder/exception.py:466
+#, fuzzy, python-format
+msgid "Detected more than one volume with name %(vol_name)s"
+msgstr "Zjištěn více než jeden svazek s názvem %(vol_name)s"
+
+#: cinder/exception.py:470
+#, python-format
+msgid "3PAR Host already exists: %(err)s. %(info)s"
+msgstr ""
+
+#: cinder/exception.py:474
+#, python-format
+msgid "Invalid 3PAR Domain: %(err)s"
+msgstr ""
+
+#: cinder/exception.py:478
+#, python-format
+msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s"
+msgstr ""
+"Nelze vytvořit typ_svazku s názvem %(name)s a specifikacemi "
+"%(extra_specs)s"
+
+#: cinder/exception.py:483
+msgid "Bad response from SolidFire API"
+msgstr "Špatná odpověď od SolidFire API"
+
+#: cinder/exception.py:487
+#, python-format
+msgid "Error in SolidFire API response: data=%(data)s"
+msgstr "Chyba v odpovědi SolidFire API: data=%(data)s"
+
+#: cinder/exception.py:491
+#, python-format
+msgid "Unknown or unsupported command %(cmd)s"
+msgstr ""
+
+#: cinder/exception.py:495
+#, fuzzy, python-format
+msgid "Malformed response to command %(cmd)s: %(reason)s"
+msgstr "Poškozená odpověď na příkaz %(cmd)s: %(reason)s"
+
+#: cinder/exception.py:499
+#, fuzzy, python-format
+msgid "Bad HTTP response status %(status)s"
+msgstr "Špatný stav odpovědi HTTP %(status)s"
+
+#: cinder/exception.py:503
+#, python-format
+msgid "Operation failed with status=%(status)s. Full dump: %(data)s"
+msgstr ""
+
+#: cinder/exception.py:507
+#, python-format
+msgid "Unable to create server object for initiator %(name)s"
+msgstr ""
+
+#: cinder/exception.py:511
+#, python-format
+msgid "Unable to find server object for initiator %(name)s"
+msgstr ""
+
+#: cinder/exception.py:515
+msgid "Unable to find any active VPSA controller"
+msgstr ""
+
+#: cinder/exception.py:519
+#, python-format
+msgid "Failed to retrieve attachments for volume %(name)s"
+msgstr ""
+
+#: cinder/exception.py:523
+#, python-format
+msgid "Invalid attachment info for volume %(name)s: %(reason)s"
+msgstr ""
+
+#: cinder/exception.py:527
+#, python-format
+msgid "Instance %(instance_id)s could not be found."
+msgstr "Instance %(instance_id)s nemohla být nalezena."
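+
+# Editorial note: placeholder mismatches like the "%(vol_name)s" fix above are
+# caught by compiling the catalog with GNU gettext's format checks enabled:
+#
+#     msgfmt --check -o cinder.mo cinder/locale/cs/LC_MESSAGES/cinder.po
+#
+# --check verifies (among other things) that python-format entries keep the
+# msgid's named placeholders in the msgstr; entries still marked "#, fuzzy"
+# are skipped by msgfmt unless --use-fuzzy is passed.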
+
+#: cinder/exception.py:531
+#, python-format
+msgid "Bad or unexpected response from the storage volume backend API: %(data)s"
+msgstr ""
+
+#: cinder/exception.py:536
+#, fuzzy
+msgid "Unknown NFS exception"
+msgstr "Neznámá výjimka NFS"
+
+#: cinder/exception.py:540
+msgid "No mounted NFS shares found"
+msgstr ""
+
+#: cinder/exception.py:544 cinder/exception.py:556
+#, python-format
+msgid "There is no share which can host %(volume_size)sG"
+msgstr ""
+
+#: cinder/exception.py:548
+#, fuzzy
+msgid "Unknown Gluster exception"
+msgstr "Neznámá výjimka Gluster"
+
+#: cinder/exception.py:552
+msgid "No mounted Gluster shares found"
+msgstr ""
+
+#: cinder/exception.py:560
+#, python-format
+msgid ""
+"Glance metadata cannot be updated, key %(key)s exists for volume id "
+"%(volume_id)s"
+msgstr ""
+
+#: cinder/exception.py:565
+msgid "Failed to copy image to volume"
+msgstr ""
+
+#: cinder/exception.py:569
+#, fuzzy, python-format
+msgid "Backup %(backup_id)s could not be found."
+msgstr "Záloha %(backup_id)s nemohla být nalezena."
+
+#: cinder/exception.py:573
+#, fuzzy, python-format
+msgid "Invalid backup: %(reason)s"
+msgstr "Neplatná záloha: %(reason)s"
+
+#: cinder/exception.py:577
+#, fuzzy
+msgid "Connection to swift failed"
+msgstr "Připojení ke swift selhalo"
+
+#: cinder/flags.py:145
+msgid "Deploy v1 of the Cinder API. "
+msgstr ""
+
+#: cinder/flags.py:148
+msgid "Deploy v2 of the Cinder API. "
+msgstr ""
+
+#: cinder/manager.py:158
+#, python-format
+msgid "Skipping %(full_task_name)s, %(ticks_to_skip)s ticks left until next run"
+msgstr ""
+"Přeskakování %(full_task_name)s, zbývá %(ticks_to_skip)s tiků do dalšího "
+"spuštění"
+
+#: cinder/manager.py:164
+#, python-format
+msgid "Running periodic task %(full_task_name)s"
+msgstr "Spuštění pravidelné úlohy %(full_task_name)s"
+
+#: cinder/manager.py:171
+#, python-format
+msgid "Error during %(full_task_name)s: %(e)s"
+msgstr "Chyba při %(full_task_name)s: %(e)s"
+
+#: cinder/manager.py:216
+msgid "Notifying Schedulers of capabilities ..."
+msgstr "Oznamování schopností plánovačům ..."
+
+#: cinder/policy.py:30
+msgid "JSON file representing policy"
+msgstr "Soubor JSON představující zásady"
+
+#: cinder/policy.py:33
+msgid "Rule checked when requested rule is not found"
+msgstr "Kontrolované pravidlo, když požadované není nalezeno"
+
+#: cinder/quota.py:704
+#, python-format
+msgid "Created reservations %(reservations)s"
+msgstr ""
+
+#: cinder/quota.py:726
+#, python-format
+msgid "Failed to commit reservations %(reservations)s"
+msgstr ""
+
+#: cinder/quota.py:747
+#, python-format
+msgid "Failed to roll back reservations %(reservations)s"
+msgstr ""
+
+#: cinder/service.py:124
+msgid "SIGTERM received"
+msgstr ""
+
+#: cinder/service.py:174 cinder/openstack/common/service.py:164
+msgid "Parent process has died unexpectedly, exiting"
+msgstr ""
+
+#: cinder/service.py:213 cinder/openstack/common/service.py:200
+msgid "Forking too fast, sleeping"
+msgstr ""
+
+#: cinder/service.py:231 cinder/openstack/common/service.py:121
+#: cinder/openstack/common/service.py:218
+#, python-format
+msgid "Caught %s, exiting"
+msgstr ""
+
+#: cinder/service.py:236 cinder/openstack/common/service.py:223
+#, fuzzy
+msgid "Unhandled exception"
+msgstr "Neošetřená výjimka"
+
+#: cinder/service.py:243 cinder/openstack/common/service.py:230
+#, python-format
+msgid "Started child %d"
+msgstr ""
+
+#: cinder/service.py:253 cinder/openstack/common/service.py:240
+#, python-format
+msgid "Starting %d workers"
+msgstr ""
+
+#: cinder/service.py:272 cinder/openstack/common/service.py:257
+#, python-format
+msgid "Child %(pid)d killed by signal %(sig)d"
+msgstr ""
+
+#: cinder/service.py:275
+#, python-format
+msgid "Child %(pid)d exited with status %(code)d"
+msgstr ""
+
+#: cinder/service.py:278 cinder/openstack/common/service.py:265
+#, python-format
+msgid "pid %d not in child list"
+msgstr ""
+
+#: cinder/service.py:286
+#, python-format
+msgid "_wait_child %d"
+msgstr ""
+
+#: cinder/service.py:302
+#, fuzzy, python-format
+msgid "wait wrap.failed %s"
+msgstr "Vyvoláno Nenalezeno: %s"
+
+#: cinder/service.py:310 cinder/openstack/common/service.py:293
+#, python-format
+msgid "Caught %s, stopping children"
+msgstr ""
+
+#: cinder/service.py:321 cinder/openstack/common/service.py:304
+#, python-format
+msgid "Waiting on %d children to exit"
+msgstr ""
+
+#: cinder/service.py:353
+#, fuzzy, python-format
+msgid "Starting %(topic)s node (version %(version_string)s)"
+msgstr "Začínající uzel %(topic)s (verze %(version_string)s)"
+
+#: cinder/service.py:367 cinder/openstack/common/rpc/service.py:47
+#, python-format
+msgid "Creating Consumer connection for Service %s"
+msgstr "Vytváření připojení zákazníka pro službu %s"
+
+#: cinder/service.py:458
+msgid "Service killed that has no database entry"
+msgstr "Ukončena služba bez záznamu v databázi"
+
+#: cinder/service.py:495
+msgid "The service database object disappeared, Recreating it."
+msgstr "Objekt databáze služby zmizel, je znovu vytvářen."
+
+#: cinder/service.py:510
+msgid "Recovered model server connection!"
+msgstr "Obnoveno připojení modelového serveru!"
+
+#: cinder/service.py:516
+msgid "model server went away"
+msgstr "modelový server je nedostupný"
+
+#: cinder/service.py:608
+msgid "Full set of FLAGS:"
+msgstr "Úplná sada PŘÍZNAKŮ:"
+
+#: cinder/service.py:615
+#, python-format
+msgid "%(flag)s : FLAG SET "
+msgstr "%(flag)s : SADA PŘÍZNAKŮ "
+
+#: cinder/utils.py:96
+#, python-format
+msgid "Fetching %s"
+msgstr "Získávání %s"
+
+#: cinder/utils.py:147 cinder/openstack/common/processutils.py:122
+#, python-format
+msgid "Got unknown keyword args to utils.execute: %r"
+msgstr "Získány neznámé argumenty klíčového slova pro utils.execute: %r"
+
+#: cinder/utils.py:153
+msgid ""
+"The root_helper option (which lets you specify a root wrapper different "
+"from cinder-rootwrap, and defaults to using sudo) is now deprecated. You "
+"should use the rootwrap_config option instead."
+msgstr ""
+
+#: cinder/utils.py:169 cinder/openstack/common/processutils.py:137
+#, python-format
+msgid "Running cmd (subprocess): %s"
+msgstr "Spouštění příkazu (podproces): %s"
+
+#: cinder/utils.py:186 cinder/utils.py:267
+#: cinder/openstack/common/processutils.py:162
+#: cinder/volume/drivers/san/hp/hp_3par_common.py:224
+#, python-format
+msgid "Result was %s"
+msgstr "Výsledek byl %s"
+
+#: cinder/utils.py:199 cinder/openstack/common/processutils.py:174
+#, python-format
+msgid "%r failed. Retrying."
+msgstr "%r selhalo. Opakování."
+
+#: cinder/utils.py:241 cinder/volume/drivers/san/hp/hp_3par_common.py:197
+#, python-format
+msgid "Running cmd (SSH): %s"
+msgstr "Spouštění příkazu (SSH): %s"
+
+#: cinder/utils.py:243
+msgid "Environment not supported over SSH"
+msgstr "Prostředí není podporováno přes SSH"
+
+#: cinder/utils.py:247
+msgid "process_input not supported over SSH"
+msgstr "process_input není podporován přes SSH"
+
+#: cinder/utils.py:316 cinder/volume/drivers/huawei/huawei_iscsi.py:91
+msgid "Specify a password or private_key"
+msgstr ""
+
+#: cinder/utils.py:332 cinder/volume/drivers/huawei/huawei_iscsi.py:99
+#, python-format
+msgid "Error connecting via ssh: %s"
+msgstr ""
+
+#: cinder/utils.py:373
+#, python-format
+msgid "debug in callback: %s"
+msgstr "ladění ve zpětném volání: %s"
+
+#: cinder/utils.py:532
+#, python-format
+msgid "Link Local address is not found.:%s"
+msgstr "Adresa místního spojení nenalezena.: %s"
+
+#: cinder/utils.py:535
+#, python-format
+msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s"
+msgstr "Nelze získat IP místního spojení %(interface)s :%(ex)s"
+
+#: cinder/utils.py:569
+#, python-format
+msgid "Invalid backend: %s"
+msgstr "Neplatná podpůrná vrstva: %s"
+
+#: cinder/utils.py:580
+#, python-format
+msgid "backend %s"
+msgstr "podpůrná vrstva: %s"
+
+#: cinder/utils.py:630
+msgid "in looping call"
+msgstr "v opakujícím volání"
+
+#: cinder/utils.py:813
+#, python-format
+msgid "Expected object of type: %s"
+msgstr "Očekáván objekt typu: %s"
+
+#: cinder/utils.py:903
+#, python-format
+msgid "timefunc: '%(name)s' took %(total_time).2f secs"
+msgstr "timefunc: '%(name)s' trvalo %(total_time).2f sek"
+
+#: cinder/utils.py:1105
+#, python-format
+msgid "Could not remove tmpdir: %s"
+msgstr ""
+
+#: cinder/utils.py:1213
+#, python-format
+msgid "Unknown byte multiplier: %s"
+msgstr ""
+
+#: cinder/wsgi.py:121
+#, python-format
+msgid "Unable to find cert_file : %s"
+msgstr ""
+
+#: cinder/wsgi.py:124
+#, python-format
+msgid "Unable to find ca_file : %s"
+msgstr ""
+
+#: cinder/wsgi.py:127
+#, python-format
+msgid "Unable to find key_file : %s"
+msgstr ""
+
+#: cinder/wsgi.py:130
+msgid ""
running server in SSL mode, you must specify both a cert_file and " +"key_file option value in your configuration file" +msgstr "" + +#: cinder/wsgi.py:163 +#, python-format +msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" +msgstr "" + +#: cinder/wsgi.py:207 +#, fuzzy, python-format +msgid "Started %(name)s on %(_host)s:%(_port)s" +msgstr "%(name)s spuštěno v %(host)s:%(port)s" + +#: cinder/wsgi.py:226 +msgid "Stopping WSGI server." +msgstr "Zastavování serveru WSGI." + +#: cinder/wsgi.py:240 +msgid "WSGI server has stopped." +msgstr "Server WSGI byl zastaven." + +#: cinder/wsgi.py:309 +msgid "You must implement __call__" +msgstr "Musíte zavést __call__" + +#: cinder/api/auth.py:26 +msgid "" +"cinder.api.auth:CinderKeystoneContext is deprecated. Please use " +"cinder.api.middleware.auth:CinderKeystoneContext instead." +msgstr "" + +#: cinder/api/auth.py:34 +msgid "" +"cinder.api.auth:pipeline_factory is deprecated. Please use " +"cinder.api.middleware.auth:pipeline_factory instead." +msgstr "" + +#: cinder/api/common.py:63 cinder/api/common.py:97 cinder/volume/api.py:359 +msgid "limit param must be an integer" +msgstr "parametr limit musí být celé číslo" + +#: cinder/api/common.py:66 cinder/api/common.py:101 cinder/volume/api.py:356 +msgid "limit param must be positive" +msgstr "parametr limit musí být kladný" + +#: cinder/api/common.py:91 +msgid "offset param must be an integer" +msgstr "parametr offset musí být celé číslo" + +#: cinder/api/common.py:105 +msgid "offset param must be positive" +msgstr "parametr offset musí být kladný" + +#: cinder/api/common.py:133 +#, python-format +msgid "marker [%s] not found" +msgstr "značka [%s] nenalezena" + +#: cinder/api/common.py:160 +#, python-format +msgid "href %s does not contain version" +msgstr "href %s neobsahuje verzi" + +#: cinder/api/extensions.py:184 +msgid "Initializing extension manager." +msgstr "Zavádění správce rozšíření." 
+
+#: cinder/api/extensions.py:199
+#, python-format
+msgid "Loaded extension: %s"
+msgstr "Načteno rozšíření: %s"
+
+#: cinder/api/extensions.py:237
+#, python-format
+msgid "Ext name: %s"
+msgstr "Název roz: %s"
+
+#: cinder/api/extensions.py:238
+#, python-format
+msgid "Ext alias: %s"
+msgstr "Přezdívka roz: %s"
+
+#: cinder/api/extensions.py:239
+#, python-format
+msgid "Ext description: %s"
+msgstr "Popis roz: %s"
+
+#: cinder/api/extensions.py:241
+#, python-format
+msgid "Ext namespace: %s"
+msgstr "Jmenný prostor roz: %s"
+
+#: cinder/api/extensions.py:242
+#, python-format
+msgid "Ext updated: %s"
+msgstr "Roz aktualizováno: %s"
+
+#: cinder/api/extensions.py:244
+#, python-format
+msgid "Exception loading extension: %s"
+msgstr "Výjimka při načítání rozšíření: %s"
+
+#: cinder/api/extensions.py:258
+#, python-format
+msgid "Loading extension %s"
+msgstr "Načítání rozšíření %s"
+
+#: cinder/api/extensions.py:264
+#, python-format
+msgid "Calling extension factory %s"
+msgstr "Volání továrny rozšíření %s"
+
+#: cinder/api/extensions.py:278
+#, python-format
+msgid "osapi_volume_extension is set to deprecated path: %s"
+msgstr ""
+
+#: cinder/api/extensions.py:280
+#, python-format
+msgid ""
+"Please set your flag or cinder.conf settings for osapi_volume_extension "
+"to: %s"
+msgstr ""
+
+#: cinder/api/extensions.py:289
+#, python-format
+msgid "Failed to load extension %(ext_factory)s: %(exc)s"
+msgstr "Nelze načíst rozšíření %(ext_factory)s: %(exc)s"
+
+#: cinder/api/extensions.py:357
+#, python-format
+msgid "Failed to load extension %(classpath)s: %(exc)s"
+msgstr "Nelze načíst rozšíření %(classpath)s: %(exc)s"
+
+#: cinder/api/extensions.py:381
+#, python-format
+msgid "Failed to load extension %(ext_name)s: %(exc)s"
+msgstr "Nelze načíst rozšíření %(ext_name)s: %(exc)s"
+
+#: cinder/api/sizelimit.py:25
+msgid ""
+"cinder.api.sizelimit:RequestBodySizeLimiter is deprecated. Please use "
+"cinder.api.middleware.sizelimit:RequestBodySizeLimiter instead"
+msgstr ""
+
+#: cinder/api/xmlutil.py:268
+msgid "element is not a child"
+msgstr "prvek není podřazený"
+
+#: cinder/api/xmlutil.py:417
+msgid "root element selecting a list"
+msgstr "kořenový prvek volí seznam"
+
+#: cinder/api/xmlutil.py:742
+#, python-format
+msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s"
+msgstr ""
+"Neshoda stromu šablony; přidávání sluhy %(slavetag)s k pánovi "
+"%(mastertag)s"
+
+#: cinder/api/xmlutil.py:861
+msgid "subclasses must implement construct()!"
+msgstr "podtřídy musí zavádět construct()!"
+
+#: cinder/api/contrib/admin_actions.py:80
+#, python-format
+msgid "Updating %(resource)s '%(id)s' with '%(update)r'"
+msgstr ""
+
+#: cinder/api/contrib/backups.py:128
+#, python-format
+msgid "show called for member %s"
+msgstr ""
+
+#: cinder/api/contrib/backups.py:140
+#, python-format
+msgid "delete called for member %s"
+msgstr ""
+
+#: cinder/api/contrib/backups.py:143
+#, python-format
+msgid "Delete backup with id: %s"
+msgstr ""
+
+#: cinder/api/contrib/backups.py:185
+#, python-format
+msgid "Creating new backup %s"
+msgstr ""
+
+#: cinder/api/contrib/backups.py:195 cinder/api/contrib/backups.py:230
+msgid "Incorrect request body format"
+msgstr "Nesprávný formát těla požadavku"
+
+#: cinder/api/contrib/backups.py:201
+#, python-format
+msgid "Creating backup of volume %(volume_id)s in container %(container)s"
+msgstr ""
+
+#: cinder/api/contrib/backups.py:221
+#, python-format
+msgid "Restoring backup %(backup_id)s (%(body)s)"
+msgstr ""
+
+#: cinder/api/contrib/backups.py:234
+#, python-format
+msgid "Restoring backup %(backup_id)s to volume %(volume_id)s"
+msgstr ""
+
+#: cinder/api/contrib/extended_snapshot_attributes.py:61
+#, fuzzy
+msgid "Snapshot not found."
+msgstr "Server nenalezen."
+
+#: cinder/api/contrib/hosts.py:83 cinder/api/openstack/wsgi.py:158
+msgid "cannot understand XML"
+msgstr "XML nelze porozumět"
+
+#: cinder/api/contrib/hosts.py:133
+#, fuzzy, python-format
+msgid "Host '%s' could not be found."
+msgstr "Hostitel %(host)s nemohl být nalezen."
+
+#: cinder/api/contrib/hosts.py:162
+#, python-format
+msgid "Invalid status: '%s'"
+msgstr "Neplatný stav: '%s'"
+
+#: cinder/api/contrib/hosts.py:165
+#, python-format
+msgid "Invalid update setting: '%s'"
+msgstr "Neplatné nastavení aktualizace: '%s'"
+
+#: cinder/api/contrib/hosts.py:177
+#, python-format
+msgid "Setting host %(host)s to %(state)s."
+msgstr "Nastavování hostitele %(host)s na %(state)s."
+
+#: cinder/api/contrib/hosts.py:202
+msgid "Describe-resource is admin only functionality"
+msgstr "Describe-resource je funkce pouze pro správce"
+
+#: cinder/api/contrib/hosts.py:210
+msgid "Host not found"
+msgstr ""
+
+#: cinder/api/contrib/quotas.py:63
+msgid "Quota limit must be -1 or greater."
+msgstr ""
+
+#: cinder/api/contrib/types_extra_specs.py:102
+#, fuzzy
+msgid "Request body empty"
+msgstr "Nesprávný formát těla požadavku"
+
+#: cinder/api/contrib/types_extra_specs.py:106
+#: cinder/api/v1/snapshot_metadata.py:77 cinder/api/v1/volume_metadata.py:77
+#: cinder/api/v2/snapshot_metadata.py:77
+msgid "Request body and URI mismatch"
+msgstr "Neshoda mezi tělem požadavku a URI"
+
+#: cinder/api/contrib/types_extra_specs.py:109
+#: cinder/api/v1/snapshot_metadata.py:81 cinder/api/v1/volume_metadata.py:81
+#: cinder/api/v2/snapshot_metadata.py:81
+msgid "Request body contains too many items"
+msgstr "Tělo požadavku obsahuje příliš mnoho položek"
+
+#: cinder/api/contrib/volume_actions.py:159
+msgid "Invalid request body"
+msgstr "Neplatné tělo požadavku"
+
+#: cinder/api/contrib/volume_actions.py:163
+msgid "No image_name was specified in request."
+msgstr ""
+
+#: cinder/api/middleware/fault.py:45
+#, python-format
+msgid "Caught error: %s"
+msgstr "Zachycena chyba: %s"
+
+#: cinder/api/middleware/fault.py:54 cinder/api/openstack/wsgi.py:886
+#, python-format
+msgid "%(url)s returned with HTTP %(status)d"
+msgstr "%(url)s vrácena s HTTP %(status)d"
+
+#: cinder/api/middleware/sizelimit.py:54 cinder/api/middleware/sizelimit.py:63
+#: cinder/api/middleware/sizelimit.py:77
+msgid "Request is too large."
+msgstr "" + +#: cinder/api/openstack/__init__.py:74 +msgid "Must specify an ExtensionManager class" +msgstr "Musí být určena třída ExtensionManager" + +#: cinder/api/openstack/__init__.py:85 +#, python-format +msgid "Extended resource: %s" +msgstr "Rozšířený zdroj: %s" + +#: cinder/api/openstack/__init__.py:110 +#, python-format +msgid "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" +msgstr "Rozšíření %(ext_name)s: nelze rozšířit %(collection)s: Žádný takový zdroj" + +#: cinder/api/openstack/__init__.py:115 +#, python-format +msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgstr "Rozšíření %(ext_name)s: rozšiřování zdroje %(collection)s" + +#: cinder/api/openstack/__init__.py:128 +msgid "" +"cinder.api.openstack:FaultWrapper is deprecated. Please use " +"cinder.api.middleware.fault:FaultWrapper instead." +msgstr "" + +#: cinder/api/openstack/urlmap.py:25 +msgid "" +"cinder.api.openstack.urlmap:urlmap_factory is deprecated. Please use " +"cinder.api.urlmap:urlmap_factory instead." +msgstr "" + +#: cinder/api/openstack/wsgi.py:134 cinder/api/openstack/wsgi.py:537 +msgid "cannot understand JSON" +msgstr "JSON nelze porozumět" + +#: cinder/api/openstack/wsgi.py:542 +msgid "too many body keys" +msgstr "příliš mnoho klíčů těla" + +#: cinder/api/openstack/wsgi.py:581 +#, fuzzy, python-format +msgid "Exception handling resource: %s" +msgstr "Rozšířený zdroj: %s" + +#: cinder/api/openstack/wsgi.py:586 +#, python-format +msgid "Fault thrown: %s" +msgstr "Vyvolána chyba: %s" + +#: cinder/api/openstack/wsgi.py:589 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "Vyvolána výjimka HTTP: %s" + +#: cinder/api/openstack/wsgi.py:697 +msgid "Unrecognized Content-Type provided in request" +msgstr "V požadavku zadán nerozpoznaný Content-Type" + +#: cinder/api/openstack/wsgi.py:701 +msgid "No Content-Type provided in request" +msgstr "V požadavku nezadán Content-Type" + +#: cinder/api/openstack/wsgi.py:705 +msgid "Empty body provided in request" +msgstr "V požadavku zadáno prázdné tělo" + +#: cinder/api/openstack/wsgi.py:816 +#, python-format +msgid "There is no such action: %s" +msgstr "Žádná taková činnost: %s" + +#: cinder/api/openstack/wsgi.py:819 cinder/api/openstack/wsgi.py:832 +#: cinder/api/v1/snapshot_metadata.py:55 cinder/api/v1/snapshot_metadata.py:73 +#: cinder/api/v1/snapshot_metadata.py:98 cinder/api/v1/snapshot_metadata.py:123 +#: cinder/api/v1/volume_metadata.py:55 cinder/api/v1/volume_metadata.py:73 +#: cinder/api/v1/volume_metadata.py:98 cinder/api/v1/volume_metadata.py:123 +#: cinder/api/v2/snapshot_metadata.py:55 cinder/api/v2/snapshot_metadata.py:73 +#: cinder/api/v2/snapshot_metadata.py:98 cinder/api/v2/snapshot_metadata.py:123 +msgid "Malformed request body" +msgstr "Poškozené tělo požadavku" + +#: cinder/api/openstack/wsgi.py:829 +msgid "Unsupported Content-Type" +msgstr "Nepodporovaný Content-Type" + +#: cinder/api/openstack/wsgi.py:841 +msgid "Malformed request url" +msgstr "Poškozená url požadavku" + +#: cinder/api/openstack/wsgi.py:889 +#, python-format +msgid "%(url)s returned a fault: %(e)s" +msgstr "%(url)s vrátilo chybu: %(e)s" + +#: cinder/api/openstack/volume/__init__.py:25 +msgid "" +"cinder.api.openstack.volume:APIRouter is deprecated. Please use " +"cinder.api.v1.router:APIRouter instead." +msgstr "" + +#: cinder/api/openstack/volume/versions.py:26 +msgid "" +"cinder.api.openstack.volume.versions.Versions is deprecated. Please use " +"cinder.api.versions.Versions instead." 
+msgstr "" + +#: cinder/api/v1/limits.py:144 cinder/api/v2/limits.py:144 +#, python-format +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." +msgstr "" +"Pouze %(value)s požadavky %(verb)s mohou být provedeny pro %(uri)s " +"každých %(unit_string)s." + +#: cinder/api/v1/limits.py:270 cinder/api/v2/limits.py:270 +msgid "This request was rate-limited." +msgstr "Tento požadavek má omezen množství." + +#: cinder/api/v1/snapshot_metadata.py:39 cinder/api/v1/snapshot_metadata.py:119 +#: cinder/api/v1/snapshot_metadata.py:158 cinder/api/v2/snapshot_metadata.py:39 +#: cinder/api/v2/snapshot_metadata.py:119 +#: cinder/api/v2/snapshot_metadata.py:158 +#, fuzzy +msgid "snapshot does not exist" +msgstr "Instance neexistuje" + +#: cinder/api/v1/snapshot_metadata.py:141 +#: cinder/api/v1/snapshot_metadata.py:151 cinder/api/v1/volume_metadata.py:141 +#: cinder/api/v1/volume_metadata.py:151 cinder/api/v2/snapshot_metadata.py:141 +#: cinder/api/v2/snapshot_metadata.py:151 +msgid "Metadata item was not found" +msgstr "Položka metadat nenalezena" + +#: cinder/api/v1/snapshots.py:123 cinder/api/v2/snapshots.py:123 +#, python-format +msgid "Delete snapshot with id: %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:173 cinder/api/v2/snapshots.py:179 +#, python-format +msgid "Create snapshot from volume %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:177 cinder/api/v2/snapshots.py:188 +#, python-format +msgid "Invalid value '%s' for force. " +msgstr "" + +#: cinder/api/v1/volume_metadata.py:39 cinder/api/v1/volume_metadata.py:119 +#: cinder/api/v1/volume_metadata.py:158 +#, fuzzy +msgid "volume does not exist" +msgstr "Server neexistuje" + +#: cinder/api/v1/volumes.py:106 +#, python-format +msgid "vol=%s" +msgstr "" + +#: cinder/api/v1/volumes.py:236 cinder/api/v2/volumes.py:153 +#, python-format +msgid "Delete volume with id: %s" +msgstr "" + +#: cinder/api/v1/volumes.py:278 cinder/api/v1/volumes.py:282 +#: cinder/api/v2/volumes.py:209 cinder/api/v2/volumes.py:213 +msgid "Invalid imageRef provided." +msgstr "Zadáno neplatné imageRef." + +#: cinder/api/v1/volumes.py:339 cinder/api/v2/volumes.py:272 +#, python-format +msgid "Create volume of %s GB" +msgstr "Vytvořit svazek o %s GB" + +#: cinder/api/v1/volumes.py:418 +#, fuzzy, python-format +msgid "Removing options '%(bad_options)s' from query" +msgstr "Odstraňování voleb '%(unk_opt_str)s' z fronty" + +#: cinder/api/v2/volumes.py:359 +#, fuzzy, python-format +msgid "Removing options '%s' from query" +msgstr "Odstraňování voleb '%(unk_opt_str)s' z fronty" + +#: cinder/backup/api.py:53 +#, fuzzy +msgid "Backup status must be available or error" +msgstr "Obraz musí být dostupný" + +#: cinder/backup/api.py:78 +#, fuzzy +msgid "Volume to be backed up must be available" +msgstr "Obraz musí být dostupný" + +#: cinder/backup/api.py:112 +#, fuzzy +msgid "Backup status must be available" +msgstr "Obraz musí být dostupný" + +#: cinder/backup/api.py:117 +msgid "Backup to be restored has invalid size" +msgstr "" + +#: cinder/backup/api.py:126 +#, python-format +msgid "Creating volume of %(size)s GB for restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/api.py:140 +#, python-format +msgid "" +"volume size %(volume_size)d is too small to restore backup of size " +"%(size)d." 
+msgstr "" + +#: cinder/backup/api.py:145 +#, fuzzy +msgid "Volume to be restored to must be available" +msgstr "Obraz musí být dostupný" + +#: cinder/backup/api.py:151 +msgid "Volume to be restored to is smaller than the backup to be restored" +msgstr "" + +#: cinder/backup/api.py:155 +#, python-format +msgid "Overwriting volume %(volume_id)s with restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/manager.py:79 +msgid "Cleaning up incomplete backup operations" +msgstr "" + +#: cinder/backup/manager.py:83 +#, python-format +msgid "Resetting volume %s to available (was backing-up)" +msgstr "" + +#: cinder/backup/manager.py:87 +#, python-format +msgid "Resetting volume %s to error_restoring (was restoring-backup)" +msgstr "" + +#: cinder/backup/manager.py:98 +#, python-format +msgid "Resetting backup %s to error (was creating)" +msgstr "" + +#: cinder/backup/manager.py:104 +#, python-format +msgid "Resetting backup %s to available (was restoring)" +msgstr "" + +#: cinder/backup/manager.py:109 +#, python-format +msgid "Resuming delete on backup: %s" +msgstr "" + +#: cinder/backup/manager.py:119 +#, python-format +msgid "create_backup started, backup: %(backup_id)s for volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:128 +#, python-format +msgid "" +"create_backup aborted, expected volume status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:137 +#, python-format +msgid "" +"create_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:160 +#, python-format +msgid "create_backup finished. backup: %s" +msgstr "" + +#: cinder/backup/manager.py:166 +#, python-format +msgid "" +"restore_backup started, restoring backup: %(backup_id)s to volume: " +"%(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:175 +#, python-format +msgid "" +"restore_backup aborted, expected volume status %(expected_status)s but " +"got %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:183 +#, python-format +msgid "" +"restore_backup aborted, expected backup status %(expected_status)s but " +"got %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:199 +#, python-format +msgid "" +"restore_backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]" +msgstr "" + +#: cinder/backup/manager.py:220 +#, python-format +msgid "" +"restore_backup finished, backup: %(backup_id)s restored to volume: " +"%(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:228 +#, python-format +msgid "delete_backup started, backup: %s" +msgstr "" + +#: cinder/backup/manager.py:234 +#, python-format +msgid "" +"delete_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:244 +#, python-format +msgid "" +"delete_backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]" +msgstr "" + +#: cinder/backup/manager.py:264 +#, python-format +msgid "delete_backup finished, backup %s deleted" +msgstr "" + +#: cinder/backup/services/swift.py:96 +#, python-format +msgid "unsupported compression algorithm: %s" +msgstr "" + +#: cinder/backup/services/swift.py:117 +#, python-format +msgid "_check_container_exists: container: %s" +msgstr "" + +#: cinder/backup/services/swift.py:122 +#, fuzzy, 
python-format +msgid "container %s does not exist" +msgstr "Instance neexistuje" + +#: cinder/backup/services/swift.py:127 +#, fuzzy, python-format +msgid "container %s exists" +msgstr "Instance neexistuje" + +#: cinder/backup/services/swift.py:133 +#, python-format +msgid "_create_container started, container: %(container)s,backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:148 +#, python-format +msgid "_generate_swift_object_name_prefix: %s" +msgstr "" + +#: cinder/backup/services/swift.py:159 +#, fuzzy, python-format +msgid "generated object list: %s" +msgstr "Očekáván objekt typu: %s" + +#: cinder/backup/services/swift.py:169 +#, python-format +msgid "" +"_write_metadata started, container name: %(container)s, metadata " +"filename: %(filename)s" +msgstr "" + +#: cinder/backup/services/swift.py:184 +#, python-format +msgid "" +"error writing metadata file to swift, MD5 of metadata file in swift " +"[%(etag)s] is not the same as MD5 of metadata file sent to swift " +"[%(md5)s]" +msgstr "" + +#: cinder/backup/services/swift.py:188 +msgid "_write_metadata finished" +msgstr "" + +#: cinder/backup/services/swift.py:193 +#, python-format +msgid "" +"_read_metadata started, container name: %(container)s, metadata filename:" +" %(filename)s" +msgstr "" + +#: cinder/backup/services/swift.py:197 +#, python-format +msgid "_read_metadata finished (%s)" +msgstr "" + +#: cinder/backup/services/swift.py:207 +#, fuzzy, python-format +msgid "volume size %d is invalid." +msgstr "Požadavek je neplatný." + +#: cinder/backup/services/swift.py:221 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to swift, volume size: " +"%(volume_size_bytes)d, swift object names prefix %(object_prefix)s, " +"availability zone: %(availability_zone)s" +msgstr "" + +#: cinder/backup/services/swift.py:237 +msgid "reading chunk of data from volume" +msgstr "" + +#: cinder/backup/services/swift.py:244 +#, python-format +msgid "" +"compressed %(data_size_bytes)d bytes of data to %(comp_size_bytes)d bytes" +" using %(algorithm)s" +msgstr "" + +#: cinder/backup/services/swift.py:248 +msgid "not compressing data" +msgstr "" + +#: cinder/backup/services/swift.py:252 +msgid "About to put_object" +msgstr "" + +#: cinder/backup/services/swift.py:257 +#, python-format +msgid "swift MD5 for %(object_name)s: %(etag)s" +msgstr "" + +#: cinder/backup/services/swift.py:260 +#, python-format +msgid "backup MD5 for %(object_name)s: %(md5)s" +msgstr "" + +#: cinder/backup/services/swift.py:262 +#, python-format +msgid "" +"error writing object to swift, MD5 of object in swift %(etag)s is not the" +" same as MD5 of object sent to swift %(md5)s" +msgstr "" + +#: cinder/backup/services/swift.py:268 +msgid "Calling eventlet.sleep(0)" +msgstr "" + +#: cinder/backup/services/swift.py:276 +#, python-format +msgid "backup %s finished." +msgstr "" + +#: cinder/backup/services/swift.py:281 +#, python-format +msgid "v1 swift volume backup restore of %s started" +msgstr "" + +#: cinder/backup/services/swift.py:287 +#, python-format +msgid "metadata_object_names = %s" +msgstr "" + +#: cinder/backup/services/swift.py:293 +msgid "" +"restore_backup aborted, actual swift object list in swift does not match " +"object list stored in metadata" +msgstr "" + +#: cinder/backup/services/swift.py:299 +#, python-format +msgid "" +"restoring object from swift. 
backup: %(backup_id)s, container: " +"%(container)s, swift object name: %(object_name)s, volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:309 +#, python-format +msgid "decompressing data using %s algorithm" +msgstr "" + +#: cinder/backup/services/swift.py:323 +#, python-format +msgid "v1 swift volume backup restore of %s finished" +msgstr "" + +#: cinder/backup/services/swift.py:331 +#, python-format +msgid "" +"starting restore of backup %(object_prefix)s from swift container: " +"%(container)s, to volume %(volume_id)s, backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:339 +#, python-format +msgid "Restoring swift backup version %s" +msgstr "" + +#: cinder/backup/services/swift.py:344 +#, python-format +msgid "No support to restore swift backup version %s" +msgstr "" + +#: cinder/backup/services/swift.py:348 +#, python-format +msgid "restore %(backup_id)s to %(volume_id)s finished." +msgstr "" + +#: cinder/backup/services/swift.py:362 +msgid "swift error while listing objects, continuing with delete" +msgstr "" + +#: cinder/backup/services/swift.py:371 +#, python-format +msgid "swift error while deleting object %s, continuing with delete" +msgstr "" + +#: cinder/backup/services/swift.py:374 +#, python-format +msgid "deleted swift object: %(swift_object_name)s in container: %(container)s" +msgstr "" + +#: cinder/backup/services/swift.py:380 +#, python-format +msgid "delete %s finished" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:152 +#, python-format +msgid "Creating iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:171 cinder/brick/iscsi/iscsi.py:264 +#: cinder/brick/iscsi/iscsi.py:400 cinder/brick/iscsi/iscsi.py:409 +#, fuzzy, python-format +msgid "Failed to create iscsi target for volume id:%(vol_id)s." +msgstr "Nenalezeno žádné cílové id ve svazku %(volume_id)s." + +#: cinder/brick/iscsi/iscsi.py:181 +#, python-format +msgid "" +"Failed to create iscsi target for volume id:%(vol_id)s. Please ensure " +"your tgtd config file contains 'include %(volumes_dir)s/*'" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:192 +#, python-format +msgid "Removing iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:209 cinder/brick/iscsi/iscsi.py:426 +#, fuzzy, python-format +msgid "Failed to remove iscsi target for volume id:%(vol_id)s." +msgstr "Nenalezeno žádné cílové id ve svazku %(volume_id)s." 
+ +#: cinder/brick/iscsi/iscsi.py:218 cinder/brick/iscsi/iscsi.py:434 +msgid "valid iqn needed for show_target" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:270 +#, python-format +msgid "Removing iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:356 +msgid "rtstool is not installed correctly" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:376 +#, python-format +msgid "Creating iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:416 +#, python-format +msgid "Removing iscsi_target: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:455 +#, python-format +msgid "Failed to add initiator iqn %s to target" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:35 +#, python-format +msgid "Unable to find Volume Group: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:41 +#, fuzzy, python-format +msgid "Failed to create Volume Group: %s" +msgstr "Nelze získat metadata pro ip: %s" + +#: cinder/brick/local_dev/lvm.py:75 +msgid "Error creating Volume Group" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:76 +#, python-format +msgid "Cmd :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:77 +#, python-format +msgid "StdOut :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:78 +#, python-format +msgid "StdErr :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:82 +#, python-format +msgid "Unable to locate Volume Group %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:265 +#, python-format +msgid "Unable to find VG: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:284 +msgid "" +"Requested to setup thin provisioning, however current LVM version does " +"not support it." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:337 +#, fuzzy, python-format +msgid "Unable to find LV: %s" +msgstr "Nelze použít globální roli %(role_id)s" + +#: cinder/common/sqlalchemyutils.py:68 +msgid "Id not in sort_keys; is sort_keys unique?" +msgstr "" + +#: cinder/common/sqlalchemyutils.py:116 +msgid "Unknown sort direction, must be 'desc' or 'asc'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:51 +msgid "Use of empty request context is deprecated" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:178 +#, python-format +msgid "Unrecognized read_deleted value '%s'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:786 +#, python-format +msgid "Change will make usage less than 0 for the following resources: %(unders)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1797 cinder/db/sqlalchemy/api.py:1823 +#, python-format +msgid "No backend config with id %(sm_backend_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1880 +#, python-format +msgid "No sm_flavor called %(sm_flavor)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1924 +#, python-format +msgid "No sm_volume with id %(volume_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1987 +#, python-format +msgid "No backup with id %(backup_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:74 +msgid "version should be an integer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:101 +msgid "Upgrade DB using Essex release first." +msgstr "" + +#: cinder/db/sqlalchemy/session.py:71 +#, python-format +msgid "Got mysql server has gone away: %s" +msgstr "" + +#: cinder/db/sqlalchemy/session.py:132 +#, python-format +msgid "SQL connection failed. %s attempts left." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:243 +msgid "Exception while creating table." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:272 +msgid "Downgrade from initial Cinder install is unsupported." 
+msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:85 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:123 +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:82 +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:47 +#, python-format +msgid "Table |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:135 +msgid "quota_classes table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:142 +msgid "quota_usages table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:149 +msgid "reservations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:62 +msgid "Exception while creating table 'volume_glance_metedata'" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:77 +msgid "volume_glance_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:94 +msgid "backups table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:60 +msgid "snapshot_metadata table not dropped" +msgstr "" + +#: cinder/image/glance.py:151 +#, python-format +msgid "" +"Error contacting glance server '%(host)s:%(port)s' for '%(method)s', " +"%(extra)s." +msgstr "" + +#: cinder/image/image_utils.py:116 +msgid "Snapshot list encountered but no header found!" +msgstr "" + +#: cinder/image/image_utils.py:224 +msgid "'qemu-img info' parsing failed." +msgstr "" + +#: cinder/image/image_utils.py:231 +#, python-format +msgid "fmt=%(fmt)s backed by:%(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:248 +#, python-format +msgid "Converted to raw, but format is now %s" +msgstr "" + +#: cinder/image/image_utils.py:278 +#, python-format +msgid "Converted to %(f1)s, but format is now %(f2)s" +msgstr "" + +#: cinder/openstack/common/exception.py:104 +msgid "Uncaught exception" +msgstr "" + +#: cinder/openstack/common/excutils.py:48 +#, fuzzy, python-format +msgid "Original exception being dropped: %s" +msgstr "Původní výjimka je zahozena" + +#: cinder/openstack/common/lockutils.py:102 +#, python-format +msgid "Could not release the acquired lock `%s`" +msgstr "" + +#: cinder/openstack/common/lockutils.py:188 +#, fuzzy, python-format +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "Získán semafor \"%(lock)s\" pro zámek metody \"%(method)s\"" + +#: cinder/openstack/common/lockutils.py:199 +#, fuzzy, python-format +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +msgstr "Pokus o získání zámku souboru \"%(lock)s\" pro zámek metody \"%(method)s\"" + +#: cinder/openstack/common/lockutils.py:226 +#, fuzzy, python-format +msgid "Got file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "Získán zámek souboru \"%(lock)s\" pro zámek metody \"%(method)s\"" + +#: cinder/openstack/common/lockutils.py:234 +#, fuzzy, python-format +msgid "Released file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." 
+msgstr "Získán zámek souboru \"%(lock)s\" pro zámek metody \"%(method)s\"" + +#: cinder/openstack/common/log.py:226 +#, fuzzy, python-format +msgid "Deprecated: %s" +msgstr "obdrženo: %s" + +#: cinder/openstack/common/log.py:329 +#, python-format +msgid "Error loading logging config %(log_config)s: %(err_msg)s" +msgstr "" + +#: cinder/openstack/common/log.py:379 +#, python-format +msgid "syslog facility must be one of: %s" +msgstr "zařízení záznamu systému musí být jedno z: %s" + +#: cinder/openstack/common/log.py:537 +#, fuzzy, python-format +msgid "Fatal call to deprecated config: %(msg)s" +msgstr "Třída %(fullname)s je zastaralá: %(msg)s" + +#: cinder/openstack/common/loopingcall.py:84 +#, python-format +msgid "task run outlasted interval by %s sec" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:91 +#, fuzzy +msgid "in fixed duration looping call" +msgstr "v opakujícím volání" + +#: cinder/openstack/common/loopingcall.py:131 +#, python-format +msgid "Dynamic looping call sleeping for %.02f seconds" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:138 +#, fuzzy +msgid "in dynamic looping call" +msgstr "v opakujícím volání" + +#: cinder/openstack/common/policy.py:149 +#, python-format +msgid "" +"Inheritance-based rules are deprecated; use the default brain instead of " +"%s." +msgstr "" + +#: cinder/openstack/common/policy.py:163 +#, python-format +msgid "Failed to understand rule %(match)r" +msgstr "" + +#: cinder/openstack/common/policy.py:173 +#, python-format +msgid "Inheritance-based rules are deprecated; update _check_%s" +msgstr "" + +#: cinder/openstack/common/policy.py:180 +#, python-format +msgid "No handler for matches of kind %s" +msgstr "" + +#: cinder/openstack/common/service.py:112 +#: cinder/openstack/common/service.py:275 +#, fuzzy +msgid "Full set of CONF:" +msgstr "Úplná sada PŘÍZNAKŮ:" + +#: cinder/openstack/common/service.py:261 +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "" + +#: cinder/openstack/common/strutils.py:72 +#, python-format +msgid "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:125 +#, python-format +msgid "%s not in valid priorities" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:141 +#, python-format +msgid "" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:171 +#, python-format +msgid "Failed to load notifier %s. These notifications will not be sent." +msgstr "" + +#: cinder/openstack/common/notifier/rabbit_notifier.py:27 +msgid "The rabbit_notifier is now deprecated. Please use rpc_notifier instead." +msgstr "" + +#: cinder/openstack/common/notifier/rpc_notifier.py:45 +#: cinder/openstack/common/notifier/rpc_notifier2.py:51 +#, python-format +msgid "Could not send notification to %(topic)s. Payload=%(message)s" +msgstr "" + +#: cinder/openstack/common/rpc/__init__.py:106 +#, python-format +msgid "" +"A RPC is being made while holding a lock. The locks currently held are " +"%(locks)s. This is probably a bug. Please report it. Include the " +"following: [%(stack)s]." 
+msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:75 +msgid "Pool creating new connection" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:200 +#, python-format +msgid "no calling threads waiting for msg_id : %s, message : %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:208 +#, python-format +msgid "" +"Number of call waiters is greater than warning threshhold: %d. There " +"could be a MulticallProxyWaiter leak." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:291 +#, python-format +msgid "unpacked context: %s" +msgstr "rozbalený kontext: %s" + +#: cinder/openstack/common/rpc/amqp.py:337 +#, python-format +msgid "UNIQUE_ID is %s." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:406 +#, python-format +msgid "received %s" +msgstr "obdrženo: %s" + +#: cinder/openstack/common/rpc/amqp.py:414 +#, python-format +msgid "no method for message: %s" +msgstr "pro zprávu není metoda: %s" + +#: cinder/openstack/common/rpc/amqp.py:415 +#, python-format +msgid "No method for message: %s" +msgstr "Pro zprávu není metoda: %s" + +#: cinder/openstack/common/rpc/amqp.py:443 +#: cinder/openstack/common/rpc/impl_zmq.py:286 +#, python-format +msgid "Expected exception during message handling (%s)" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:451 +#: cinder/openstack/common/rpc/impl_zmq.py:292 +msgid "Exception during message handling" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:586 +#, python-format +msgid "Making synchronous call on %s ..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:589 +#, python-format +msgid "MSG_ID is %s" +msgstr "MSG_ID je %s" + +#: cinder/openstack/common/rpc/amqp.py:623 +#, python-format +msgid "Making asynchronous cast on %s..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:632 +msgid "Making asynchronous fanout cast..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:660 +#, python-format +msgid "Sending %(event_type)s on %(topic)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:74 +#, fuzzy +msgid "An unknown RPC related exception occurred." +msgstr "Vyskytla se neočekávaná výjimka." + +#: cinder/openstack/common/rpc/common.py:104 +#, python-format +msgid "" +"Remote error: %(exc_type)s %(value)s\n" +"%(traceback)s." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:121 +#, python-format +msgid "" +"Timeout while waiting on RPC response - topic: \"%(topic)s\", RPC method:" +" \"%(method)s\" info: \"%(info)s\"" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:137 +#: cinder/openstack/common/rpc/common.py:138 +#: cinder/openstack/common/rpc/common.py:139 +msgid "" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:143 +#, python-format +msgid "Found duplicate message(%(msg_id)s). Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:147 +msgid "Invalid reuse of an RPC connection." +msgstr "Neplatné znovu použití připojení RPC." + +#: cinder/openstack/common/rpc/common.py:151 +#, python-format +msgid "Specified RPC version, %(version)s, not supported by this endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:156 +#, python-format +msgid "" +"Specified RPC envelope version, %(version)s, not supported by this " +"endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:280 +#, python-format +msgid "Failed to sanitize %(item)s. 
Key error %(err)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:302 +#, python-format +msgid "Returning exception %s to caller" +msgstr "Volajícímu je vrácena výjimka: %s" + +#: cinder/openstack/common/rpc/impl_kombu.py:169 +#: cinder/openstack/common/rpc/impl_qpid.py:133 +msgid "Failed to process message... skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:480 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:502 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:539 +#, python-format +msgid "" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:555 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:609 +#: cinder/openstack/common/rpc/impl_qpid.py:403 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:627 +#: cinder/openstack/common/rpc/impl_qpid.py:418 +#, python-format +msgid "Timed out waiting for RPC response: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:631 +#: cinder/openstack/common/rpc/impl_qpid.py:422 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:670 +#: cinder/openstack/common/rpc/impl_qpid.py:457 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:351 +#, python-format +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:357 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:370 +msgid "Re-established AMQP queues" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:430 +msgid "Error processing message. Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:97 +msgid "JSON serialization failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:104 +#, fuzzy, python-format +msgid "Deserializing: %s" +msgstr "Popis roz: %s" + +#: cinder/openstack/common/rpc/impl_zmq.py:140 +#, python-format +msgid "Connecting to %(addr)s with %(type)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:141 +#, python-format +msgid "-> Subscribed to %(subscribe)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:142 +#, python-format +msgid "-> bind: %(bind)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:150 +msgid "Could not open socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:162 +#, python-format +msgid "Subscribing to %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:204 +msgid "You cannot recv on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:209 +msgid "You cannot send on this socket." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:273 +#, fuzzy, python-format +msgid "Running func with context: %s" +msgstr "rozbalený kontext: %s" + +#: cinder/openstack/common/rpc/impl_zmq.py:311 +msgid "Sending reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:345 +msgid "RPC message did not include method." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:380 +#, fuzzy +msgid "Registering reactor" +msgstr "Zrušení registrace obrazu %s" + +#: cinder/openstack/common/rpc/impl_zmq.py:392 +msgid "In reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:407 +msgid "Out reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:411 +msgid "Consuming socket" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:453 +#, python-format +msgid "CONSUMER GOT %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:465 +#, python-format +msgid "Creating proxy for topic: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:471 +msgid "Topic contained dangerous characters." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:496 +#, python-format +msgid "ROUTER RELAY-OUT SUCCEEDED %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:505 +msgid "Topic socket file creation failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:510 +#, python-format +msgid "ROUTER RELAY-OUT QUEUED %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:513 +#, python-format +msgid "Local per-topic backlog buffer full for topic %(topic)s. Dropping message." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:532 +#, python-format +msgid "Could not create IPC directory %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:542 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:576 +#, python-format +msgid "CONSUMER RECEIVED DATA: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:578 +#, python-format +msgid "ROUTER RELAY-OUT %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:600 +msgid "ZMQ Envelope version unsupported or unknown." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:628 +msgid "Skipping topic registration. Already registered." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:635 +#, python-format +msgid "Consumer is a zmq.%s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:687 +msgid "Creating payload" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:700 +msgid "Creating queue socket for reply waiter" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:713 +msgid "Sending cast" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:716 +msgid "Cast sent; Waiting reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:719 +#, fuzzy, python-format +msgid "Received message: %s" +msgstr "obdrženo: %s" + +#: cinder/openstack/common/rpc/impl_zmq.py:720 +msgid "Unpacking response" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:729 +msgid "Unsupported or unknown ZMQ envelope returned." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:736 +#, fuzzy +msgid "RPC Message Invalid." +msgstr "Požadavek je neplatný." + +#: cinder/openstack/common/rpc/impl_zmq.py:760 +#, python-format +msgid "%(msg)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:763 +#, python-format +msgid "Sending message(s) to: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:767 +msgid "No matchmaker results. 
Not casting." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:770 +msgid "No match from matchmaker." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:53 +msgid "Match not found by MatchMaker." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:89 +msgid "Matchmaker does not implement registration or heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:239 +#, python-format +msgid "Matchmaker unregistered: %s, %s" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:250 +msgid "Register before starting heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:343 +#: cinder/openstack/common/rpc/matchmaker.py:361 +#, python-format +msgid "No key defining hosts for topic '%s', see ringfile" +msgstr "" + +#: cinder/scheduler/chance.py:51 cinder/scheduler/simple.py:90 +msgid "Is the appropriate service running?" +msgstr "" + +#: cinder/scheduler/chance.py:56 +#, fuzzy +msgid "Could not find another host" +msgstr "Nelze najít %s v požadavku." + +#: cinder/scheduler/driver.py:89 +msgid "Must implement a fallback schedule" +msgstr "" + +#: cinder/scheduler/driver.py:93 +msgid "Must implement schedule_create_volume" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:113 +msgid "Invalid value for 'scheduler_max_attempts', must be >=1" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:131 +#, python-format +msgid "" +"Error scheduling %(volume_id)s from last vol-service: %(last_host)s : " +"%(exc)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:160 +#, python-format +msgid "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:208 +#, python-format +msgid "Filtered %(hosts)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:214 +#, python-format +msgid "Choosing %(best_host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:240 +#, python-format +msgid "Ignoring %(service_name)s service update from %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:244 +#, python-format +msgid "Received %(service_name)s service update from %(host)s." +msgstr "" + +#: cinder/scheduler/host_manager.py:266 +msgid "service is down or disabled." +msgstr "" + +#: cinder/scheduler/manager.py:119 +#, python-format +msgid "Failed to schedule_%(method)s: %(ex)s" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:69 +#, python-format +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:78 +#, python-format +msgid "Could not decode scheduler options: '%(e)s'" +msgstr "" + +#: cinder/scheduler/simple.py:79 +msgid "Not enough allocatable volume gigabytes remaining" +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:37 +msgid "Free capacity not set: volume node info collection broken." +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:51 +#, python-format +msgid "" +"Insufficient free space for volume creation (requested / avail): " +"%(requested)s/%(available)s" +msgstr "" + +#: cinder/scheduler/filters/retry_filter.py:41 +#, python-format +msgid "Host %(host)s %(pass_msg)s. 
Previously tried hosts: %(hosts)s" +msgstr "" + +#: cinder/tests/fake_driver.py:45 cinder/volume/driver.py:506 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "" + +#: cinder/tests/fake_driver.py:93 cinder/tests/fake_driver.py:98 +#, python-format +msgid "LoggingVolumeDriver: %s" +msgstr "" + +#: cinder/tests/fake_utils.py:72 +#, python-format +msgid "Faking execution of cmd (subprocess): %s" +msgstr "" + +#: cinder/tests/fake_utils.py:80 +#, python-format +msgid "Faked command matched %s" +msgstr "" + +#: cinder/tests/fake_utils.py:96 +#, python-format +msgid "Faked command raised an exception %s" +msgstr "" + +#: cinder/tests/fake_utils.py:101 +#, python-format +msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" +msgstr "" + +#: cinder/tests/test_misc.py:59 +#, python-format +msgid "" +"The following migrations are missing a downgrade:\n" +"\t%s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:243 +#, python-format +msgid "unrecognized argument %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1217 +#, python-format +msgid "Run CLI command: %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1220 +#, python-format +msgid "" +"CLI output:\n" +" stdout: %(stdout)s\n" +" stderr: %(stderr)s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1225 +#, python-format +msgid "" +"CLI Exception output:\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/tests/test_volume_types.py:58 +#, python-format +msgid "Given data: %s" +msgstr "" + +#: cinder/tests/test_volume_types.py:59 +#, python-format +msgid "Result data: %s" +msgstr "" + +#: cinder/tests/api/contrib/test_backups.py:638 +#, fuzzy +msgid "Invalid input" +msgstr "Neplatný snímek" + +#: cinder/tests/integrated/test_login.py:31 +#, fuzzy, python-format +msgid "volume: %s" +msgstr "Odpojit svazek %s" + +#: cinder/tests/integrated/api/client.py:37 +#, python-format +msgid "" +"%(message)s\n" +"Status Code: %(_status)s\n" +"Body: %(_body)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:46 +msgid "Authentication error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:54 +msgid "Authorization error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:62 +msgid "Item not found" +msgstr "" + +#: cinder/tests/integrated/api/client.py:104 +#, python-format +msgid "Doing %(method)s on %(relative_url)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:106 +#, python-format +msgid "Body: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:124 +#, python-format +msgid "%(auth_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:150 +#, python-format +msgid "%(relative_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:160 +msgid "Unexpected status code" +msgstr "" + +#: cinder/tests/integrated/api/client.py:167 +#, python-format +msgid "Decoding JSON: %s" +msgstr "" + +#: cinder/volume/api.py:96 +msgid "May specify only one of snapshot, imageRef or source volume" +msgstr "" + +#: cinder/volume/api.py:103 cinder/volume/api.py:449 +#: cinder/volume/manager.py:554 +msgid "status must be available" +msgstr "" + +#: cinder/volume/api.py:108 +msgid "Volume size cannot be lesser than the Snapshot size" +msgstr "" + +#: cinder/volume/api.py:117 +msgid "Unable to clone volumes that are in an error state" +msgstr "" + +#: cinder/volume/api.py:123 +msgid "Clones currently must be >= original volume size." 
+msgstr "" + +#: cinder/volume/api.py:140 +#, python-format +msgid "Volume size '%s' must be an integer and greater than 0" +msgstr "" + +#: cinder/volume/api.py:150 +msgid "Size of specified image is larger than volume size." +msgstr "" + +#: cinder/volume/api.py:154 +msgid "Image minDisk size is larger than the volume size." +msgstr "" + +#: cinder/volume/api.py:168 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/api.py:177 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create volume (%(d_consumed)d " +"volumes already consumed)" +msgstr "" + +#: cinder/volume/api.py:308 +msgid "Failed to update quota for deleting volume" +msgstr "" + +#: cinder/volume/api.py:316 +#, fuzzy +msgid "Volume status must be available or error" +msgstr "Obraz musí být dostupný" + +#: cinder/volume/api.py:321 +#, python-format +msgid "Volume still has %d dependent snapshots" +msgstr "" + +#: cinder/volume/api.py:374 cinder/volume/api.py:432 +#: cinder/volume/volume_types.py:65 +#, python-format +msgid "Searching by: %s" +msgstr "" + +#: cinder/volume/api.py:452 +msgid "already attached" +msgstr "" + +#: cinder/volume/api.py:459 +msgid "already detached" +msgstr "" + +#: cinder/volume/api.py:470 +#, fuzzy +msgid "Volume status must be available to reserve" +msgstr "Obraz musí být dostupný" + +#: cinder/volume/api.py:519 +msgid "must be available" +msgstr "" + +#: cinder/volume/api.py:537 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG snapshot " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/api.py:546 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create snapshot (%(d_consumed)d " +"snapshots already consumed)" +msgstr "" + +#: cinder/volume/api.py:595 +msgid "Volume Snapshot status must be available or error" +msgstr "" + +#: cinder/volume/api.py:623 +msgid "Metadata property key blank" +msgstr "" + +#: cinder/volume/api.py:627 +msgid "Metadata property key greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:631 +msgid "Metadata property value greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:716 +#, fuzzy +msgid "Volume status must be available/in-use." +msgstr "Obraz musí být dostupný" + +#: cinder/volume/api.py:719 +msgid "Volume status is in-use." +msgstr "" + +#: cinder/volume/driver.py:94 cinder/volume/drivers/netapp/nfs.py:253 +#, python-format +msgid "Recovering from a failed execute. Try number %s" +msgstr "" + +#: cinder/volume/driver.py:221 cinder/volume/drivers/emc/emc_smis_iscsi.py:119 +msgid "ISCSI provider_location not stored, using discovery" +msgstr "" + +#: cinder/volume/driver.py:269 +#, fuzzy, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "Nelze najít obslužnou rutinu pro svazek %(driver_type)s." + +#: cinder/volume/driver.py:273 cinder/volume/drivers/emc/emc_smis_iscsi.py:162 +#, python-format +msgid "ISCSI Discovery: Found %s" +msgstr "" + +#: cinder/volume/driver.py:361 +#, python-format +msgid "copy_image_to_volume %s." +msgstr "" + +#: cinder/volume/driver.py:378 +#, python-format +msgid "copy_volume_to_image %s." +msgstr "" + +#: cinder/volume/driver.py:437 +#, python-format +msgid "iSCSI device not found at %s" +msgstr "" + +#: cinder/volume/driver.py:439 +#, python-format +msgid "" +"ISCSI volume not yet found at: %(host_device)s. Will rescan & retry. 
Try" +" number: %(tries)s" +msgstr "" + +#: cinder/volume/driver.py:451 +#, python-format +msgid "Found iSCSI node %(host_device)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/volume/driver.py:469 cinder/volume/manager.py:689 +#: cinder/volume/drivers/lvm.py:560 cinder/volume/drivers/lvm.py:676 +#: cinder/volume/drivers/storwize_svc.py:1344 +#: cinder/volume/drivers/emc/emc_smis_common.py:855 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:241 +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1503 +#: cinder/volume/drivers/netapp/iscsi.py:1114 +#: cinder/volume/drivers/netapp/iscsi.py:1484 +#: cinder/volume/drivers/netapp/iscsi.py:2236 +#: cinder/volume/drivers/netapp/iscsi.py:2515 +#: cinder/volume/drivers/nexenta/volume.py:317 +msgid "Updating volume status" +msgstr "" + +#: cinder/volume/driver.py:544 +msgid "Driver must implement initialize_connection" +msgstr "" + +#: cinder/volume/manager.py:122 +#, python-format +msgid "Driver path %s is deprecated, update your configuration to the new path." +msgstr "" + +#: cinder/volume/manager.py:144 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "" + +#: cinder/volume/manager.py:149 +#, python-format +msgid "volume %s stuck in a downloading state" +msgstr "" + +#: cinder/volume/manager.py:154 +#, python-format +msgid "volume %s: skipping export" +msgstr "" + +#: cinder/volume/manager.py:156 +msgid "Resuming any in progress delete operations" +msgstr "" + +#: cinder/volume/manager.py:159 +#, python-format +msgid "Resuming delete on volume: %s" +msgstr "" + +#: cinder/volume/manager.py:219 +#, python-format +msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +msgstr "" + +#: cinder/volume/manager.py:228 +#, fuzzy, python-format +msgid "volume %s: creating from snapshot" +msgstr "Vytvořit svazek ze snímku %s" + +#: cinder/volume/manager.py:232 +#, python-format +msgid "volume %s: creating from existing volume" +msgstr "" + +#: cinder/volume/manager.py:236 +#, python-format +msgid "volume %s: creating from image" +msgstr "" + +#: cinder/volume/manager.py:245 +#, python-format +msgid "volume %s: creating" +msgstr "" + +#: cinder/volume/manager.py:277 +#, python-format +msgid "volume %s: creating export" +msgstr "" + +#: cinder/volume/manager.py:286 +#, python-format +msgid "volume %s: create failed" +msgstr "" + +#: cinder/volume/manager.py:316 +#, python-format +msgid "volume %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:324 +#, fuzzy, python-format +msgid "Error: %s" +msgstr "Zachycena chyba: %s" + +#: cinder/volume/manager.py:350 +#, python-format +msgid "volume %s: Error trying to reschedule create" +msgstr "" + +#: cinder/volume/manager.py:368 +msgid "Retry info not present, will not reschedule" +msgstr "" + +#: cinder/volume/manager.py:372 +msgid "No request spec, will not reschedule" +msgstr "" + +#: cinder/volume/manager.py:377 +#, python-format +msgid "volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d" +msgstr "" + +#: cinder/volume/manager.py:406 cinder/volume/manager.py:419 +#, python-format +msgid "volume %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:412 +msgid "volume is not local to this node" +msgstr "" + +#: cinder/volume/manager.py:417 +#, python-format +msgid "volume %s: removing export" +msgstr "" + +#: cinder/volume/manager.py:422 +#, python-format +msgid "volume %s: volume is busy" +msgstr "" + +#: cinder/volume/manager.py:441 +msgid "Failed to update usages deleting volume" +msgstr "" + +#: cinder/volume/manager.py:445 +#, python-format +msgid "volume %s: 
deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:460 +#, python-format +msgid "snapshot %s: creating" +msgstr "" + +#: cinder/volume/manager.py:466 +#, python-format +msgid "snapshot %(snap_name)s: creating" +msgstr "" + +#: cinder/volume/manager.py:484 +#, python-format +msgid "snapshot %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:492 cinder/volume/manager.py:502 +#, python-format +msgid "snapshot %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:505 +#, python-format +msgid "snapshot %s: snapshot is busy" +msgstr "" + +#: cinder/volume/manager.py:530 +msgid "Failed to update usages deleting snapshot" +msgstr "" + +#: cinder/volume/manager.py:533 +#, python-format +msgid "snapshot %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:551 +msgid "being attached by another instance" +msgstr "" + +#: cinder/volume/manager.py:608 +#, python-format +msgid "Downloaded image %(image_id)s to %(volume_id)s successfully" +msgstr "" + +#: cinder/volume/manager.py:626 +#, fuzzy, python-format +msgid "Uploaded volume %(volume_id)s to image (%(image_id)s) successfully" +msgstr "Připojit svazek %(volume_id)s k instanci %(instance_id)s na %(device)s" + +#: cinder/volume/manager.py:702 +msgid "Clear capabilities" +msgstr "" + +#: cinder/volume/manager.py:706 +#, python-format +msgid "Notification {%s} received" +msgstr "" + +#: cinder/volume/volume_types.py:41 +#, python-format +msgid "DB error: %s" +msgstr "" + +#: cinder/volume/volume_types.py:50 cinder/volume/volume_types.py:97 +msgid "id cannot be None" +msgstr "" + +#: cinder/volume/volume_types.py:109 +msgid "name cannot be None" +msgstr "" + +#: cinder/volume/volume_types.py:128 +#, python-format +msgid "" +"Default volume type is not found, please check default_volume_type " +"config: %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:91 +msgid "Running with CoraidDriver for ESM EtherCLoud" +msgstr "" + +#: cinder/volume/drivers/coraid.py:102 +#, python-format +msgid "Update session cookie %(session)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:108 cinder/volume/drivers/coraid.py:183 +#, fuzzy, python-format +msgid "Message : %(message)s" +msgstr "%(code)s: %(message)s" + +#: cinder/volume/drivers/coraid.py:125 +#, python-format +msgid "Error while trying to set group: %(message)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:128 +#, fuzzy, python-format +msgid "Unable to find group: %(group)s" +msgstr "Nelze použít globální roli %(role_id)s" + +#: cinder/volume/drivers/coraid.py:161 +msgid "ESM urlOpen error" +msgstr "" + +#: cinder/volume/drivers/coraid.py:166 +#, fuzzy +msgid "JSON Error" +msgstr "Chyba přesunu" + +#: cinder/volume/drivers/coraid.py:170 +msgid "Request without URL" +msgstr "" + +#: cinder/volume/drivers/coraid.py:175 +#, python-format +msgid "Configure data : %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:177 +#, fuzzy, python-format +msgid "Configure response : %s" +msgstr "odpověď %s" + +#: cinder/volume/drivers/coraid.py:199 +#, fuzzy, python-format +msgid "Unable to retrive volume infos for volume %(volname)s" +msgstr "Nenalezeno žádné cílové id ve svazku %(volume_id)s." 
+
+#: cinder/volume/drivers/coraid.py:289
+msgid "Cannot login on Coraid ESM"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:308
+#, python-format
+msgid "Fail to create volume %(volname)s"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:321
+#, python-format
+msgid "Failed to delete volume %(volname)s"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:335
+#, fuzzy, python-format
+msgid "Failed to Create Snapshot %(snapname)s"
+msgstr "Nelze restartovat instanci"
+
+#: cinder/volume/drivers/coraid.py:347
+#, python-format
+msgid "Failed to Delete Snapshot %(snapname)s"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:362
+#, fuzzy, python-format
+msgid "Failed to Create Volume from Snapshot %(snapname)s"
+msgstr "Vytvořit svazek ze snímku %s"
+
+#: cinder/volume/drivers/coraid.py:383
+#, python-format
+msgid ""
+"Failed to Initialize Connection. Volume Name: %(volname)s Shelf: "
+"%(shelf)s, Lun: %(lun)s"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:65
+#, python-format
+msgid "There's no Gluster config file configured (%s)"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:70
+#, python-format
+msgid "Gluster config file at %(config)s doesn't exist"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:80
+msgid "mount.glusterfs is not installed"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:98 cinder/volume/drivers/nfs.py:168
+#, python-format
+msgid "casted to %s"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:108 cinder/volume/drivers/nfs.py:178
+#, python-format
+msgid "Volume %s does not have provider_location specified, skipping"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:168 cinder/volume/drivers/nfs.py:237
+#, fuzzy, python-format
+msgid "Exception during mounting %s"
+msgstr "Výjimka při načítání rozšíření: %s"
+
+#: cinder/volume/drivers/glusterfs.py:247 cinder/volume/drivers/nfs.py:321
+#, fuzzy, python-format
+msgid "%s is already mounted"
+msgstr "skupina %s již existuje"
+
+#: cinder/volume/drivers/lvm.py:82
+#, python-format
+msgid "volume group %s doesn't exist"
+msgstr ""
+
+#: cinder/volume/drivers/lvm.py:197
+#, python-format
+msgid "Size for volume: %s not found, skipping secure delete."
+msgstr ""
+
+#: cinder/volume/drivers/lvm.py:204
+#, python-format
+msgid "Performing secure delete on volume: %s"
+msgstr ""
+
+#: cinder/volume/drivers/lvm.py:218
+#, python-format
+msgid "Error unrecognized volume_clear option: %s"
+msgstr ""
+
+#: cinder/volume/drivers/lvm.py:238
+#, python-format
+msgid "snapshot: %s not found, skipping delete operations"
+msgstr ""
+
+#: cinder/volume/drivers/lvm.py:268 cinder/volume/drivers/lvm.py:656
+#, fuzzy, python-format
+msgid "Creating clone of volume: %s"
+msgstr "Vytvořit snímek svazku %s"
+
+#: cinder/volume/drivers/lvm.py:345
+#, python-format
+msgid "Skipping ensure_export. No iscsi_target provision for volume: %s"
+msgstr ""
+
+#: cinder/volume/drivers/lvm.py:364
+#, python-format
+msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %s"
+msgstr ""
+
+#: cinder/volume/drivers/lvm.py:378
+msgid "Detected inconsistency in provider_location id"
+msgstr ""
+
+#: cinder/volume/drivers/lvm.py:430
+#, fuzzy, python-format
+msgid "Symbolic link %s not found"
+msgstr "značka [%s] nenalezena"
+
+#: cinder/volume/drivers/lvm.py:510 cinder/volume/drivers/lvm.py:524
+#, python-format
+msgid "Skipping remove_export. No iscsi_target provisioned for volume: %s"
+msgstr ""
+
+#: cinder/volume/drivers/lvm.py:542
+#, python-format
+msgid ""
+"Skipping remove_export. No iscsi_target is presently exported for volume:"
+" %s"
+msgstr ""
+
+#: cinder/volume/drivers/lvm.py:583
+msgid "Error retrieving volume status: "
+msgstr ""
+
+#: cinder/volume/drivers/nfs.py:141
+#, python-format
+msgid "There's no NFS config file configured (%s)"
+msgstr ""
+
+#: cinder/volume/drivers/nfs.py:146
+#, python-format
+msgid "NFS config file at %(config)s doesn't exist"
+msgstr ""
+
+#: cinder/volume/drivers/rbd.py:64
+#, python-format
+msgid "rbd has no pool %s"
+msgstr ""
+
+#: cinder/volume/drivers/rbd.py:87 cinder/volume/drivers/sheepdog.py:134
+msgid "error refreshing volume stats"
+msgstr ""
+
+#: cinder/volume/drivers/rbd.py:220
+msgid "Not stored in rbd"
+msgstr ""
+
+#: cinder/volume/drivers/rbd.py:224
+msgid "Blank components"
+msgstr ""
+
+#: cinder/volume/drivers/rbd.py:227
+#, fuzzy
+msgid "Not an rbd snapshot"
+msgstr "Neplatný snímek"
+
+#: cinder/volume/drivers/rbd.py:242
+#, python-format
+msgid "%s is in a different ceph cluster"
+msgstr ""
+
+#: cinder/volume/drivers/rbd.py:253
+#, python-format
+msgid "Unable to read image %s"
+msgstr ""
+
+#: cinder/volume/drivers/scality.py:63
+msgid "Value required for 'scality_sofs_config'"
+msgstr ""
+
+#: cinder/volume/drivers/scality.py:74
+#, python-format
+msgid "Cannot access 'scality_sofs_config': %s"
+msgstr ""
+
+#: cinder/volume/drivers/scality.py:80
+msgid "Cannot execute /sbin/mount.sofs"
+msgstr ""
+
+#: cinder/volume/drivers/scality.py:101
+msgid "Cannot mount Scality SOFS, check syslog for errors"
+msgstr ""
+
+#: cinder/volume/drivers/scality.py:135
+#, python-format
+msgid "Cannot find volume dir for Scality SOFS at '%s'"
+msgstr ""
+
+#: cinder/volume/drivers/sheepdog.py:47
+#, python-format
+msgid "Sheepdog is not working: %s"
+msgstr ""
+
+#: cinder/volume/drivers/sheepdog.py:52
+msgid "Sheepdog is not working"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:129
+#, python-format
+msgid "Payload for SolidFire API call: %s"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:146
+#, python-format
+msgid "Call to json.loads() raised an exception: %s"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:152
+#, python-format
+msgid "Results of SolidFire API call: %s"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:156
+#, python-format
+msgid "Clone operation encountered: %s"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:158
+#, python-format
+msgid "Waiting for outstanding operation before retrying snapshot: %s"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:164
+#, python-format
+msgid "Detected xDBVersionMismatch, retry %s of 5"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:171
+#, fuzzy, python-format
+msgid "API response: %s"
+msgstr "odpověď %s"
+
+#: cinder/volume/drivers/solidfire.py:191
+#, python-format
+msgid "Found solidfire account: %s"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:220
+#, python-format
+msgid "solidfire account: %s does not exist, create it..."
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:281
+#, python-format
+msgid "Failed to retrieve volume SolidFire-ID: %s in get_by_account!"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:338
+#, fuzzy
+msgid "Failed to get model update from clone"
+msgstr "Nelze získat metadata pro ip: %s"
+
+#: cinder/volume/drivers/solidfire.py:364
+#, python-format
+msgid "More than one valid preset was detected, using %s"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:397
+#, python-format
+msgid "Mapped SolidFire volumeID %(sfid)s to cinder ID %(uuid)s."
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:406
+#, python-format
+msgid "Volume %s, not found on SF Cluster."
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:409
+#, python-format
+msgid "Found %(count)s volumes mapped to id: %(uuid)s."
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:476
+msgid "Enter SolidFire delete_volume..."
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:480
+#, python-format
+msgid "Account for Volume ID %s was not found on the SolidFire Cluster!"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:482
+msgid "This usually means the volume was never succesfully created."
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:497
+#, python-format
+msgid "Volume ID %s was not found on the SolidFire Cluster!"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:500
+msgid "Leaving SolidFire delete_volume"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:504
+msgid "Executing SolidFire ensure_export..."
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:509
+msgid "Executing SolidFire create_export..."
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:558
+msgid "Updating cluster status info"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:566
+#, fuzzy
+msgid "Failed to get updated stats"
+msgstr "Nelze vytvořit typ instance"
+
+#: cinder/volume/drivers/storwize_svc.py:173
+#, python-format
+msgid "WWPN on node %(node)s: %(wwpn)s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:179
+msgid "enter: do_setup"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:190
+#, fuzzy, python-format
+msgid "pool %s doesn't exist"
+msgstr "Instance neexistuje"
+
+#: cinder/volume/drivers/storwize_svc.py:206
+msgid "Failed to get license information."
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:265
+msgid "do_setup: No configured nodes"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:267
+msgid "leave: do_setup"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:289
+msgid "enter: check_for_setup_error"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:295
+#: cinder/volume/drivers/netapp/iscsi.py:169
+#: cinder/volume/drivers/netapp/iscsi.py:1182
+#: cinder/volume/drivers/netapp/iscsi.py:1535
+#: cinder/volume/drivers/netapp/nfs.py:109
+#: cinder/volume/drivers/netapp/nfs.py:328
+#: cinder/volume/drivers/netapp/nfs.py:379
+#: cinder/volume/drivers/san/hp/hp_3par_common.py:118
+#, python-format
+msgid "%s is not set"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:301
+msgid ""
+"Password or SSH private key is required for authentication: set either "
+"san_password or san_private_key option"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:309
+#, python-format
+msgid ""
+"Illegal value %d specified for storwize_svc_flashcopy_timeout: valid "
+"values are between 0 and 600"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:317
+msgid "leave: check_for_setup_error"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:331
+#, python-format
+msgid "ensure_export: Volume %s not found on storage"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:356
+#, python-format
+msgid "enter: _get_chap_secret_for_host: host name %s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:392
+#, python-format
+msgid ""
+"leave: _get_chap_secret_for_host: host name %(host_name)s with secret "
+"%(chap_secret)s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:415
+msgid "_create_host: Cannot clean host name. Host name is not unicode or string"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:483
+#, python-format
+msgid "enter: _get_host_from_connector: prefix %s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:511
+#, python-format
+msgid "leave: _get_host_from_connector: host %s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:523
+#, python-format
+msgid "enter: _create_host: host %s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:538
+msgid "_create_host: No connector ports"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:551
+#, fuzzy, python-format
+msgid "leave: _create_host: host %(host)s - %(host_name)s"
+msgstr "Nastavování hostitele %(host)s na %(state)s."
+
+#: cinder/volume/drivers/storwize_svc.py:574
+#, fuzzy, python-format
+msgid "enter: _map_vol_to_host: volume %(volume_name)s to host %(host_name)s"
+msgstr "mazání svazku %(volume_name)s který má snímek"
+
+#: cinder/volume/drivers/storwize_svc.py:609
+msgid ""
+"storwize_svc_multihostmap_enabled is set to Flase, Not allow multi host "
+"mapping"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:619
+#, python-format
+msgid "volume %s mapping to multi host"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:625
+#, python-format
+msgid ""
+"leave: _map_vol_to_host: LUN %(result_lun)s, volume %(volume_name)s, host"
+" %(host_name)s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:635
+#, python-format
+msgid "enter: _delete_host: host %s "
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:643
+#, python-format
+msgid "leave: _delete_host: host %s "
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:675
+#, python-format
+msgid "enter: initialize_connection: volume %(vol)s with connector %(conn)s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:691
+msgid "_create_host failed to return the host name."
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:702
+#, python-format
+msgid "initialize_connection: Failed to get attributes for volume %s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:709
+#, python-format
+msgid "Did not find expected column name in lsvdisk: %s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:711
+#, python-format
+msgid "initialize_connection: Missing volume attribute for volume %s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:728
+#, python-format
+msgid ""
+"initialize_connection: No node found in I/O group %(gid)s for volume "
+"%(vol)s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:736
+#, python-format
+msgid "initialize_connection: Did not find a preferred node for volume %s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:769
+#, python-format
+msgid ""
+"initialize_connection: Failed to collect return properties for volume "
+"%(vol)s and connector %(conn)s.\n"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:774
+#, python-format
+msgid ""
+"leave: initialize_connection:\n"
+" volume: %(vol)s\n"
+" connector %(conn)s\n"
+" properties: %(prop)s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:792
+#, python-format
+msgid "enter: terminate_connection: volume %(vol)s with connector %(conn)s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:802
+msgid "_get_host_from_connector failed to return the host name for connector"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:817
+#, python-format
+msgid ""
+"terminate_connection: No mapping of volume %(vol_name)s to host "
+"%(host_name)s found"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:825
+#, python-format
+msgid "leave: terminate_connection: volume %(vol)s with connector %(conn)s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:883
+msgid "protocol must be specified as ' iSCSI' or ' FC'"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:907
+#, python-format
+msgid "enter: _create_vdisk: vdisk %s "
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:945
+#, python-format
+msgid ""
+"_create_vdisk %(name)s - did not find success message in CLI output.\n"
+" stdout: %(out)s\n"
+" stderr: %(err)s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:950
+#, python-format
+msgid "leave: _create_vdisk: volume %s "
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:962
+#: cinder/volume/drivers/storwize_svc.py:976
+#, python-format
+msgid ""
+"create FC mapping from %(source)s to %(target)s - did not find success "
+"message in CLI output.\n"
+" stdout: %(out)s\n"
+" stderr: %(err)s\n"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:988
+#: cinder/volume/drivers/storwize_svc.py:998
+#, python-format
+msgid ""
+"create FC mapping from %(source)s to %(target)s - did not find mapping id"
+" in CLI output.\n"
+" stdout: %(out)s\n"
+" stderr: %(err)s\n"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:1012
+#, python-format
+msgid ""
+"_prepare_fc_map: Failed to prepare FlashCopy from %(source)s to "
+"%(target)s.\n"
+"stdout: %(out)s\n"
+" stderr: %(err)s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:1039
+#, python-format
+msgid ""
+"Unexecpted mapping status %(status)s for mapping %(id)s. Attributes: "
+"%(attr)s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:1050
+#, python-format
+msgid ""
+"Mapping %(id)s prepare failed to complete within the allotted %(to)d "
+"seconds timeout. Terminating."
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:1055
+#, python-format
+msgid ""
+"_prepare_fc_map: Failed to start FlashCopy from %(source)s to %(target)s "
+"with exception %(ex)s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:1062
+#, python-format
+msgid "_prepare_fc_map: %s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:1069
+#, python-format
+msgid ""
+"_start_fc_map: Failed to start FlashCopy from %(source)s to %(target)s.\n"
+"stdout: %(out)s\n"
+" stderr: %(err)s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:1080
+#, python-format
+msgid ""
+"enter: _run_flashcopy: execute FlashCopy from source %(source)s to target"
+" %(target)s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:1092
+#, python-format
+msgid "leave: _run_flashcopy: FlashCopy started from %(source)s to %(target)s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:1100
+#, python-format
+msgid "enter: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:1107
+#, python-format
+msgid "_create_copy: Source vdisk %s does not exist"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:1119
+#, python-format
+msgid ""
+"_create_copy: cannot get source vdisk %(src)s capacity from vdisk "
+"attributes %(attr)s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:1129
+#, python-format
+msgid "leave: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:1134
+#, python-format
+msgid "enter: _get_flashcopy_mapping_attributes: mapping %s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:1156
+#, python-format
+msgid ""
+"leave: _get_flashcopy_mapping_attributes: mapping %(fc_map_id)s, "
+"attributes %(attributes)s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:1164
+#, python-format
+msgid "enter: _is_vdisk_defined: vdisk %s "
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:1166
+#, python-format
+msgid "leave: _is_vdisk_defined: vdisk %(vol)s with %(str)s "
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:1194
+#, python-format
+msgid "enter: _delete_vdisk: vdisk %s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:1199
+#, python-format
+msgid "warning: Tried to delete vdisk %s but it does not exist."
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:1225
+#, python-format
+msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:1258
+#, python-format
+msgid "leave: _delete_vdisk: vdisk %s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:1283
+msgid "create_volume_from_snapshot: Source and destination size differ."
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:1297
+msgid "create_cloned_volume: Source and destination size differ."
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:1361
+msgid "_update_volume_status: Could not get system name"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:1373
+msgid "Could not get pool data from the storage"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:1374
+msgid "_update_volume_status: Could not get storage pool data"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:1412
+msgid ""
+"Illegal value specified for storwize_svc_vol_rsize: set to either a "
+"percentage (0-100) or -1"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:1418
+msgid ""
+"Illegal value specified for storwize_svc_vol_warning: set to a percentage"
+" (0-100)"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:1425
+msgid ""
+"Illegal value specified for storwize_svc_vol_grainsize: set to either 32,"
+" 64, 128, or 256"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:1432
+msgid "System does not support compression"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:1437
+msgid "If compression is set to True, rsize must also be set (not equal to -1)"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:1443
+#, python-format
+msgid ""
+"Illegal value %(prot)s specified for storwize_svc_connection_protocol: "
+"valid values are %(enabled)s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:1452
+msgid ""
+"Multipath is currently only supported for FC connections and not iSCSI. "
+"(This is a Nova limitation.)"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:1464
+#, python-format
+msgid "enter: _execute_command_and_parse_attributes: command %s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:1471
+#, fuzzy, python-format
+msgid ""
+"CLI Exception output:\n"
+" command: %(cmd)s\n"
+" stdout: %(out)s\n"
+" stderr: %(err)s"
+msgstr ""
+"%(description)s\n"
+"Příkaz: %(cmd)s\n"
+"Kód ukončení: %(exit_code)s\n"
+"Stdout: %(stdout)r\n"
+"Stderr: %(stderr)r"
+
+#: cinder/volume/drivers/storwize_svc.py:1488
+#, python-format
+msgid ""
+"leave: _execute_command_and_parse_attributes:\n"
+"command: %(cmd)s\n"
+"attributes: %(attr)s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:1507
+#, python-format
+msgid ""
+"_get_hdr_dic: attribute headers and values do not match.\n"
+" Headers: %(header)s\n"
+" Values: %(row)s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:1517
+#, python-format
+msgid ""
+"%(fun)s: Failed with unexpected CLI output.\n"
+" Command: %(cmd)s\n"
+"stdout: %(out)s\n"
+"stderr: %(err)s\n"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:1531
+#, python-format
+msgid ""
+"%(fun)s: Failed with unexpected CLI output.\n"
+" Command: %(cmd)s\n"
+" stdout: %(out)s\n"
+" stderr: %(err)s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:1539
+#, python-format
+msgid "Did not find expected column in %(fun)s: %(hdr)s"
+msgstr ""
+
+#: cinder/volume/drivers/windows.py:142
+#, python-format
+msgid "Creating folder %s "
+msgstr ""
+
+#: cinder/volume/drivers/windows.py:207
+#: cinder/volume/drivers/nexenta/volume.py:203
+#, python-format
+msgid "Ignored target creation error \"%s\" while ensuring export"
+msgstr ""
+
+#: cinder/volume/drivers/windows.py:213
+#, fuzzy, python-format
+msgid "Disk not found: %s"
+msgstr "Instance nenalezena"
+
+#: cinder/volume/drivers/zadara.py:218
+#, python-format
+msgid "Sending %(method)s to %(url)s. Body \"%(body)s\""
+msgstr ""
+
+#: cinder/volume/drivers/zadara.py:240
+#, python-format
+msgid "Operation completed. %(data)s"
+msgstr ""
+
+#: cinder/volume/drivers/zadara.py:350
+#, fuzzy, python-format
+msgid "Volume %(name)s could not be found. It might be already deleted"
+msgstr "Svazek %(volume_id)s nemohl být nastaven."
+
+#: cinder/volume/drivers/zadara.py:447
+#, python-format
+msgid "Attach properties: %(properties)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:42
+msgid ""
+"Module PyWBEM not installed. Install PyWBEM using the python-pywbem "
+"package."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:78
+msgid "Entering create_volume."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:82
+#, python-format
+msgid "Create Volume: %(volume)s Size: %(size)lu"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:90
+#, python-format
+msgid "Create Volume: %(volume)s Storage type: %(storage_type)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:97
+#, python-format
+msgid ""
+"Create Volume: %(volume)s Pool: %(pool)s Storage System: "
+"%(storage_system)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:106
+#, python-format
+msgid ""
+"Error Create Volume: %(volumename)s. Storage Configuration Service not "
+"found for pool %(storage_type)s."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:114
+#, python-format
+msgid ""
+"Create Volume: %(name)s Method: CreateOrModifyElementFromStoragePool "
+"ConfigServicie: %(service)s ElementName: %(name)s InPool: %(pool)s "
+"ElementType: 5 Size: %(size)lu"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:129
+#, python-format
+msgid "Create Volume: %(volumename)s Return code: %(rc)lu"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:136
+#, python-format
+msgid ""
+"Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: "
+"%(error)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:143
+#, python-format
+msgid "Leaving create_volume: %(volumename)s Return code: %(rc)lu"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:151
+#, fuzzy
+msgid "Entering create_volume_from_snapshot."
+msgstr "Vytvořit svazek ze snímku %s"
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:156
+#, python-format
+msgid ""
+"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: "
+"%(snapshotname)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:166
+#, python-format
+msgid ""
+"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: "
+"%(snapshotname)s Snapshot Instance: %(snapshotinstance)s Storage "
+"System: %(storage_system)s."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:176
+#, python-format
+msgid ""
+"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: "
+"%(snapshotname)s. Create Volume from Snapshot is NOT supported on VMAX."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:187
+#, python-format
+msgid ""
+"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: "
+"%(snapshotname)s. Cannot find Replication Service to create volume from "
+"snapshot."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:196
+#, python-format
+msgid ""
+"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: "
+"%(snapshotname)s Method: CreateElementReplica ReplicationService: "
+"%(service)s ElementName: %(elementname)s SyncType: 8 SourceElement: "
+"%(sourceelement)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:217
+#, python-format
+msgid ""
+"Error Create Volume from Snapshot: Volume: %(volumename)s "
+"Snapshot:%(snapshotname)s. Return code: %(rc)lu.Error: %(error)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:229
+#, python-format
+msgid ""
+"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: "
+"%(snapshotname)s. Successfully clone volume from snapshot. Finding the "
+"clone relationship."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:240
+#, python-format
+msgid ""
+"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: "
+"%(snapshotname)s. Remove the clone relationship. Method: "
+"ModifyReplicaSynchronization ReplicationService: %(service)s Operation: "
+"8 Synchronization: %(sync_name)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:256
+#, python-format
+msgid ""
+"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: "
+"%(snapshotname)s Return code: %(rc)lu"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:265
+#, python-format
+msgid ""
+"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: "
+"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:277
+#, python-format
+msgid ""
+"Leaving create_volume_from_snapshot: Volume: %(volumename)s Snapshot: "
+"%(snapshotname)s Return code: %(rc)lu."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:286
+msgid "Entering create_cloned_volume."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:291
+#, python-format
+msgid ""
+"Create a Clone from Volume: Volume: %(volumename)s Source Volume: "
+"%(srcname)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:301
+#, python-format
+msgid ""
+"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s"
+" Source Instance: %(src_instance)s Storage System: %(storage_system)s."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:311
+#, python-format
+msgid ""
+"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: "
+"%(srcname)s. Cannot find Replication Service to create cloned volume."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:320
+#, python-format
+msgid ""
+"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s"
+" Method: CreateElementReplica ReplicationService: %(service)s "
+"ElementName: %(elementname)s SyncType: 8 SourceElement: "
+"%(sourceelement)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:341
+#, python-format
+msgid ""
+"Error Create Cloned Volume: Volume: %(volumename)s Source "
+"Volume:%(srcname)s. Return code: %(rc)lu.Error: %(error)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:353
+#, python-format
+msgid ""
+"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s."
+" Successfully cloned volume from source volume. Finding the clone "
+"relationship."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:364
+#, python-format
+msgid ""
+"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s."
+" Remove the clone relationship. Method: ModifyReplicaSynchronization "
+"ReplicationService: %(service)s Operation: 8 Synchronization: "
+"%(sync_name)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:380
+#, python-format
+msgid ""
+"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s"
+" Return code: %(rc)lu"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:389
+#, python-format
+msgid ""
+"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: "
+"%(srcname)s. Return code: %(rc)lu. Error: %(error)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:401
+#, python-format
+msgid ""
+"Leaving create_cloned_volume: Volume: %(volumename)s Source Volume: "
+"%(srcname)s Return code: %(rc)lu."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:410
+msgid "Entering delete_volume."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:412
+#, fuzzy, python-format
+msgid "Delete Volume: %(volume)s"
+msgstr "mazání svazku %(volume_name)s který má snímek"
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:419
+#, python-format
+msgid "Volume %(name)s not found on the array. No volume to delete."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:429
+#, python-format
+msgid ""
+"Error Delete Volume: %(volumename)s. Storage Configuration Service not "
+"found."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:437
+#, python-format
+msgid "Delete Volume: %(name)s DeviceID: %(deviceid)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:441
+#, python-format
+msgid ""
+"Delete Volume: %(name)s Method: EMCReturnToStoragePool ConfigServic: "
+"%(service)s TheElement: %(vol_instance)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:454
+#, python-format
+msgid ""
+"Error Delete Volume: %(volumename)s. Return code: %(rc)lu. Error: "
+"%(error)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:463
+#, python-format
+msgid "Leaving delete_volume: %(volumename)s Return code: %(rc)lu"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:470
+msgid "Entering create_snapshot."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:474
+#, fuzzy, python-format
+msgid "Create snapshot: %(snapshot)s: volume: %(volume)s"
+msgstr "Vytvořit snímek svazku %s"
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:486
+#, python-format
+msgid "Device ID: %(deviceid)s: Storage System: %(storagesystem)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:493
+#: cinder/volume/drivers/emc/emc_smis_common.py:495
+#: cinder/volume/drivers/emc/emc_smis_common.py:565
+#, python-format
+msgid "Cannot find Replication Service to create snapshot for volume %s."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:500
+#, python-format
+msgid ""
+"Create Snapshot: Method: CreateElementReplica: Target: %(snapshot)s "
+"Source: %(volume)s Replication Service: %(service)s ElementName: "
+"%(elementname)s Sync Type: 7 SourceElement: %(sourceelement)s."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:516
+#, python-format
+msgid ""
+"Create Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s "
+"Return code: %(rc)lu"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:525
+#, python-format
+msgid "Error Create Snapshot: (snapshot)s Volume: %(volume)s Error: %(errordesc)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:533
+#, python-format
+msgid ""
+"Leaving create_snapshot: Snapshot: %(snapshot)s Volume: %(volume)s "
+"Return code: %(rc)lu."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:539
+msgid "Entering delete_snapshot."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:543
+#, fuzzy, python-format
+msgid "Delete Snapshot: %(snapshot)s: volume: %(volume)s"
+msgstr "Vytvořit snímek svazku %s"
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:549
+#, python-format
+msgid ""
+"Delete Snapshot: %(snapshot)s: volume: %(volume)s. Finding "
+"StorageSychronization_SV_SV."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:557
+#, python-format
+msgid ""
+"Snapshot: %(snapshot)s: volume: %(volume)s not found on the array. No "
+"snapshot to delete."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:572
+#, python-format
+msgid ""
+"Delete Snapshot: Target: %(snapshot)s Source: %(volume)s. Method: "
+"ModifyReplicaSynchronization: Replication Service: %(service)s "
+"Operation: 19 Synchronization: %(sync_name)s."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:588
+#, python-format
+msgid ""
+"Delete Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s "
+"Return code: %(rc)lu"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:597
+#, python-format
+msgid ""
+"Error Delete Snapshot: Volume: %(volumename)s Snapshot: "
+"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:609
+#, python-format
+msgid ""
+"Leaving delete_snapshot: Volume: %(volumename)s Snapshot: "
+"%(snapshotname)s Return code: %(rc)lu."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:619
+#, fuzzy, python-format
+msgid "Create export: %(volume)s"
+msgstr "Vytvořit snímek svazku %s"
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:624
+#, fuzzy, python-format
+msgid "create_export: Volume: %(volume)s Device ID: %(device_id)s"
+msgstr "Připojit svazek %(volume_id)s k instanci %(instance_id)s na %(device)s"
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:646
+#, python-format
+msgid ""
+"ExposePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(lun_name)s"
+" InitiatorPortIDs: %(initiator)s DeviceAccesses: 2"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:661
+#, python-format
+msgid "ExposePaths parameter LunMaskingSCSIProtocolController: %(lunmasking)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:672
+#, fuzzy, python-format
+msgid "Error mapping volume %s."
+msgstr "Chyba v přesunu %s"
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:676
+#, python-format
+msgid "ExposePaths for volume %s completed successfully."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:692
+#, python-format
+msgid ""
+"HidePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(device_id)s "
+"LunMaskingSCSIProtocolController: %(lunmasking)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:705
+#, python-format
+msgid "Error unmapping volume %s."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:709
+#, python-format
+msgid "HidePaths for volume %s completed successfully."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:722
+#, python-format
+msgid ""
+"AddMembers: ConfigServicie: %(service)s MaskingGroup: %(masking_group)s"
+" Members: %(vol)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:735
+#, python-format
+msgid "Error mapping volume %(vol)s. %(error)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:740
+#, python-format
+msgid "AddMembers for volume %s completed successfully."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:753
+#, python-format
+msgid ""
+"RemoveMembers: ConfigServicie: %(service)s MaskingGroup: "
+"%(masking_group)s Members: %(vol)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:766
+#, python-format
+msgid "Error unmapping volume %(vol)s. %(error)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:771
+#, python-format
+msgid "RemoveMembers for volume %s completed successfully."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:777
+#, python-format
+msgid "Map volume: %(volume)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:786
+#: cinder/volume/drivers/emc/emc_smis_common.py:816
+#, python-format
+msgid "Cannot find Controller Configuration Service for storage system %s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:800
+#, python-format
+msgid "Unmap volume: %(volume)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:806
+#, python-format
+msgid "Volume %s is not mapped. No volume to unmap."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:830
+#, python-format
+msgid "Initialize connection: %(volume)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:836
+#, python-format
+msgid "Volume %s is already mapped."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:848
+#, python-format
+msgid "Terminate connection: %(volume)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:880
+#, python-format
+msgid "Found Storage Type: %s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:883
+#, fuzzy
+msgid "Storage type not found."
+msgstr "Obraz nenalezen"
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:899
+#, python-format
+msgid "Found Masking View: %s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:902
+#, fuzzy
+msgid "Masking View not found."
+msgstr "Obraz nenalezen"
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:924
+#, fuzzy
+msgid "Ecom user not found."
+msgstr "Server nenalezen."
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:944
+#, python-format
+msgid "Ecom IP: %(ecomIp)s Port: %(ecomPort)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:947
+#, fuzzy
+msgid "Ecom server not found."
+msgstr "Server nenalezen."
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:954
+#, fuzzy
+msgid "Cannot connect to ECOM server"
+msgstr "Znovu připojeno k frontě"
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:966
+#, python-format
+msgid "Found Replication Service: %s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:979
+#, python-format
+msgid "Found Storage Configuration Service: %s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:992
+#, python-format
+msgid "Found Controller Configuration Service: %s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:1005
+#, python-format
+msgid "Found Storage Hardware ID Management Service: %s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:1049
+#, fuzzy, python-format
+msgid "Pool %(storage_type)s is not found."
+msgstr "Role %(role_id)s nemohla být nalezena."
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:1055
+#, python-format
+msgid "Storage system not found for pool %(storage_type)s."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:1061
+#, python-format
+msgid "Pool: %(pool)s SystemName: %(systemname)s."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:1077
+#, python-format
+msgid "Pool name: %(poolname)s System name: %(systemname)s."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:1109
+#, fuzzy, python-format
+msgid "Volume %(volumename)s not found on the array."
+msgstr "Svazek %(volume_id)s nemohl být nastaven."
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:1112
+#, fuzzy, python-format
+msgid "Volume name: %(volumename)s Volume instance: %(vol_instance)s."
+msgstr "Svazek není nalezen v instanci %(instance_id)s."
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:1125
+#, python-format
+msgid "Source: %(volumename)s Target: %(snapshotname)s."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:1148
+#, python-format
+msgid ""
+"Source: %(volumename)s Target: %(snapshotname)s. Storage Synchronized "
+"not found. "
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:1153
+#, python-format
+msgid ""
+"Storage system: %(storage_system)s Storage Synchronized instance: "
+"%(sync)s."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:1179
+#, fuzzy, python-format
+msgid "Error finding %s."
+msgstr "Chyba v přesunu %s"
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:1183
+#, python-format
+msgid "Found %(name)s: %(initiator)s."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:1243
+#, python-format
+msgid ""
+"LunMaskingSCSIProtocolController for storage system %(storage_system)s "
+"and initiator %(initiator)s is %(ctrl)s."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:1282
+#, python-format
+msgid ""
+"LunMaskingSCSIProtocolController for storage volume %(vol)s and initiator"
+" %(initiator)s is %(ctrl)s."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:1295
+#, python-format
+msgid ""
+"Volume %(name)s not found on the array. Cannot determine if there are "
+"volumes mapped."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:1307
+#, python-format
+msgid ""
+"LunMaskingSCSIProtocolController for storage system %(storage)s and "
+"%(connector)s is %(ctrl)s."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:1319
+#, python-format
+msgid ""
+"Found %(numVolumesMapped)d volumes on storage system %(storage)s mapped "
+"to %(initiator)s."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:1354
+#, python-format
+msgid "Available device number on %(storage)s: %(device)s."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:1397
+#, python-format
+msgid "Device number not found for volume %(volumename)s %(vol_instance)s."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:1402
+#, python-format
+msgid "Found device number %(device)d for volume %(volumename)s %(vol_instance)s."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:1412
+#, python-format
+msgid "Device info: %(data)s."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:1434
+#, python-format
+msgid "Masking view: %(view)s DeviceMaskingGroup: %(masking)s."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:1456
+#, python-format
+msgid "Found Storage Processor System: %s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:1484
+#, python-format
+msgid ""
+"iSCSIProtocolEndpoint for storage system %(storage_system)s and SP %(sp)s"
+" is %(endpoint)s."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:1513
+msgid "Error finding Storage Hardware ID Service."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:1519
+#, python-format
+msgid ""
+"EMCGetTargetEndpoints: Service: %(service)s Storage HardwareIDs: "
+"%(hardwareids)s."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:1531
+msgid "Error finding Target WWNs."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:1541
+#, python-format
+msgid "Add target WWN: %s."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:1543
+#, python-format
+msgid "Target WWNs: %s."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:1559
+#, python-format
+msgid "Storage Hardware IDs for %(wwpns)s is %(foundInstances)s."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_iscsi.py:158
+#, python-format
+msgid "Could not find iSCSI export for volume %s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_iscsi.py:167
+#, python-format
+msgid "Cannot find device number for volume %s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_iscsi.py:197
+#, fuzzy, python-format
+msgid "Found iSCSI endpoint: %s"
+msgstr "Vyvoláno Nenalezeno: %s"
+
+#: cinder/volume/drivers/emc/emc_smis_iscsi.py:204
+#, python-format
+msgid "ISCSI endpoint not found for SP %(sp)s on storage system %(storage)s."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_iscsi.py:221
+#, python-format
+msgid "ISCSI properties: %s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:113
+msgid "read timed out"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:138
+msgid "do_setup."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:144
+msgid "check_for_setup_error."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:149
+msgid "check_for_setup_error: Can not get device type."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:153
+#, python-format
+msgid "check_for_setup_error: Device type is:%(type)s, version is:%(version)s."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:160
+msgid ""
+"check_for_setup_error: Product version not right. Please make sure the "
+"product version is V1."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:171
+msgid "_get_device_type: Storage Pool must be configured."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:180
+#, python-format
+msgid "create_volume:volume name: %s."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:194
+#, python-format
+msgid "delete_volume: volume name: %s."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:201
+#, python-format
+msgid "delete_volume:No need to delete volume. Volume %(name)s does not exist."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:210
+#, fuzzy, python-format
+msgid "create_export: volume name:%s"
+msgstr "Vytvořit snímek svazku %s"
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:214
+#, python-format
+msgid "create_export:Volume %(name)s does not exist."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:234
+#, python-format
+msgid "initialize_connection: volume name: %(volume)s. initiator name: %(ini)s."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:249
+#, python-format
+msgid ""
+"initialize_connection:Failed to find target ip for "
+"initiator:%(initiatorname)s, please check config file."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:259
+#, python-format
+msgid "initialize_connection:Failed to find target iSCSI iqn. Target IP:%(ip)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:297
+#, python-format
+msgid ""
+"initialize_connection:host name: %(host)s, initiator name: %(ini)s, "
+"hostport name: %(port)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:307
+#, python-format
+msgid ""
+"initialize_connection:Failed to find the given volume. volume "
+"name:%(volume)s."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:359
+#, python-format
+msgid "terminate_connection:volume name: %(volume)s, initiator name: %(ini)s."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:368
+#, python-format
+msgid "terminate_connection:Host does not exist. Host name:%(host)s."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:377
+#, python-format
+msgid "terminate_connection:volume does not exist. volume name:%(volume)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:396
+#, python-format
+msgid ""
+"terminate_connection:No map between host and volume. Host "
+"name:%(hostname)s, volume name:%(volumename)s."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:414
+#, python-format
+msgid ""
+"terminate_connection:No initiator is added to the host. Host "
+"name:%(hostname)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:427
+#, python-format
+msgid "create_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:434
+msgid "create_snapshot:Device does not support snapshot."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:440
+msgid "create_snapshot:Resource pool needs 1GB valid size at least."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:447
+#, python-format
+msgid "create_snapshot:Volume does not exist. Volume name:%(name)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:456
+#, python-format
+msgid "create_snapshot:Snapshot does not exist. Snapshot name:%(name)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:468
+#, python-format
+msgid "delete_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:475
+msgid "delete_snapshot:Device does not support snapshot."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:484
+#, python-format
+msgid "delete_snapshot:Snapshot does not exist. snapshot name:%(snap)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:497
+#, python-format
+msgid ""
+"create_volume_from_snapshot:snapshot name:%(snapshot)s, volume "
+"name:%(volume)s."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:505
+#, python-format
+msgid ""
+"create_volume_from_snapshot:Device does not support create volume from "
+"snapshot. Volume name:%(volume)s, snapshot name:%(snapshot)s."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:516
+#, python-format
+msgid ""
+"create_volume_from_snapshot:Snapshot does not exist. Snapshot "
+"name:%(name)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:567
+msgid "Config file is wrong. Controler IP, UserName and UserPassword must be set."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:573
+#, python-format
+msgid "_check_conf_file: %s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:584
+#, python-format
+msgid "_read_xml:%s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:612
+#, python-format
+msgid "Write login information to xml error. %s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:616
+#, python-format
+msgid "_get_login_info error. %s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:638
+#, python-format
+msgid ""
+"Config file is wrong. LUNType must be \"Thin\" or \"Thick\". "
+"LUNType:%(type)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:665
+msgid "_get_lun_set_info:Use default prefetch type. Prefetch type:Intelligent."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:676
+msgid ""
+"_get_lun_set_info:No available pools! Please check whether storage pool "
+"is created."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:686
+#, python-format
+msgid "_get_lun_set_info:%s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:736
+msgid ""
+"_get_maximum_pool:maxpoolid is None. Please check config file and make "
+"sure the \"Name\" in \"StoragePool\" is right."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:754
+#, python-format
+msgid "_get_iscsi_info:%s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:764
+#, python-format
+msgid "CLI command:%s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:831
+#, python-format
+msgid "_execute_cli:%s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:838
+#, python-format
+msgid ""
+"_name_translate:Name in cinder: %(old)s, new name in storage system: "
+"%(new)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:872
+#, python-format
+msgid ""
+"_create_hostgroup:Failed to Create hostgroup. Hostgroup name: %(name)s. "
+"out:%(out)s."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:901
+#, python-format
+msgid ""
+"_add_host:Failed to add host to hostgroup. host name:%(host)s hostgroup "
+"id:%(hostgroup)s out:%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:927
+#, python-format
+msgid ""
+"_add_initiator:Failed to add initiator. initiator name:%(name)s "
+"out:%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:941
+#, python-format
+msgid ""
+"_delete_initiator:ERROE:Failed to delete initiator. initiator "
+"name:%(name)s out:%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:991
+#, python-format
+msgid ""
+"_add_hostport:Failed to add hostport. port name:%(port)s port "
+"information:%(info)s host id:%(host)s out:%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1009
+#, python-format
+msgid "_delete_hostport:Failed to delete host port. port id:%(portid)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1016
+#, python-format
+msgid "_get_tgt_iqn:iSCSI IP is %s."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1049
+#, python-format
+msgid "_get_tgt_iqn:iSCSI target iqn is:%s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1086
+#, python-format
+msgid ""
+"_map_lun:Failed to add hostmap. hostid:%(host)s lunid:%(lun)s "
+"hostlunid:%(hostlunid)s out:%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1122
+#, python-format
+msgid ""
+"_delete_map:There are IOs accessing the system. Retry to delete host map."
+" map id:%(mapid)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1128
+#, python-format
+msgid "_delete_map:Failed to delete host map. mapid:%(mapid)s out:%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1142
+#, python-format
+msgid "_delete_host: Failed delete host. host id:%(hostid)s out:%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1223
+#, python-format
+msgid ""
+"_active_snapshot:Failed to active snapshot. snapshot id:%(name)s. "
+"out:%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1237
+#, python-format
+msgid ""
+"_disable_snapshot:Failed to disable snapshot. snapshot id:%(id)s. "
+"out:%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1251
+#, python-format
+msgid ""
+"_delete_snapshot:Failed to delete snapshot. snapshot id:%(id)s. "
+"out:%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1308
+#, python-format
+msgid "_create_volume:Failed to Create volume. volume name:%(name)s. out:%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1321
+#, python-format
+msgid "_delete_volume:Failed to delete volume. Volume name:%(name)s out:%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1338
+#, python-format
+msgid ""
+"_create_luncopy:Failed to Create LUNcopy. LUNcopy name:%(name)s "
+"out:%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1352
+#, python-format
+msgid ""
+"_start_luncopy:Failed to start LUNcopy. LUNcopy id:%(luncopyid)s "
+"out:%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1382
+#, python-format
+msgid ""
+"_wait_for_luncopy:LUNcopy status is not normal. LUNcopy "
+"name:%(luncopyname)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1415
+#, python-format
+msgid ""
+"_delete_luncopy:Failed to delete LUNcopy. LUNcopy id:%(luncopyid)s "
+"out:%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1430
+#, python-format
+msgid ""
+"_create_snapshot:Failed to Create snapshot. Snapshot name:%(name)s "
+"out:%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1468
+#, python-format
+msgid ""
+"_change_lun_controller:Failed to change lun owning controller. lun "
+"id:%(lunid)s. new controller:%(controller)s. out:%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1484
+msgid "_is_resource_pool_enough:Resource pool for snapshot not be added."
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:123 +#, fuzzy, python-format +msgid "API %(name)s failed: %(reason)s" +msgstr "Obraz %(image_id)s je nepřijatelný: %(reason)s" + +#: cinder/volume/drivers/netapp/iscsi.py:134 +#: cinder/volume/drivers/netapp/iscsi.py:1167 +#, python-format +msgid "Using WSDL: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:144 +#, python-format +msgid "Using DFM server: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:149 +#, python-format +msgid "Using storage service: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:154 +#, python-format +msgid "Using storage service prefix: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:160 +#, fuzzy, python-format +msgid "Using vfiler: %s" +msgstr "Vyvoláno Nenalezeno: %s" + +#: cinder/volume/drivers/netapp/iscsi.py:173 +msgid "Either netapp_storage_service or netapp_storage_service_prefix must be set" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:204 +#, fuzzy +msgid "Connected to DFM server" +msgstr "Znovu připojeno k frontě" + +#: cinder/volume/drivers/netapp/iscsi.py:282 +#, python-format +msgid "Discovered %(dataset_count)s datasets and %(lun_count)s LUNs" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:317 +#, fuzzy, python-format +msgid "Job failed: %s" +msgstr "Vyvoláno Nenalezeno: %s" + +#: cinder/volume/drivers/netapp/iscsi.py:346 +msgid "" +"Attempt to use volume_type without specifying " +"netapp_storage_service_prefix flag." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:350 +msgid "" +"You must set the netapp_storage_service flag in order to create volumes " +"with no volume_type." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:420 +msgid "Failed to provision dataset member" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:435 +msgid "No LUN was created by the provision job" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:463 +#: cinder/volume/drivers/netapp/iscsi.py:1250 +#, python-format +msgid "No entry in LUN table for volume %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:481 +msgid "Failed to remove and delete dataset LUN member" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:497 +msgid "Failed to remove and delete dataset Qtree member" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:538 +#, python-format +msgid "No entry in LUN table for volume %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:558 +#, fuzzy, python-format +msgid "Failed to get LUN details for LUN ID %s" +msgstr "Nelze získat metadata pro ip: %s" + +#: cinder/volume/drivers/netapp/iscsi.py:575 +#, fuzzy, python-format +msgid "Failed to get host details for host ID %s" +msgstr "Nelze získat metadata pro ip: %s" + +#: cinder/volume/drivers/netapp/iscsi.py:812 +#: cinder/volume/drivers/netapp/iscsi.py:859 +#, python-format +msgid "No LUN ID for volume %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:821 +#, fuzzy, python-format +msgid "Failed to get target portal for filer: %s" +msgstr "Nelze získat metadata pro ip: %s" + +#: cinder/volume/drivers/netapp/iscsi.py:826 +#, fuzzy, python-format +msgid "Failed to get target IQN for filer: %s" +msgstr "Nelze získat metadata pro ip: %s" + +#: cinder/volume/drivers/netapp/iscsi.py:930 +msgid "" +"Failed to get LUN list. Is the DFM host time-synchronized with Cinder " +"host?" 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1042 +#: cinder/volume/drivers/netapp/iscsi.py:1381 +#: cinder/volume/drivers/netapp/iscsi.py:1699 +#: cinder/volume/drivers/netapp/nfs.py:74 +#, python-format +msgid "" +"Cannot create volume of size %(vol_size)s from snapshot of size " +"%(snap_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1054 +#, python-format +msgid "" +"Cannot create volume of type %(new_type)s from snapshot of type " +"%(old_type)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1075 +#: cinder/volume/drivers/netapp/nfs.py:273 +#, python-format +msgid "" +"Cannot create clone of size %(vol_size)s from volume of size " +"%(src_vol_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1086 +#, python-format +msgid "Cannot create clone of type %(new_type)s from volume of type %(old_type)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1143 +#, python-format +msgid "No metadata property %(prop)s defined for the LUN %(name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1216 +#: cinder/volume/drivers/netapp/iscsi.py:1561 +msgid "Success getting LUN list from server" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1238 +#: cinder/volume/drivers/netapp/iscsi.py:1576 +#, python-format +msgid "Created LUN with name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1254 +#: cinder/volume/drivers/netapp/iscsi.py:1369 +#: cinder/volume/drivers/netapp/iscsi.py:1593 +#, python-format +msgid "Destroyed LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1291 +#, python-format +msgid "Mapped LUN %(handle)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1298 +#, python-format +msgid "" +"Successfully fetched target details for LUN %(handle)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1303 +#: cinder/volume/drivers/netapp/iscsi.py:1637 +#, fuzzy, python-format +msgid "Failed to get LUN target details for the LUN %s" +msgstr "Nelze získat metadata pro ip: %s" + +#: cinder/volume/drivers/netapp/iscsi.py:1307 +#: cinder/volume/drivers/netapp/iscsi.py:1648 +#, fuzzy, python-format +msgid "Failed to get target portal for the LUN %s" +msgstr "Nelze získat metadata pro ip: %s" + +#: cinder/volume/drivers/netapp/iscsi.py:1311 +#: cinder/volume/drivers/netapp/iscsi.py:1651 +#, fuzzy, python-format +msgid "Failed to get target IQN for the LUN %s" +msgstr "Nelze získat metadata pro ip: %s" + +#: cinder/volume/drivers/netapp/iscsi.py:1344 +#, python-format +msgid "Unmapped LUN %(handle)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1365 +#, python-format +msgid "No entry in LUN table for snapshot %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1409 +#: cinder/volume/drivers/netapp/iscsi.py:1907 +msgid "Object is not a NetApp LUN." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1419 +#: cinder/volume/drivers/netapp/iscsi.py:2188 +#, python-format +msgid "Cloned LUN with new name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1439 +#, fuzzy, python-format +msgid "Could not find handle for LUN named %s" +msgstr "Nelze najít obslužnou rutinu pro svazek %(driver_type)s." 
+ +#: cinder/volume/drivers/netapp/iscsi.py:1458 +#: cinder/volume/drivers/netapp/iscsi.py:1936 +#, python-format +msgid "" +"Cannot clone volume of size %(vol_size)s from src volume of size " +"%(src_vol_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1518 +#, python-format +msgid "Using NetApp filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1585 +#, python-format +msgid "No entry in LUN table for volume/snapshot %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1628 +#, python-format +msgid "Mapped LUN %(name)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1632 +#, python-format +msgid "" +"Successfully fetched target details for LUN %(name)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1688 +#, python-format +msgid "Snapshot %s deletion successful" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1717 +#, python-format +msgid "Unmapped LUN %(name)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1735 +#, fuzzy, python-format +msgid "Failed to get vol with required size for volume: %s" +msgstr "Nelze získat metadata pro ip: %s" + +#: cinder/volume/drivers/netapp/iscsi.py:1811 +#, python-format +msgid "Error mapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1829 +#, python-format +msgid "Error unmapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1923 +#, python-format +msgid "Could not find attribute for LUN named %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2057 +#, python-format +msgid "No iscsi service found for vserver %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2192 +#, python-format +msgid "No cloned lun named %s found on the filer" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2377 +#, python-format +msgid "Error finding luns for volume %(vol)s. Verify volume exists." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2481 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s completed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2484 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s failed" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:312 +#, python-format +msgid "" +"Cloning with params ip %(host_ip)s, exp_path\n" +" %(export_path)s, vol %(volume_name)s,\n" +" clone_name %(clone_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:461 +#, python-format +msgid "No interface found on cluster for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:487 +#, python-format +msgid "" +"No volume on cluster with vserver\n" +" %(vserver)s and junction path " +"%(junction)s\n" +" " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:493 +#, python-format +msgid "" +"Cloning with params volume %(volume)s,src %(src_path)s,\n" +" dest %(dest_path)s, vserver %(vserver)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:547 +#, python-format +msgid "No storage path found for export path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:555 +#, python-format +msgid "Cloning with src %(src_path)s, dest %(dest_path)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/__init__.py:27 +msgid "Nexenta SA returned the error" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:64 +#, python-format +msgid "Sending JSON data: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:69 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:75 +msgid "No headers in server response" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:76 +#, fuzzy +msgid "Bad response from server" +msgstr "Špatná odpověď od SolidFire API" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:79 +#, fuzzy, python-format +msgid "Got response: %s" +msgstr "odpověď %s" + +#: cinder/volume/drivers/nexenta/volume.py:99 +#, python-format +msgid "Volume %s does not exist in Nexenta SA" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:211 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:220 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:228 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:238 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:274 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:282 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:74 +#, python-format +msgid "CLIQ command returned %s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:80 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:88 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. 
Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:118 +#, python-format +msgid "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. " +"Result=%(_xml)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:171 +#, python-format +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:235 +msgid "local_path not supported" +msgstr "" + +#: cinder/volume/drivers/san/san.py:148 +#, fuzzy, python-format +msgid "Error running SSH command: %s" +msgstr "Při spuštění příkazu došlo k nečekané chybě." + +#: cinder/volume/drivers/san/san.py:169 +msgid "Specify san_password or san_private_key" +msgstr "" + +#: cinder/volume/drivers/san/san.py:173 +msgid "san_ip must be set" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:80 +#, python-format +msgid "Cannot parse list-view output: %s" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:174 +#, python-format +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:257 +#, python-format +msgid "SSH Command failed after '%(total_attempts)r' attempts: '%(command)s'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:261 +#, fuzzy, python-format +msgid "Error running ssh command: %s" +msgstr "Při spuštění příkazu došlo k nečekané chybě." + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:446 +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:104 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:103 +#, python-format +msgid "CPG (%s) doesn't exist on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:493 +#, python-format +msgid "Must specify a valid persona %(valid)s, value '%(persona)s' is invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:532 +#, python-format +msgid "" +"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " +"invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:569 +#, fuzzy, python-format +msgid "Volume (%s) already exists on array" +msgstr "skupina %s již existuje" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:622 +#, fuzzy, python-format +msgid "Unexpected state while cloning %s" +msgstr "Při spuštění příkazu došlo k nečekané chybě." + +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:97 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:96 +msgid "Login to 3PAR array invalid" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:218 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:237 +#, python-format +msgid "The hostname must be called '%s'" +msgstr "" + diff --git a/cinder/locale/da/LC_MESSAGES/cinder.po b/cinder/locale/da/LC_MESSAGES/cinder.po new file mode 100644 index 0000000000..cb3418a31a --- /dev/null +++ b/cinder/locale/da/LC_MESSAGES/cinder.po @@ -0,0 +1,5573 @@ +# Danish translation for cinder +# Copyright (c) 2011 Rosetta Contributors and Canonical Ltd 2011 +# This file is distributed under the same license as the cinder package. +# FIRST AUTHOR , 2011. 
+ +msgid "" +msgstr "" +"Project-Id-Version: cinder\n" +"Report-Msgid-Bugs-To: FULL NAME \n" +"POT-Creation-Date: 2013-05-21 18:10+0000\n" +"PO-Revision-Date: 2011-01-15 21:46+0000\n" +"Last-Translator: Soren Hansen \n" +"Language-Team: Danish \n" +"Plural-Forms: nplurals=2; plural=(n != 1)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 0.9.6\n" + +#: cinder/context.py:61 +#, python-format +msgid "Arguments dropped when creating context: %s" +msgstr "" + +#: cinder/context.py:92 +#, python-format +msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" +msgstr "" + +#: cinder/exception.py:63 +msgid "Unexpected error while running command." +msgstr "" + +#: cinder/exception.py:66 +#, python-format +msgid "" +"%(description)s\n" +"Command: %(cmd)s\n" +"Exit code: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" +msgstr "" + +#: cinder/exception.py:90 +msgid "DB exception wrapped." +msgstr "" + +#: cinder/exception.py:104 +msgid "An unknown exception occurred." +msgstr "" + +#: cinder/exception.py:125 cinder/openstack/common/rpc/common.py:86 +msgid "Exception in string format operation" +msgstr "" + +#: cinder/exception.py:138 +msgid "Connection to glance failed" +msgstr "" + +#: cinder/exception.py:142 +msgid "Not authorized." +msgstr "" + +#: cinder/exception.py:147 +msgid "User does not have admin privileges" +msgstr "" + +#: cinder/exception.py:151 +#, python-format +msgid "Policy doesn't allow %(action)s to be performed." +msgstr "" + +#: cinder/exception.py:155 +#, python-format +msgid "Not authorized for image %(image_id)s." +msgstr "" + +#: cinder/exception.py:159 +msgid "Unacceptable parameters." +msgstr "" + +#: cinder/exception.py:164 +msgid "Invalid snapshot" +msgstr "" + +#: cinder/exception.py:168 +#, python-format +msgid "Volume %(volume_id)s is still attached, detach volume first." +msgstr "" + +#: cinder/exception.py:172 +msgid "Failed to load data into json format" +msgstr "" + +#: cinder/exception.py:176 +msgid "The request is invalid." +msgstr "" + +#: cinder/exception.py:180 +msgid "The results are invalid." +msgstr "" + +#: cinder/exception.py:184 +msgid "Invalid input received" +msgstr "" + +#: cinder/exception.py:188 +msgid "Invalid volume type" +msgstr "" + +#: cinder/exception.py:192 +msgid "Invalid volume" +msgstr "" + +#: cinder/exception.py:196 +#, python-format +msgid "Invalid content type %(content_type)s." +msgstr "" + +#: cinder/exception.py:200 +msgid "Invalid Parameter: Unicode is not supported by the current database." +msgstr "" + +#: cinder/exception.py:207 +#, python-format +msgid "%(err)s" +msgstr "" + +#: cinder/exception.py:211 +msgid "Service is unavailable at this time." +msgstr "" + +#: cinder/exception.py:215 +#, python-format +msgid "Image %(image_id)s is unacceptable: %(reason)s" +msgstr "" + +#: cinder/exception.py:219 +msgid "Expected a uuid but received %(uuid)s." +msgstr "" + +#: cinder/exception.py:223 +msgid "Resource could not be found." +msgstr "" + +#: cinder/exception.py:229 +#, python-format +msgid "Volume %(volume_id)s persistence file could not be found." +msgstr "" + +#: cinder/exception.py:233 +#, python-format +msgid "Volume %(volume_id)s could not be found." +msgstr "" + +#: cinder/exception.py:237 +#, python-format +msgid "Unable to locate account %(account_name)s on Solidfire device" +msgstr "" + +#: cinder/exception.py:242 +#, python-format +msgid "Volume not found for instance %(instance_id)s." 
+msgstr "" + +#: cinder/exception.py:246 +#, python-format +msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:251 cinder/exception.py:264 +msgid "Invalid metadata" +msgstr "" + +#: cinder/exception.py:255 cinder/exception.py:268 +msgid "Invalid metadata size" +msgstr "" + +#: cinder/exception.py:259 +#, python-format +msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:272 +#, python-format +msgid "Volume type %(volume_type_id)s could not be found." +msgstr "" + +#: cinder/exception.py:276 +#, python-format +msgid "Volume type with name %(volume_type_name)s could not be found." +msgstr "" + +#: cinder/exception.py:281 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s has no extra specs with key " +"%(extra_specs_key)s." +msgstr "" + +#: cinder/exception.py:286 +#, python-format +msgid "Snapshot %(snapshot_id)s could not be found." +msgstr "" + +#: cinder/exception.py:290 +#, python-format +msgid "deleting volume %(volume_name)s that has snapshot" +msgstr "" + +#: cinder/exception.py:294 +#, python-format +msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" +msgstr "" + +#: cinder/exception.py:299 +#, python-format +msgid "No target id found for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:303 +#, python-format +msgid "Failed to create iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:307 +#, python-format +msgid "Failed to attach iSCSI target for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:311 +#, python-format +msgid "Failed to remove iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:315 +#, python-format +msgid "No disk at %(location)s" +msgstr "" + +#: cinder/exception.py:319 +#, python-format +msgid "Invalid image href %(image_href)s." +msgstr "" + +#: cinder/exception.py:323 +#, python-format +msgid "Image %(image_id)s could not be found." +msgstr "" + +#: cinder/exception.py:327 +#, python-format +msgid "Service %(service_id)s could not be found." +msgstr "" + +#: cinder/exception.py:331 +#, python-format +msgid "Host %(host)s could not be found." +msgstr "" + +#: cinder/exception.py:335 +#, python-format +msgid "Scheduler Host Filter %(filter_name)s could not be found." +msgstr "" + +#: cinder/exception.py:339 +#, python-format +msgid "Scheduler Host Weigher %(weigher_name)s could not be found." +msgstr "" + +#: cinder/exception.py:343 +#, python-format +msgid "Could not find binary %(binary)s on host %(host)s." +msgstr "" + +#: cinder/exception.py:347 +#, python-format +msgid "Invalid reservation expiration %(expire)s." +msgstr "" + +#: cinder/exception.py:351 +#, python-format +msgid "" +"Change would make usage less than 0 for the following resources: " +"%(unders)s" +msgstr "" + +#: cinder/exception.py:356 +msgid "Quota could not be found" +msgstr "" + +#: cinder/exception.py:360 +#, python-format +msgid "Unknown quota resources %(unknown)s." +msgstr "" + +#: cinder/exception.py:364 +#, python-format +msgid "Quota for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:368 +#, python-format +msgid "Quota class %(class_name)s could not be found." +msgstr "" + +#: cinder/exception.py:372 +#, python-format +msgid "Quota usage for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:376 +#, python-format +msgid "Quota reservation %(uuid)s could not be found." 
+msgstr "" + +#: cinder/exception.py:380 +#, python-format +msgid "Quota exceeded for resources: %(overs)s" +msgstr "" + +#: cinder/exception.py:384 +#, python-format +msgid "Migration %(migration_id)s could not be found." +msgstr "" + +#: cinder/exception.py:388 +#, python-format +msgid "Migration not found for instance %(instance_id)s with status %(status)s." +msgstr "" + +#: cinder/exception.py:393 +#, python-format +msgid "File %(file_path)s could not be found." +msgstr "" + +#: cinder/exception.py:397 +#, python-format +msgid "Class %(class_name)s could not be found: %(exception)s" +msgstr "" + +#: cinder/exception.py:401 +msgid "Action not allowed." +msgstr "" + +#: cinder/exception.py:410 +#, python-format +msgid "Key pair %(key_name)s already exists." +msgstr "" + +#: cinder/exception.py:414 +#, python-format +msgid "Volume Type %(id)s already exists." +msgstr "" + +#: cinder/exception.py:418 +msgid "Migration error" +msgstr "" + +#: cinder/exception.py:422 +#, python-format +msgid "Malformed message body: %(reason)s" +msgstr "" + +#: cinder/exception.py:426 +#, python-format +msgid "Could not find config at %(path)s" +msgstr "" + +#: cinder/exception.py:430 +#, python-format +msgid "Could not load paste app '%(name)s' from %(path)s" +msgstr "" + +#: cinder/exception.py:434 +#, python-format +msgid "No valid host was found. %(reason)s" +msgstr "" + +#: cinder/exception.py:438 +#, python-format +msgid "Host %(host)s is not up or doesn't exist." +msgstr "" + +#: cinder/exception.py:442 +msgid "Quota exceeded" +msgstr "" + +#: cinder/exception.py:449 +msgid "Requested volume or snapshot exceeds allowed Gigabytes quota" +msgstr "" + +#: cinder/exception.py:454 +msgid "Maximum volume/snapshot size exceeded" +msgstr "" + +#: cinder/exception.py:458 +#, python-format +msgid "Maximum number of volumes allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:462 +#, python-format +msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:466 +#, python-format +msgid "Detected more than one volume with name %(vol_name)s" +msgstr "" + +#: cinder/exception.py:470 +#, python-format +msgid "3PAR Host already exists: %(err)s. %(info)s" +msgstr "" + +#: cinder/exception.py:474 +#, python-format +msgid "Invalid 3PAR Domain: %(err)s" +msgstr "" + +#: cinder/exception.py:478 +#, python-format +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +msgstr "" + +#: cinder/exception.py:483 +msgid "Bad response from SolidFire API" +msgstr "" + +#: cinder/exception.py:487 +#, python-format +msgid "Error in SolidFire API response: data=%(data)s" +msgstr "" + +#: cinder/exception.py:491 +#, python-format +msgid "Unknown or unsupported command %(cmd)s" +msgstr "" + +#: cinder/exception.py:495 +#, python-format +msgid "Malformed response to command %(cmd)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:499 +#, python-format +msgid "Bad HTTP response status %(status)s" +msgstr "" + +#: cinder/exception.py:503 +#, python-format +msgid "Operation failed with status=%(status)s. 
Full dump: %(data)s" +msgstr "" + +#: cinder/exception.py:507 +#, python-format +msgid "Unable to create server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:511 +#, python-format +msgid "Unable to find server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:515 +msgid "Unable to find any active VPSA controller" +msgstr "" + +#: cinder/exception.py:519 +#, python-format +msgid "Failed to retrieve attachments for volume %(name)s" +msgstr "" + +#: cinder/exception.py:523 +#, python-format +msgid "Invalid attachment info for volume %(name)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:527 +#, python-format +msgid "Instance %(instance_id)s could not be found." +msgstr "" + +#: cinder/exception.py:531 +#, python-format +msgid "Bad or unexpected response from the storage volume backend API: %(data)s" +msgstr "" + +#: cinder/exception.py:536 +msgid "Unknown NFS exception" +msgstr "" + +#: cinder/exception.py:540 +msgid "No mounted NFS shares found" +msgstr "" + +#: cinder/exception.py:544 cinder/exception.py:556 +#, python-format +msgid "There is no share which can host %(volume_size)sG" +msgstr "" + +#: cinder/exception.py:548 +msgid "Unknown Gluster exception" +msgstr "" + +#: cinder/exception.py:552 +msgid "No mounted Gluster shares found" +msgstr "" + +#: cinder/exception.py:560 +#, python-format +msgid "" +"Glance metadata cannot be updated, key %(key)s exists for volume id " +"%(volume_id)s" +msgstr "" + +#: cinder/exception.py:565 +msgid "Failed to copy image to volume" +msgstr "" + +#: cinder/exception.py:569 +#, python-format +msgid "Backup %(backup_id)s could not be found." +msgstr "" + +#: cinder/exception.py:573 +#, python-format +msgid "Invalid backup: %(reason)s" +msgstr "" + +#: cinder/exception.py:577 +msgid "Connection to swift failed" +msgstr "" + +#: cinder/flags.py:145 +msgid "Deploy v1 of the Cinder API. " +msgstr "" + +#: cinder/flags.py:148 +msgid "Deploy v2 of the Cinder API. " +msgstr "" + +#: cinder/manager.py:158 +#, python-format +msgid "Skipping %(full_task_name)s, %(ticks_to_skip)s ticks left until next run" +msgstr "" + +#: cinder/manager.py:164 +#, python-format +msgid "Running periodic task %(full_task_name)s" +msgstr "" + +#: cinder/manager.py:171 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "" + +#: cinder/manager.py:216 +msgid "Notifying Schedulers of capabilities ..." 
+msgstr "" + +#: cinder/policy.py:30 +msgid "JSON file representing policy" +msgstr "" + +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" +msgstr "" + +#: cinder/quota.py:704 +#, python-format +msgid "Created reservations %(reservations)s" +msgstr "" + +#: cinder/quota.py:726 +#, python-format +msgid "Failed to commit reservations %(reservations)s" +msgstr "" + +#: cinder/quota.py:747 +#, python-format +msgid "Failed to roll back reservations %(reservations)s" +msgstr "" + +#: cinder/service.py:124 +msgid "SIGTERM received" +msgstr "" + +#: cinder/service.py:174 cinder/openstack/common/service.py:164 +msgid "Parent process has died unexpectedly, exiting" +msgstr "" + +#: cinder/service.py:213 cinder/openstack/common/service.py:200 +msgid "Forking too fast, sleeping" +msgstr "" + +#: cinder/service.py:231 cinder/openstack/common/service.py:121 +#: cinder/openstack/common/service.py:218 +#, python-format +msgid "Caught %s, exiting" +msgstr "" + +#: cinder/service.py:236 cinder/openstack/common/service.py:223 +msgid "Unhandled exception" +msgstr "" + +#: cinder/service.py:243 cinder/openstack/common/service.py:230 +#, python-format +msgid "Started child %d" +msgstr "" + +#: cinder/service.py:253 cinder/openstack/common/service.py:240 +#, python-format +msgid "Starting %d workers" +msgstr "" + +#: cinder/service.py:272 cinder/openstack/common/service.py:257 +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "" + +#: cinder/service.py:275 +#, python-format +msgid "Child %(pid)d exited with status %(code)d" +msgstr "" + +#: cinder/service.py:278 cinder/openstack/common/service.py:265 +#, python-format +msgid "pid %d not in child list" +msgstr "" + +#: cinder/service.py:286 +#, python-format +msgid "_wait_child %d" +msgstr "" + +#: cinder/service.py:302 +#, python-format +msgid "wait wrap.failed %s" +msgstr "" + +#: cinder/service.py:310 cinder/openstack/common/service.py:293 +#, python-format +msgid "Caught %s, stopping children" +msgstr "" + +#: cinder/service.py:321 cinder/openstack/common/service.py:304 +#, python-format +msgid "Waiting on %d children to exit" +msgstr "" + +#: cinder/service.py:353 +#, python-format +msgid "Starting %(topic)s node (version %(version_string)s)" +msgstr "" + +#: cinder/service.py:367 cinder/openstack/common/rpc/service.py:47 +#, python-format +msgid "Creating Consumer connection for Service %s" +msgstr "" + +#: cinder/service.py:458 +msgid "Service killed that has no database entry" +msgstr "" + +#: cinder/service.py:495 +msgid "The service database object disappeared, Recreating it." +msgstr "" + +#: cinder/service.py:510 +msgid "Recovered model server connection!" +msgstr "" + +#: cinder/service.py:516 +msgid "model server went away" +msgstr "" + +#: cinder/service.py:608 +msgid "Full set of FLAGS:" +msgstr "" + +#: cinder/service.py:615 +#, python-format +msgid "%(flag)s : FLAG SET " +msgstr "" + +#: cinder/utils.py:96 +#, python-format +msgid "Fetching %s" +msgstr "" + +#: cinder/utils.py:147 cinder/openstack/common/processutils.py:122 +#, python-format +msgid "Got unknown keyword args to utils.execute: %r" +msgstr "" + +#: cinder/utils.py:153 +msgid "" +"The root_helper option (which lets you specify a root wrapper different " +"from cinder-rootwrap, and defaults to using sudo) is now deprecated. You " +"should use the rootwrap_config option instead." 
+msgstr "" + +#: cinder/utils.py:169 cinder/openstack/common/processutils.py:137 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "" + +#: cinder/utils.py:186 cinder/utils.py:267 +#: cinder/openstack/common/processutils.py:162 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:224 +#, python-format +msgid "Result was %s" +msgstr "" + +#: cinder/utils.py:199 cinder/openstack/common/processutils.py:174 +#, python-format +msgid "%r failed. Retrying." +msgstr "" + +#: cinder/utils.py:241 cinder/volume/drivers/san/hp/hp_3par_common.py:197 +#, python-format +msgid "Running cmd (SSH): %s" +msgstr "" + +#: cinder/utils.py:243 +msgid "Environment not supported over SSH" +msgstr "" + +#: cinder/utils.py:247 +msgid "process_input not supported over SSH" +msgstr "" + +#: cinder/utils.py:316 cinder/volume/drivers/huawei/huawei_iscsi.py:91 +msgid "Specify a password or private_key" +msgstr "" + +#: cinder/utils.py:332 cinder/volume/drivers/huawei/huawei_iscsi.py:99 +#, python-format +msgid "Error connecting via ssh: %s" +msgstr "" + +#: cinder/utils.py:373 +#, python-format +msgid "debug in callback: %s" +msgstr "" + +#: cinder/utils.py:532 +#, python-format +msgid "Link Local address is not found.:%s" +msgstr "" + +#: cinder/utils.py:535 +#, python-format +msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +msgstr "" + +#: cinder/utils.py:569 +#, python-format +msgid "Invalid backend: %s" +msgstr "" + +#: cinder/utils.py:580 +#, python-format +msgid "backend %s" +msgstr "" + +#: cinder/utils.py:630 +msgid "in looping call" +msgstr "" + +#: cinder/utils.py:813 +#, python-format +msgid "Expected object of type: %s" +msgstr "" + +#: cinder/utils.py:903 +#, python-format +msgid "timefunc: '%(name)s' took %(total_time).2f secs" +msgstr "" + +#: cinder/utils.py:1105 +#, python-format +msgid "Could not remove tmpdir: %s" +msgstr "" + +#: cinder/utils.py:1213 +#, python-format +msgid "Unknown byte multiplier: %s" +msgstr "" + +#: cinder/wsgi.py:121 +#, python-format +msgid "Unable to find cert_file : %s" +msgstr "" + +#: cinder/wsgi.py:124 +#, python-format +msgid "Unable to find ca_file : %s" +msgstr "" + +#: cinder/wsgi.py:127 +#, python-format +msgid "Unable to find key_file : %s" +msgstr "" + +#: cinder/wsgi.py:130 +msgid "" +"When running server in SSL mode, you must specify both a cert_file and " +"key_file option value in your configuration file" +msgstr "" + +#: cinder/wsgi.py:163 +#, python-format +msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" +msgstr "" + +#: cinder/wsgi.py:207 +#, python-format +msgid "Started %(name)s on %(_host)s:%(_port)s" +msgstr "" + +#: cinder/wsgi.py:226 +msgid "Stopping WSGI server." +msgstr "" + +#: cinder/wsgi.py:240 +msgid "WSGI server has stopped." +msgstr "" + +#: cinder/wsgi.py:309 +msgid "You must implement __call__" +msgstr "" + +#: cinder/api/auth.py:26 +msgid "" +"cinder.api.auth:CinderKeystoneContext is deprecated. Please use " +"cinder.api.middleware.auth:CinderKeystoneContext instead." +msgstr "" + +#: cinder/api/auth.py:34 +msgid "" +"cinder.api.auth:pipeline_factory is deprecated. Please use " +"cinder.api.middleware.auth:pipeline_factory instead." 
+msgstr "" + +#: cinder/api/common.py:63 cinder/api/common.py:97 cinder/volume/api.py:359 +msgid "limit param must be an integer" +msgstr "" + +#: cinder/api/common.py:66 cinder/api/common.py:101 cinder/volume/api.py:356 +msgid "limit param must be positive" +msgstr "" + +#: cinder/api/common.py:91 +msgid "offset param must be an integer" +msgstr "" + +#: cinder/api/common.py:105 +msgid "offset param must be positive" +msgstr "" + +#: cinder/api/common.py:133 +#, python-format +msgid "marker [%s] not found" +msgstr "" + +#: cinder/api/common.py:160 +#, python-format +msgid "href %s does not contain version" +msgstr "" + +#: cinder/api/extensions.py:184 +msgid "Initializing extension manager." +msgstr "" + +#: cinder/api/extensions.py:199 +#, python-format +msgid "Loaded extension: %s" +msgstr "" + +#: cinder/api/extensions.py:237 +#, python-format +msgid "Ext name: %s" +msgstr "" + +#: cinder/api/extensions.py:238 +#, python-format +msgid "Ext alias: %s" +msgstr "" + +#: cinder/api/extensions.py:239 +#, python-format +msgid "Ext description: %s" +msgstr "" + +#: cinder/api/extensions.py:241 +#, python-format +msgid "Ext namespace: %s" +msgstr "" + +#: cinder/api/extensions.py:242 +#, python-format +msgid "Ext updated: %s" +msgstr "" + +#: cinder/api/extensions.py:244 +#, python-format +msgid "Exception loading extension: %s" +msgstr "" + +#: cinder/api/extensions.py:258 +#, python-format +msgid "Loading extension %s" +msgstr "" + +#: cinder/api/extensions.py:264 +#, python-format +msgid "Calling extension factory %s" +msgstr "" + +#: cinder/api/extensions.py:278 +#, python-format +msgid "osapi_volume_extension is set to deprecated path: %s" +msgstr "" + +#: cinder/api/extensions.py:280 +#, python-format +msgid "" +"Please set your flag or cinder.conf settings for osapi_volume_extension " +"to: %s" +msgstr "" + +#: cinder/api/extensions.py:289 +#, python-format +msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:357 +#, python-format +msgid "Failed to load extension %(classpath)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:381 +#, python-format +msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgstr "" + +#: cinder/api/sizelimit.py:25 +msgid "" +"cinder.api.sizelimit:RequestBodySizeLimiter is deprecated. Please use " +"cinder.api.middleware.sizelimit:RequestBodySizeLimiter instead" +msgstr "" + +#: cinder/api/xmlutil.py:268 +msgid "element is not a child" +msgstr "" + +#: cinder/api/xmlutil.py:417 +msgid "root element selecting a list" +msgstr "" + +#: cinder/api/xmlutil.py:742 +#, python-format +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgstr "" + +#: cinder/api/xmlutil.py:861 +msgid "subclasses must implement construct()!" 
+msgstr "" + +#: cinder/api/contrib/admin_actions.py:80 +#, python-format +msgid "Updating %(resource)s '%(id)s' with '%(update)r'" +msgstr "" + +#: cinder/api/contrib/backups.py:128 +#, python-format +msgid "show called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:140 +#, python-format +msgid "delete called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:143 +#, python-format +msgid "Delete backup with id: %s" +msgstr "" + +#: cinder/api/contrib/backups.py:185 +#, python-format +msgid "Creating new backup %s" +msgstr "" + +#: cinder/api/contrib/backups.py:195 cinder/api/contrib/backups.py:230 +msgid "Incorrect request body format" +msgstr "" + +#: cinder/api/contrib/backups.py:201 +#, python-format +msgid "Creating backup of volume %(volume_id)s in container %(container)s" +msgstr "" + +#: cinder/api/contrib/backups.py:221 +#, python-format +msgid "Restoring backup %(backup_id)s (%(body)s)" +msgstr "" + +#: cinder/api/contrib/backups.py:234 +#, python-format +msgid "Restoring backup %(backup_id)s to volume %(volume_id)s" +msgstr "" + +#: cinder/api/contrib/extended_snapshot_attributes.py:61 +msgid "Snapshot not found." +msgstr "" + +#: cinder/api/contrib/hosts.py:83 cinder/api/openstack/wsgi.py:158 +msgid "cannot understand XML" +msgstr "" + +#: cinder/api/contrib/hosts.py:133 +#, python-format +msgid "Host '%s' could not be found." +msgstr "" + +#: cinder/api/contrib/hosts.py:162 +#, python-format +msgid "Invalid status: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:165 +#, python-format +msgid "Invalid update setting: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:177 +#, python-format +msgid "Setting host %(host)s to %(state)s." +msgstr "" + +#: cinder/api/contrib/hosts.py:202 +msgid "Describe-resource is admin only functionality" +msgstr "" + +#: cinder/api/contrib/hosts.py:210 +msgid "Host not found" +msgstr "" + +#: cinder/api/contrib/quotas.py:63 +msgid "Quota limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:102 +msgid "Request body empty" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:106 +#: cinder/api/v1/snapshot_metadata.py:77 cinder/api/v1/volume_metadata.py:77 +#: cinder/api/v2/snapshot_metadata.py:77 +msgid "Request body and URI mismatch" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:109 +#: cinder/api/v1/snapshot_metadata.py:81 cinder/api/v1/volume_metadata.py:81 +#: cinder/api/v2/snapshot_metadata.py:81 +msgid "Request body contains too many items" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:159 +msgid "Invalid request body" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:163 +msgid "No image_name was specified in request." +msgstr "" + +#: cinder/api/middleware/fault.py:45 +#, python-format +msgid "Caught error: %s" +msgstr "" + +#: cinder/api/middleware/fault.py:54 cinder/api/openstack/wsgi.py:886 +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "" + +#: cinder/api/middleware/sizelimit.py:54 cinder/api/middleware/sizelimit.py:63 +#: cinder/api/middleware/sizelimit.py:77 +msgid "Request is too large." 
+msgstr "" + +#: cinder/api/openstack/__init__.py:74 +msgid "Must specify an ExtensionManager class" +msgstr "" + +#: cinder/api/openstack/__init__.py:85 +#, python-format +msgid "Extended resource: %s" +msgstr "" + +#: cinder/api/openstack/__init__.py:110 +#, python-format +msgid "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" +msgstr "" + +#: cinder/api/openstack/__init__.py:115 +#, python-format +msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgstr "" + +#: cinder/api/openstack/__init__.py:128 +msgid "" +"cinder.api.openstack:FaultWrapper is deprecated. Please use " +"cinder.api.middleware.fault:FaultWrapper instead." +msgstr "" + +#: cinder/api/openstack/urlmap.py:25 +msgid "" +"cinder.api.openstack.urlmap:urlmap_factory is deprecated. Please use " +"cinder.api.urlmap:urlmap_factory instead." +msgstr "" + +#: cinder/api/openstack/wsgi.py:134 cinder/api/openstack/wsgi.py:537 +msgid "cannot understand JSON" +msgstr "" + +#: cinder/api/openstack/wsgi.py:542 +msgid "too many body keys" +msgstr "" + +#: cinder/api/openstack/wsgi.py:581 +#, python-format +msgid "Exception handling resource: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:586 +#, python-format +msgid "Fault thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:589 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:697 +msgid "Unrecognized Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:701 +msgid "No Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:705 +msgid "Empty body provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:816 +#, python-format +msgid "There is no such action: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:819 cinder/api/openstack/wsgi.py:832 +#: cinder/api/v1/snapshot_metadata.py:55 cinder/api/v1/snapshot_metadata.py:73 +#: cinder/api/v1/snapshot_metadata.py:98 cinder/api/v1/snapshot_metadata.py:123 +#: cinder/api/v1/volume_metadata.py:55 cinder/api/v1/volume_metadata.py:73 +#: cinder/api/v1/volume_metadata.py:98 cinder/api/v1/volume_metadata.py:123 +#: cinder/api/v2/snapshot_metadata.py:55 cinder/api/v2/snapshot_metadata.py:73 +#: cinder/api/v2/snapshot_metadata.py:98 cinder/api/v2/snapshot_metadata.py:123 +msgid "Malformed request body" +msgstr "" + +#: cinder/api/openstack/wsgi.py:829 +msgid "Unsupported Content-Type" +msgstr "" + +#: cinder/api/openstack/wsgi.py:841 +msgid "Malformed request url" +msgstr "" + +#: cinder/api/openstack/wsgi.py:889 +#, python-format +msgid "%(url)s returned a fault: %(e)s" +msgstr "" + +#: cinder/api/openstack/volume/__init__.py:25 +msgid "" +"cinder.api.openstack.volume:APIRouter is deprecated. Please use " +"cinder.api.v1.router:APIRouter instead." +msgstr "" + +#: cinder/api/openstack/volume/versions.py:26 +msgid "" +"cinder.api.openstack.volume.versions.Versions is deprecated. Please use " +"cinder.api.versions.Versions instead." +msgstr "" + +#: cinder/api/v1/limits.py:144 cinder/api/v2/limits.py:144 +#, python-format +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." +msgstr "" + +#: cinder/api/v1/limits.py:270 cinder/api/v2/limits.py:270 +msgid "This request was rate-limited." 
+msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:39 cinder/api/v1/snapshot_metadata.py:119 +#: cinder/api/v1/snapshot_metadata.py:158 cinder/api/v2/snapshot_metadata.py:39 +#: cinder/api/v2/snapshot_metadata.py:119 +#: cinder/api/v2/snapshot_metadata.py:158 +msgid "snapshot does not exist" +msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:141 +#: cinder/api/v1/snapshot_metadata.py:151 cinder/api/v1/volume_metadata.py:141 +#: cinder/api/v1/volume_metadata.py:151 cinder/api/v2/snapshot_metadata.py:141 +#: cinder/api/v2/snapshot_metadata.py:151 +msgid "Metadata item was not found" +msgstr "" + +#: cinder/api/v1/snapshots.py:123 cinder/api/v2/snapshots.py:123 +#, python-format +msgid "Delete snapshot with id: %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:173 cinder/api/v2/snapshots.py:179 +#, python-format +msgid "Create snapshot from volume %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:177 cinder/api/v2/snapshots.py:188 +#, python-format +msgid "Invalid value '%s' for force. " +msgstr "" + +#: cinder/api/v1/volume_metadata.py:39 cinder/api/v1/volume_metadata.py:119 +#: cinder/api/v1/volume_metadata.py:158 +msgid "volume does not exist" +msgstr "" + +#: cinder/api/v1/volumes.py:106 +#, python-format +msgid "vol=%s" +msgstr "" + +#: cinder/api/v1/volumes.py:236 cinder/api/v2/volumes.py:153 +#, python-format +msgid "Delete volume with id: %s" +msgstr "" + +#: cinder/api/v1/volumes.py:278 cinder/api/v1/volumes.py:282 +#: cinder/api/v2/volumes.py:209 cinder/api/v2/volumes.py:213 +msgid "Invalid imageRef provided." +msgstr "" + +#: cinder/api/v1/volumes.py:339 cinder/api/v2/volumes.py:272 +#, python-format +msgid "Create volume of %s GB" +msgstr "" + +#: cinder/api/v1/volumes.py:418 +#, python-format +msgid "Removing options '%(bad_options)s' from query" +msgstr "" + +#: cinder/api/v2/volumes.py:359 +#, python-format +msgid "Removing options '%s' from query" +msgstr "" + +#: cinder/backup/api.py:53 +msgid "Backup status must be available or error" +msgstr "" + +#: cinder/backup/api.py:78 +msgid "Volume to be backed up must be available" +msgstr "" + +#: cinder/backup/api.py:112 +msgid "Backup status must be available" +msgstr "" + +#: cinder/backup/api.py:117 +msgid "Backup to be restored has invalid size" +msgstr "" + +#: cinder/backup/api.py:126 +#, python-format +msgid "Creating volume of %(size)s GB for restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/api.py:140 +#, python-format +msgid "" +"volume size %(volume_size)d is too small to restore backup of size " +"%(size)d." 
+msgstr "" + +#: cinder/backup/api.py:145 +msgid "Volume to be restored to must be available" +msgstr "" + +#: cinder/backup/api.py:151 +msgid "Volume to be restored to is smaller than the backup to be restored" +msgstr "" + +#: cinder/backup/api.py:155 +#, python-format +msgid "Overwriting volume %(volume_id)s with restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/manager.py:79 +msgid "Cleaning up incomplete backup operations" +msgstr "" + +#: cinder/backup/manager.py:83 +#, python-format +msgid "Resetting volume %s to available (was backing-up)" +msgstr "" + +#: cinder/backup/manager.py:87 +#, python-format +msgid "Resetting volume %s to error_restoring (was restoring-backup)" +msgstr "" + +#: cinder/backup/manager.py:98 +#, python-format +msgid "Resetting backup %s to error (was creating)" +msgstr "" + +#: cinder/backup/manager.py:104 +#, python-format +msgid "Resetting backup %s to available (was restoring)" +msgstr "" + +#: cinder/backup/manager.py:109 +#, python-format +msgid "Resuming delete on backup: %s" +msgstr "" + +#: cinder/backup/manager.py:119 +#, python-format +msgid "create_backup started, backup: %(backup_id)s for volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:128 +#, python-format +msgid "" +"create_backup aborted, expected volume status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:137 +#, python-format +msgid "" +"create_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:160 +#, python-format +msgid "create_backup finished. backup: %s" +msgstr "" + +#: cinder/backup/manager.py:166 +#, python-format +msgid "" +"restore_backup started, restoring backup: %(backup_id)s to volume: " +"%(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:175 +#, python-format +msgid "" +"restore_backup aborted, expected volume status %(expected_status)s but " +"got %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:183 +#, python-format +msgid "" +"restore_backup aborted, expected backup status %(expected_status)s but " +"got %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:199 +#, python-format +msgid "" +"restore_backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]" +msgstr "" + +#: cinder/backup/manager.py:220 +#, python-format +msgid "" +"restore_backup finished, backup: %(backup_id)s restored to volume: " +"%(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:228 +#, python-format +msgid "delete_backup started, backup: %s" +msgstr "" + +#: cinder/backup/manager.py:234 +#, python-format +msgid "" +"delete_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:244 +#, python-format +msgid "" +"delete_backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]" +msgstr "" + +#: cinder/backup/manager.py:264 +#, python-format +msgid "delete_backup finished, backup %s deleted" +msgstr "" + +#: cinder/backup/services/swift.py:96 +#, python-format +msgid "unsupported compression algorithm: %s" +msgstr "" + +#: cinder/backup/services/swift.py:117 +#, python-format +msgid "_check_container_exists: container: %s" +msgstr "" + +#: cinder/backup/services/swift.py:122 +#, python-format +msgid "container %s does 
not exist" +msgstr "" + +#: cinder/backup/services/swift.py:127 +#, python-format +msgid "container %s exists" +msgstr "" + +#: cinder/backup/services/swift.py:133 +#, python-format +msgid "_create_container started, container: %(container)s,backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:148 +#, python-format +msgid "_generate_swift_object_name_prefix: %s" +msgstr "" + +#: cinder/backup/services/swift.py:159 +#, python-format +msgid "generated object list: %s" +msgstr "" + +#: cinder/backup/services/swift.py:169 +#, python-format +msgid "" +"_write_metadata started, container name: %(container)s, metadata " +"filename: %(filename)s" +msgstr "" + +#: cinder/backup/services/swift.py:184 +#, python-format +msgid "" +"error writing metadata file to swift, MD5 of metadata file in swift " +"[%(etag)s] is not the same as MD5 of metadata file sent to swift " +"[%(md5)s]" +msgstr "" + +#: cinder/backup/services/swift.py:188 +msgid "_write_metadata finished" +msgstr "" + +#: cinder/backup/services/swift.py:193 +#, python-format +msgid "" +"_read_metadata started, container name: %(container)s, metadata filename:" +" %(filename)s" +msgstr "" + +#: cinder/backup/services/swift.py:197 +#, python-format +msgid "_read_metadata finished (%s)" +msgstr "" + +#: cinder/backup/services/swift.py:207 +#, python-format +msgid "volume size %d is invalid." +msgstr "" + +#: cinder/backup/services/swift.py:221 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to swift, volume size: " +"%(volume_size_bytes)d, swift object names prefix %(object_prefix)s, " +"availability zone: %(availability_zone)s" +msgstr "" + +#: cinder/backup/services/swift.py:237 +msgid "reading chunk of data from volume" +msgstr "" + +#: cinder/backup/services/swift.py:244 +#, python-format +msgid "" +"compressed %(data_size_bytes)d bytes of data to %(comp_size_bytes)d bytes" +" using %(algorithm)s" +msgstr "" + +#: cinder/backup/services/swift.py:248 +msgid "not compressing data" +msgstr "" + +#: cinder/backup/services/swift.py:252 +msgid "About to put_object" +msgstr "" + +#: cinder/backup/services/swift.py:257 +#, python-format +msgid "swift MD5 for %(object_name)s: %(etag)s" +msgstr "" + +#: cinder/backup/services/swift.py:260 +#, python-format +msgid "backup MD5 for %(object_name)s: %(md5)s" +msgstr "" + +#: cinder/backup/services/swift.py:262 +#, python-format +msgid "" +"error writing object to swift, MD5 of object in swift %(etag)s is not the" +" same as MD5 of object sent to swift %(md5)s" +msgstr "" + +#: cinder/backup/services/swift.py:268 +msgid "Calling eventlet.sleep(0)" +msgstr "" + +#: cinder/backup/services/swift.py:276 +#, python-format +msgid "backup %s finished." +msgstr "" + +#: cinder/backup/services/swift.py:281 +#, python-format +msgid "v1 swift volume backup restore of %s started" +msgstr "" + +#: cinder/backup/services/swift.py:287 +#, python-format +msgid "metadata_object_names = %s" +msgstr "" + +#: cinder/backup/services/swift.py:293 +msgid "" +"restore_backup aborted, actual swift object list in swift does not match " +"object list stored in metadata" +msgstr "" + +#: cinder/backup/services/swift.py:299 +#, python-format +msgid "" +"restoring object from swift. 
backup: %(backup_id)s, container: " +"%(container)s, swift object name: %(object_name)s, volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:309 +#, python-format +msgid "decompressing data using %s algorithm" +msgstr "" + +#: cinder/backup/services/swift.py:323 +#, python-format +msgid "v1 swift volume backup restore of %s finished" +msgstr "" + +#: cinder/backup/services/swift.py:331 +#, python-format +msgid "" +"starting restore of backup %(object_prefix)s from swift container: " +"%(container)s, to volume %(volume_id)s, backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:339 +#, python-format +msgid "Restoring swift backup version %s" +msgstr "" + +#: cinder/backup/services/swift.py:344 +#, python-format +msgid "No support to restore swift backup version %s" +msgstr "" + +#: cinder/backup/services/swift.py:348 +#, python-format +msgid "restore %(backup_id)s to %(volume_id)s finished." +msgstr "" + +#: cinder/backup/services/swift.py:362 +msgid "swift error while listing objects, continuing with delete" +msgstr "" + +#: cinder/backup/services/swift.py:371 +#, python-format +msgid "swift error while deleting object %s, continuing with delete" +msgstr "" + +#: cinder/backup/services/swift.py:374 +#, python-format +msgid "deleted swift object: %(swift_object_name)s in container: %(container)s" +msgstr "" + +#: cinder/backup/services/swift.py:380 +#, python-format +msgid "delete %s finished" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:152 +#, python-format +msgid "Creating iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:171 cinder/brick/iscsi/iscsi.py:264 +#: cinder/brick/iscsi/iscsi.py:400 cinder/brick/iscsi/iscsi.py:409 +#, python-format +msgid "Failed to create iscsi target for volume id:%(vol_id)s." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:181 +#, python-format +msgid "" +"Failed to create iscsi target for volume id:%(vol_id)s. Please ensure " +"your tgtd config file contains 'include %(volumes_dir)s/*'" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:192 +#, python-format +msgid "Removing iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:209 cinder/brick/iscsi/iscsi.py:426 +#, python-format +msgid "Failed to remove iscsi target for volume id:%(vol_id)s." 
+msgstr "" + +#: cinder/brick/iscsi/iscsi.py:218 cinder/brick/iscsi/iscsi.py:434 +msgid "valid iqn needed for show_target" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:270 +#, python-format +msgid "Removing iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:356 +msgid "rtstool is not installed correctly" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:376 +#, python-format +msgid "Creating iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:416 +#, python-format +msgid "Removing iscsi_target: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:455 +#, python-format +msgid "Failed to add initiator iqn %s to target" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:35 +#, python-format +msgid "Unable to find Volume Group: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:41 +#, python-format +msgid "Failed to create Volume Group: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:75 +msgid "Error creating Volume Group" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:76 +#, python-format +msgid "Cmd :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:77 +#, python-format +msgid "StdOut :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:78 +#, python-format +msgid "StdErr :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:82 +#, python-format +msgid "Unable to locate Volume Group %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:265 +#, python-format +msgid "Unable to find VG: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:284 +msgid "" +"Requested to setup thin provisioning, however current LVM version does " +"not support it." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:337 +#, python-format +msgid "Unable to find LV: %s" +msgstr "" + +#: cinder/common/sqlalchemyutils.py:68 +msgid "Id not in sort_keys; is sort_keys unique?" +msgstr "" + +#: cinder/common/sqlalchemyutils.py:116 +msgid "Unknown sort direction, must be 'desc' or 'asc'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:51 +msgid "Use of empty request context is deprecated" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:178 +#, python-format +msgid "Unrecognized read_deleted value '%s'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:786 +#, python-format +msgid "Change will make usage less than 0 for the following resources: %(unders)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1797 cinder/db/sqlalchemy/api.py:1823 +#, python-format +msgid "No backend config with id %(sm_backend_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1880 +#, python-format +msgid "No sm_flavor called %(sm_flavor)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1924 +#, python-format +msgid "No sm_volume with id %(volume_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1987 +#, python-format +msgid "No backup with id %(backup_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:74 +msgid "version should be an integer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:101 +msgid "Upgrade DB using Essex release first." +msgstr "" + +#: cinder/db/sqlalchemy/session.py:71 +#, python-format +msgid "Got mysql server has gone away: %s" +msgstr "" + +#: cinder/db/sqlalchemy/session.py:132 +#, python-format +msgid "SQL connection failed. %s attempts left." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:243 +msgid "Exception while creating table." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:272 +msgid "Downgrade from initial Cinder install is unsupported." 
+msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:85 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:123 +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:82 +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:47 +#, python-format +msgid "Table |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:135 +msgid "quota_classes table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:142 +msgid "quota_usages table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:149 +msgid "reservations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:62 +msgid "Exception while creating table 'volume_glance_metedata'" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:77 +msgid "volume_glance_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:94 +msgid "backups table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:60 +msgid "snapshot_metadata table not dropped" +msgstr "" + +#: cinder/image/glance.py:151 +#, python-format +msgid "" +"Error contacting glance server '%(host)s:%(port)s' for '%(method)s', " +"%(extra)s." +msgstr "" + +#: cinder/image/image_utils.py:116 +msgid "Snapshot list encountered but no header found!" +msgstr "" + +#: cinder/image/image_utils.py:224 +msgid "'qemu-img info' parsing failed." +msgstr "" + +#: cinder/image/image_utils.py:231 +#, python-format +msgid "fmt=%(fmt)s backed by:%(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:248 +#, python-format +msgid "Converted to raw, but format is now %s" +msgstr "" + +#: cinder/image/image_utils.py:278 +#, python-format +msgid "Converted to %(f1)s, but format is now %(f2)s" +msgstr "" + +#: cinder/openstack/common/exception.py:104 +msgid "Uncaught exception" +msgstr "" + +#: cinder/openstack/common/excutils.py:48 +#, python-format +msgid "Original exception being dropped: %s" +msgstr "" + +#: cinder/openstack/common/lockutils.py:102 +#, python-format +msgid "Could not release the acquired lock `%s`" +msgstr "" + +#: cinder/openstack/common/lockutils.py:188 +#, python-format +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:199 +#, python-format +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:226 +#, python-format +msgid "Got file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:234 +#, python-format +msgid "Released file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." 
+msgstr "" + +#: cinder/openstack/common/log.py:226 +#, python-format +msgid "Deprecated: %s" +msgstr "" + +#: cinder/openstack/common/log.py:329 +#, python-format +msgid "Error loading logging config %(log_config)s: %(err_msg)s" +msgstr "" + +#: cinder/openstack/common/log.py:379 +#, python-format +msgid "syslog facility must be one of: %s" +msgstr "" + +#: cinder/openstack/common/log.py:537 +#, python-format +msgid "Fatal call to deprecated config: %(msg)s" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:84 +#, python-format +msgid "task run outlasted interval by %s sec" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:91 +msgid "in fixed duration looping call" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:131 +#, python-format +msgid "Dynamic looping call sleeping for %.02f seconds" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:138 +msgid "in dynamic looping call" +msgstr "" + +#: cinder/openstack/common/policy.py:149 +#, python-format +msgid "" +"Inheritance-based rules are deprecated; use the default brain instead of " +"%s." +msgstr "" + +#: cinder/openstack/common/policy.py:163 +#, python-format +msgid "Failed to understand rule %(match)r" +msgstr "" + +#: cinder/openstack/common/policy.py:173 +#, python-format +msgid "Inheritance-based rules are deprecated; update _check_%s" +msgstr "" + +#: cinder/openstack/common/policy.py:180 +#, python-format +msgid "No handler for matches of kind %s" +msgstr "" + +#: cinder/openstack/common/service.py:112 +#: cinder/openstack/common/service.py:275 +msgid "Full set of CONF:" +msgstr "" + +#: cinder/openstack/common/service.py:261 +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "" + +#: cinder/openstack/common/strutils.py:72 +#, python-format +msgid "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:125 +#, python-format +msgid "%s not in valid priorities" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:141 +#, python-format +msgid "" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:171 +#, python-format +msgid "Failed to load notifier %s. These notifications will not be sent." +msgstr "" + +#: cinder/openstack/common/notifier/rabbit_notifier.py:27 +msgid "The rabbit_notifier is now deprecated. Please use rpc_notifier instead." +msgstr "" + +#: cinder/openstack/common/notifier/rpc_notifier.py:45 +#: cinder/openstack/common/notifier/rpc_notifier2.py:51 +#, python-format +msgid "Could not send notification to %(topic)s. Payload=%(message)s" +msgstr "" + +#: cinder/openstack/common/rpc/__init__.py:106 +#, python-format +msgid "" +"A RPC is being made while holding a lock. The locks currently held are " +"%(locks)s. This is probably a bug. Please report it. Include the " +"following: [%(stack)s]." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:75 +msgid "Pool creating new connection" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:200 +#, python-format +msgid "no calling threads waiting for msg_id : %s, message : %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:208 +#, python-format +msgid "" +"Number of call waiters is greater than warning threshhold: %d. There " +"could be a MulticallProxyWaiter leak." 
+msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:291 +#, python-format +msgid "unpacked context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:337 +#, python-format +msgid "UNIQUE_ID is %s." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:406 +#, python-format +msgid "received %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:414 +#, python-format +msgid "no method for message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:415 +#, python-format +msgid "No method for message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:443 +#: cinder/openstack/common/rpc/impl_zmq.py:286 +#, python-format +msgid "Expected exception during message handling (%s)" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:451 +#: cinder/openstack/common/rpc/impl_zmq.py:292 +msgid "Exception during message handling" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:586 +#, python-format +msgid "Making synchronous call on %s ..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:589 +#, python-format +msgid "MSG_ID is %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:623 +#, python-format +msgid "Making asynchronous cast on %s..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:632 +msgid "Making asynchronous fanout cast..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:660 +#, python-format +msgid "Sending %(event_type)s on %(topic)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:74 +msgid "An unknown RPC related exception occurred." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:104 +#, python-format +msgid "" +"Remote error: %(exc_type)s %(value)s\n" +"%(traceback)s." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:121 +#, python-format +msgid "" +"Timeout while waiting on RPC response - topic: \"%(topic)s\", RPC method:" +" \"%(method)s\" info: \"%(info)s\"" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:137 +#: cinder/openstack/common/rpc/common.py:138 +#: cinder/openstack/common/rpc/common.py:139 +msgid "" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:143 +#, python-format +msgid "Found duplicate message(%(msg_id)s). Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:147 +msgid "Invalid reuse of an RPC connection." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:151 +#, python-format +msgid "Specified RPC version, %(version)s, not supported by this endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:156 +#, python-format +msgid "" +"Specified RPC envelope version, %(version)s, not supported by this " +"endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:280 +#, python-format +msgid "Failed to sanitize %(item)s. Key error %(err)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:302 +#, python-format +msgid "Returning exception %s to caller" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:169 +#: cinder/openstack/common/rpc/impl_qpid.py:133 +msgid "Failed to process message... skipping it." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:480 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:502 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:539 +#, python-format +msgid "" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:555 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:609 +#: cinder/openstack/common/rpc/impl_qpid.py:403 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:627 +#: cinder/openstack/common/rpc/impl_qpid.py:418 +#, python-format +msgid "Timed out waiting for RPC response: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:631 +#: cinder/openstack/common/rpc/impl_qpid.py:422 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:670 +#: cinder/openstack/common/rpc/impl_qpid.py:457 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:351 +#, python-format +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:357 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:370 +msgid "Re-established AMQP queues" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:430 +msgid "Error processing message. Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:97 +msgid "JSON serialization failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:104 +#, python-format +msgid "Deserializing: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:140 +#, python-format +msgid "Connecting to %(addr)s with %(type)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:141 +#, python-format +msgid "-> Subscribed to %(subscribe)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:142 +#, python-format +msgid "-> bind: %(bind)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:150 +msgid "Could not open socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:162 +#, python-format +msgid "Subscribing to %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:204 +msgid "You cannot recv on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:209 +msgid "You cannot send on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:273 +#, python-format +msgid "Running func with context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:311 +msgid "Sending reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:345 +msgid "RPC message did not include method." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:380 +msgid "Registering reactor" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:392 +msgid "In reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:407 +msgid "Out reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:411 +msgid "Consuming socket" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:453 +#, python-format +msgid "CONSUMER GOT %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:465 +#, python-format +msgid "Creating proxy for topic: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:471 +msgid "Topic contained dangerous characters." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:496 +#, python-format +msgid "ROUTER RELAY-OUT SUCCEEDED %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:505 +msgid "Topic socket file creation failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:510 +#, python-format +msgid "ROUTER RELAY-OUT QUEUED %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:513 +#, python-format +msgid "Local per-topic backlog buffer full for topic %(topic)s. Dropping message." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:532 +#, python-format +msgid "Could not create IPC directory %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:542 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:576 +#, python-format +msgid "CONSUMER RECEIVED DATA: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:578 +#, python-format +msgid "ROUTER RELAY-OUT %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:600 +msgid "ZMQ Envelope version unsupported or unknown." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:628 +msgid "Skipping topic registration. Already registered." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:635 +#, python-format +msgid "Consumer is a zmq.%s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:687 +msgid "Creating payload" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:700 +msgid "Creating queue socket for reply waiter" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:713 +msgid "Sending cast" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:716 +msgid "Cast sent; Waiting reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:719 +#, python-format +msgid "Received message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:720 +msgid "Unpacking response" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:729 +msgid "Unsupported or unknown ZMQ envelope returned." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:736 +msgid "RPC Message Invalid." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:760 +#, python-format +msgid "%(msg)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:763 +#, python-format +msgid "Sending message(s) to: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:767 +msgid "No matchmaker results. Not casting." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:770 +msgid "No match from matchmaker." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:53 +msgid "Match not found by MatchMaker." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:89 +msgid "Matchmaker does not implement registration or heartbeat." 
+msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:239 +#, python-format +msgid "Matchmaker unregistered: %s, %s" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:250 +msgid "Register before starting heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:343 +#: cinder/openstack/common/rpc/matchmaker.py:361 +#, python-format +msgid "No key defining hosts for topic '%s', see ringfile" +msgstr "" + +#: cinder/scheduler/chance.py:51 cinder/scheduler/simple.py:90 +msgid "Is the appropriate service running?" +msgstr "" + +#: cinder/scheduler/chance.py:56 +msgid "Could not find another host" +msgstr "" + +#: cinder/scheduler/driver.py:89 +msgid "Must implement a fallback schedule" +msgstr "" + +#: cinder/scheduler/driver.py:93 +msgid "Must implement schedule_create_volume" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:113 +msgid "Invalid value for 'scheduler_max_attempts', must be >=1" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:131 +#, python-format +msgid "" +"Error scheduling %(volume_id)s from last vol-service: %(last_host)s : " +"%(exc)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:160 +#, python-format +msgid "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:208 +#, python-format +msgid "Filtered %(hosts)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:214 +#, python-format +msgid "Choosing %(best_host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:240 +#, python-format +msgid "Ignoring %(service_name)s service update from %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:244 +#, python-format +msgid "Received %(service_name)s service update from %(host)s." +msgstr "" + +#: cinder/scheduler/host_manager.py:266 +msgid "service is down or disabled." +msgstr "" + +#: cinder/scheduler/manager.py:119 +#, python-format +msgid "Failed to schedule_%(method)s: %(ex)s" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:69 +#, python-format +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:78 +#, python-format +msgid "Could not decode scheduler options: '%(e)s'" +msgstr "" + +#: cinder/scheduler/simple.py:79 +msgid "Not enough allocatable volume gigabytes remaining" +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:37 +msgid "Free capacity not set: volume node info collection broken." +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:51 +#, python-format +msgid "" +"Insufficient free space for volume creation (requested / avail): " +"%(requested)s/%(available)s" +msgstr "" + +#: cinder/scheduler/filters/retry_filter.py:41 +#, python-format +msgid "Host %(host)s %(pass_msg)s. 
Previously tried hosts: %(hosts)s" +msgstr "" + +#: cinder/tests/fake_driver.py:45 cinder/volume/driver.py:506 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "" + +#: cinder/tests/fake_driver.py:93 cinder/tests/fake_driver.py:98 +#, python-format +msgid "LoggingVolumeDriver: %s" +msgstr "" + +#: cinder/tests/fake_utils.py:72 +#, python-format +msgid "Faking execution of cmd (subprocess): %s" +msgstr "" + +#: cinder/tests/fake_utils.py:80 +#, python-format +msgid "Faked command matched %s" +msgstr "" + +#: cinder/tests/fake_utils.py:96 +#, python-format +msgid "Faked command raised an exception %s" +msgstr "" + +#: cinder/tests/fake_utils.py:101 +#, python-format +msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" +msgstr "" + +#: cinder/tests/test_misc.py:59 +#, python-format +msgid "" +"The following migrations are missing a downgrade:\n" +"\t%s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:243 +#, python-format +msgid "unrecognized argument %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1217 +#, python-format +msgid "Run CLI command: %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1220 +#, python-format +msgid "" +"CLI output:\n" +" stdout: %(stdout)s\n" +" stderr: %(stderr)s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1225 +#, python-format +msgid "" +"CLI Exception output:\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/tests/test_volume_types.py:58 +#, python-format +msgid "Given data: %s" +msgstr "" + +#: cinder/tests/test_volume_types.py:59 +#, python-format +msgid "Result data: %s" +msgstr "" + +#: cinder/tests/api/contrib/test_backups.py:638 +msgid "Invalid input" +msgstr "" + +#: cinder/tests/integrated/test_login.py:31 +#, python-format +msgid "volume: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:37 +#, python-format +msgid "" +"%(message)s\n" +"Status Code: %(_status)s\n" +"Body: %(_body)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:46 +msgid "Authentication error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:54 +msgid "Authorization error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:62 +msgid "Item not found" +msgstr "" + +#: cinder/tests/integrated/api/client.py:104 +#, python-format +msgid "Doing %(method)s on %(relative_url)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:106 +#, python-format +msgid "Body: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:124 +#, python-format +msgid "%(auth_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:150 +#, python-format +msgid "%(relative_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:160 +msgid "Unexpected status code" +msgstr "" + +#: cinder/tests/integrated/api/client.py:167 +#, python-format +msgid "Decoding JSON: %s" +msgstr "" + +#: cinder/volume/api.py:96 +msgid "May specify only one of snapshot, imageRef or source volume" +msgstr "" + +#: cinder/volume/api.py:103 cinder/volume/api.py:449 +#: cinder/volume/manager.py:554 +msgid "status must be available" +msgstr "" + +#: cinder/volume/api.py:108 +msgid "Volume size cannot be lesser than the Snapshot size" +msgstr "" + +#: cinder/volume/api.py:117 +msgid "Unable to clone volumes that are in an error state" +msgstr "" + +#: cinder/volume/api.py:123 +msgid "Clones currently must be >= original volume size." 
+msgstr "" + +#: cinder/volume/api.py:140 +#, python-format +msgid "Volume size '%s' must be an integer and greater than 0" +msgstr "" + +#: cinder/volume/api.py:150 +msgid "Size of specified image is larger than volume size." +msgstr "" + +#: cinder/volume/api.py:154 +msgid "Image minDisk size is larger than the volume size." +msgstr "" + +#: cinder/volume/api.py:168 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/api.py:177 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create volume (%(d_consumed)d " +"volumes already consumed)" +msgstr "" + +#: cinder/volume/api.py:308 +msgid "Failed to update quota for deleting volume" +msgstr "" + +#: cinder/volume/api.py:316 +msgid "Volume status must be available or error" +msgstr "" + +#: cinder/volume/api.py:321 +#, python-format +msgid "Volume still has %d dependent snapshots" +msgstr "" + +#: cinder/volume/api.py:374 cinder/volume/api.py:432 +#: cinder/volume/volume_types.py:65 +#, python-format +msgid "Searching by: %s" +msgstr "" + +#: cinder/volume/api.py:452 +msgid "already attached" +msgstr "" + +#: cinder/volume/api.py:459 +msgid "already detached" +msgstr "" + +#: cinder/volume/api.py:470 +msgid "Volume status must be available to reserve" +msgstr "" + +#: cinder/volume/api.py:519 +msgid "must be available" +msgstr "" + +#: cinder/volume/api.py:537 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG snapshot " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/api.py:546 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create snapshot (%(d_consumed)d " +"snapshots already consumed)" +msgstr "" + +#: cinder/volume/api.py:595 +msgid "Volume Snapshot status must be available or error" +msgstr "" + +#: cinder/volume/api.py:623 +msgid "Metadata property key blank" +msgstr "" + +#: cinder/volume/api.py:627 +msgid "Metadata property key greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:631 +msgid "Metadata property value greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:716 +msgid "Volume status must be available/in-use." +msgstr "" + +#: cinder/volume/api.py:719 +msgid "Volume status is in-use." +msgstr "" + +#: cinder/volume/driver.py:94 cinder/volume/drivers/netapp/nfs.py:253 +#, python-format +msgid "Recovering from a failed execute. Try number %s" +msgstr "" + +#: cinder/volume/driver.py:221 cinder/volume/drivers/emc/emc_smis_iscsi.py:119 +msgid "ISCSI provider_location not stored, using discovery" +msgstr "" + +#: cinder/volume/driver.py:269 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/driver.py:273 cinder/volume/drivers/emc/emc_smis_iscsi.py:162 +#, python-format +msgid "ISCSI Discovery: Found %s" +msgstr "" + +#: cinder/volume/driver.py:361 +#, python-format +msgid "copy_image_to_volume %s." +msgstr "" + +#: cinder/volume/driver.py:378 +#, python-format +msgid "copy_volume_to_image %s." +msgstr "" + +#: cinder/volume/driver.py:437 +#, python-format +msgid "iSCSI device not found at %s" +msgstr "" + +#: cinder/volume/driver.py:439 +#, python-format +msgid "" +"ISCSI volume not yet found at: %(host_device)s. Will rescan & retry. 
Try" +" number: %(tries)s" +msgstr "" + +#: cinder/volume/driver.py:451 +#, python-format +msgid "Found iSCSI node %(host_device)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/volume/driver.py:469 cinder/volume/manager.py:689 +#: cinder/volume/drivers/lvm.py:560 cinder/volume/drivers/lvm.py:676 +#: cinder/volume/drivers/storwize_svc.py:1344 +#: cinder/volume/drivers/emc/emc_smis_common.py:855 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:241 +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1503 +#: cinder/volume/drivers/netapp/iscsi.py:1114 +#: cinder/volume/drivers/netapp/iscsi.py:1484 +#: cinder/volume/drivers/netapp/iscsi.py:2236 +#: cinder/volume/drivers/netapp/iscsi.py:2515 +#: cinder/volume/drivers/nexenta/volume.py:317 +msgid "Updating volume status" +msgstr "" + +#: cinder/volume/driver.py:544 +msgid "Driver must implement initialize_connection" +msgstr "" + +#: cinder/volume/manager.py:122 +#, python-format +msgid "Driver path %s is deprecated, update your configuration to the new path." +msgstr "" + +#: cinder/volume/manager.py:144 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "" + +#: cinder/volume/manager.py:149 +#, python-format +msgid "volume %s stuck in a downloading state" +msgstr "" + +#: cinder/volume/manager.py:154 +#, python-format +msgid "volume %s: skipping export" +msgstr "" + +#: cinder/volume/manager.py:156 +msgid "Resuming any in progress delete operations" +msgstr "" + +#: cinder/volume/manager.py:159 +#, python-format +msgid "Resuming delete on volume: %s" +msgstr "" + +#: cinder/volume/manager.py:219 +#, python-format +msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +msgstr "" + +#: cinder/volume/manager.py:228 +#, python-format +msgid "volume %s: creating from snapshot" +msgstr "" + +#: cinder/volume/manager.py:232 +#, python-format +msgid "volume %s: creating from existing volume" +msgstr "" + +#: cinder/volume/manager.py:236 +#, python-format +msgid "volume %s: creating from image" +msgstr "" + +#: cinder/volume/manager.py:245 +#, python-format +msgid "volume %s: creating" +msgstr "" + +#: cinder/volume/manager.py:277 +#, python-format +msgid "volume %s: creating export" +msgstr "" + +#: cinder/volume/manager.py:286 +#, python-format +msgid "volume %s: create failed" +msgstr "" + +#: cinder/volume/manager.py:316 +#, python-format +msgid "volume %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:324 +#, python-format +msgid "Error: %s" +msgstr "" + +#: cinder/volume/manager.py:350 +#, python-format +msgid "volume %s: Error trying to reschedule create" +msgstr "" + +#: cinder/volume/manager.py:368 +msgid "Retry info not present, will not reschedule" +msgstr "" + +#: cinder/volume/manager.py:372 +msgid "No request spec, will not reschedule" +msgstr "" + +#: cinder/volume/manager.py:377 +#, python-format +msgid "volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d" +msgstr "" + +#: cinder/volume/manager.py:406 cinder/volume/manager.py:419 +#, python-format +msgid "volume %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:412 +msgid "volume is not local to this node" +msgstr "" + +#: cinder/volume/manager.py:417 +#, python-format +msgid "volume %s: removing export" +msgstr "" + +#: cinder/volume/manager.py:422 +#, python-format +msgid "volume %s: volume is busy" +msgstr "" + +#: cinder/volume/manager.py:441 +msgid "Failed to update usages deleting volume" +msgstr "" + +#: cinder/volume/manager.py:445 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "bind %s: slettet" + +#: 
cinder/volume/manager.py:460 +#, python-format +msgid "snapshot %s: creating" +msgstr "" + +#: cinder/volume/manager.py:466 +#, python-format +msgid "snapshot %(snap_name)s: creating" +msgstr "" + +#: cinder/volume/manager.py:484 +#, python-format +msgid "snapshot %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:492 cinder/volume/manager.py:502 +#, python-format +msgid "snapshot %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:505 +#, python-format +msgid "snapshot %s: snapshot is busy" +msgstr "" + +#: cinder/volume/manager.py:530 +msgid "Failed to update usages deleting snapshot" +msgstr "" + +#: cinder/volume/manager.py:533 +#, python-format +msgid "snapshot %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:551 +msgid "being attached by another instance" +msgstr "" + +#: cinder/volume/manager.py:608 +#, python-format +msgid "Downloaded image %(image_id)s to %(volume_id)s successfully" +msgstr "" + +#: cinder/volume/manager.py:626 +#, python-format +msgid "Uploaded volume %(volume_id)s to image (%(image_id)s) successfully" +msgstr "" + +#: cinder/volume/manager.py:702 +msgid "Clear capabilities" +msgstr "" + +#: cinder/volume/manager.py:706 +#, python-format +msgid "Notification {%s} received" +msgstr "" + +#: cinder/volume/volume_types.py:41 +#, python-format +msgid "DB error: %s" +msgstr "" + +#: cinder/volume/volume_types.py:50 cinder/volume/volume_types.py:97 +msgid "id cannot be None" +msgstr "" + +#: cinder/volume/volume_types.py:109 +msgid "name cannot be None" +msgstr "" + +#: cinder/volume/volume_types.py:128 +#, python-format +msgid "" +"Default volume type is not found, please check default_volume_type " +"config: %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:91 +msgid "Running with CoraidDriver for ESM EtherCloud" +msgstr "" + +#: cinder/volume/drivers/coraid.py:102 +#, python-format +msgid "Update session cookie %(session)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:108 cinder/volume/drivers/coraid.py:183 +#, python-format +msgid "Message : %(message)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:125 +#, python-format +msgid "Error while trying to set group: %(message)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:128 +#, python-format +msgid "Unable to find group: %(group)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:161 +msgid "ESM urlOpen error" +msgstr "" + +#: cinder/volume/drivers/coraid.py:166 +msgid "JSON Error" +msgstr "" + +#: cinder/volume/drivers/coraid.py:170 +msgid "Request without URL" +msgstr "" + +#: cinder/volume/drivers/coraid.py:175 +#, python-format +msgid "Configure data : %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:177 +#, python-format +msgid "Configure response : %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:199 +#, python-format +msgid "Unable to retrieve volume info for volume %(volname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:289 +msgid "Cannot log in to Coraid ESM" +msgstr "" + +#: cinder/volume/drivers/coraid.py:308 +#, python-format +msgid "Failed to create volume %(volname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:321 +#, python-format +msgid "Failed to delete volume %(volname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:335 +#, python-format +msgid "Failed to Create Snapshot %(snapname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:347 +#, python-format +msgid "Failed to Delete Snapshot %(snapname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:362 +#, python-format +msgid "Failed to Create Volume from Snapshot 
%(snapname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:383 +#, python-format +msgid "" +"Failed to Initialize Connection. Volume Name: %(volname)s Shelf: " +"%(shelf)s, Lun: %(lun)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:65 +#, python-format +msgid "There's no Gluster config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:70 +#, python-format +msgid "Gluster config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:80 +msgid "mount.glusterfs is not installed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:98 cinder/volume/drivers/nfs.py:168 +#, python-format +msgid "casted to %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:108 cinder/volume/drivers/nfs.py:178 +#, python-format +msgid "Volume %s does not have provider_location specified, skipping" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:168 cinder/volume/drivers/nfs.py:237 +#, python-format +msgid "Exception during mounting %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:247 cinder/volume/drivers/nfs.py:321 +#, python-format +msgid "%s is already mounted" +msgstr "" + +#: cinder/volume/drivers/lvm.py:82 +#, python-format +msgid "volume group %s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/lvm.py:197 +#, python-format +msgid "Size for volume: %s not found, skipping secure delete." +msgstr "" + +#: cinder/volume/drivers/lvm.py:204 +#, python-format +msgid "Performing secure delete on volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:218 +#, python-format +msgid "Error unrecognized volume_clear option: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:238 +#, python-format +msgid "snapshot: %s not found, skipping delete operations" +msgstr "" + +#: cinder/volume/drivers/lvm.py:268 cinder/volume/drivers/lvm.py:656 +#, python-format +msgid "Creating clone of volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:345 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provision for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:364 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:378 +msgid "Detected inconsistency in provider_location id" +msgstr "" + +#: cinder/volume/drivers/lvm.py:430 +#, python-format +msgid "Symbolic link %s not found" +msgstr "" + +#: cinder/volume/drivers/lvm.py:510 cinder/volume/drivers/lvm.py:524 +#, python-format +msgid "Skipping remove_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:542 +#, python-format +msgid "" +"Skipping remove_export. 
No iscsi_target is presently exported for volume:" +" %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:583 +msgid "Error retrieving volume status: " +msgstr "" + +#: cinder/volume/drivers/nfs.py:141 +#, python-format +msgid "There's no NFS config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/nfs.py:146 +#, python-format +msgid "NFS config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/rbd.py:64 +#, python-format +msgid "rbd has no pool %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:87 cinder/volume/drivers/sheepdog.py:134 +msgid "error refreshing volume stats" +msgstr "" + +#: cinder/volume/drivers/rbd.py:220 +msgid "Not stored in rbd" +msgstr "" + +#: cinder/volume/drivers/rbd.py:224 +msgid "Blank components" +msgstr "" + +#: cinder/volume/drivers/rbd.py:227 +msgid "Not an rbd snapshot" +msgstr "" + +#: cinder/volume/drivers/rbd.py:242 +#, python-format +msgid "%s is in a different ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:253 +#, python-format +msgid "Unable to read image %s" +msgstr "" + +#: cinder/volume/drivers/scality.py:63 +msgid "Value required for 'scality_sofs_config'" +msgstr "" + +#: cinder/volume/drivers/scality.py:74 +#, python-format +msgid "Cannot access 'scality_sofs_config': %s" +msgstr "" + +#: cinder/volume/drivers/scality.py:80 +msgid "Cannot execute /sbin/mount.sofs" +msgstr "" + +#: cinder/volume/drivers/scality.py:101 +msgid "Cannot mount Scality SOFS, check syslog for errors" +msgstr "" + +#: cinder/volume/drivers/scality.py:135 +#, python-format +msgid "Cannot find volume dir for Scality SOFS at '%s'" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:47 +#, python-format +msgid "Sheepdog is not working: %s" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:52 +msgid "Sheepdog is not working" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:129 +#, python-format +msgid "Payload for SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:146 +#, python-format +msgid "Call to json.loads() raised an exception: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:152 +#, python-format +msgid "Results of SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:156 +#, python-format +msgid "Clone operation encountered: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:158 +#, python-format +msgid "Waiting for outstanding operation before retrying snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:164 +#, python-format +msgid "Detected xDBVersionMismatch, retry %s of 5" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:171 +#, python-format +msgid "API response: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:191 +#, python-format +msgid "Found solidfire account: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:220 +#, python-format +msgid "solidfire account: %s does not exist, create it..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:281 +#, python-format +msgid "Failed to retrieve volume SolidFire-ID: %s in get_by_account!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:338 +msgid "Failed to get model update from clone" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:364 +#, python-format +msgid "More than one valid preset was detected, using %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:397 +#, python-format +msgid "Mapped SolidFire volumeID %(sfid)s to cinder ID %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:406 +#, python-format +msgid "Volume %s, not found on SF Cluster." 
+msgstr "" + +#: cinder/volume/drivers/solidfire.py:409 +#, python-format +msgid "Found %(count)s volumes mapped to id: %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:476 +msgid "Enter SolidFire delete_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:480 +#, python-format +msgid "Account for Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:482 +msgid "This usually means the volume was never succesfully created." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:497 +#, python-format +msgid "Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:500 +msgid "Leaving SolidFire delete_volume" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:504 +msgid "Executing SolidFire ensure_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:509 +msgid "Executing SolidFire create_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:558 +msgid "Updating cluster status info" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:566 +msgid "Failed to get updated stats" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:173 +#, python-format +msgid "WWPN on node %(node)s: %(wwpn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:179 +msgid "enter: do_setup" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:190 +#, python-format +msgid "pool %s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:206 +msgid "Failed to get license information." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:265 +msgid "do_setup: No configured nodes" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:267 +msgid "leave: do_setup" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:289 +msgid "enter: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:295 +#: cinder/volume/drivers/netapp/iscsi.py:169 +#: cinder/volume/drivers/netapp/iscsi.py:1182 +#: cinder/volume/drivers/netapp/iscsi.py:1535 +#: cinder/volume/drivers/netapp/nfs.py:109 +#: cinder/volume/drivers/netapp/nfs.py:328 +#: cinder/volume/drivers/netapp/nfs.py:379 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:118 +#, python-format +msgid "%s is not set" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:301 +msgid "" +"Password or SSH private key is required for authentication: set either " +"san_password or san_private_key option" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:309 +#, python-format +msgid "" +"Illegal value %d specified for storwize_svc_flashcopy_timeout: valid " +"values are between 0 and 600" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:317 +msgid "leave: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:331 +#, python-format +msgid "ensure_export: Volume %s not found on storage" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:356 +#, python-format +msgid "enter: _get_chap_secret_for_host: host name %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:392 +#, python-format +msgid "" +"leave: _get_chap_secret_for_host: host name %(host_name)s with secret " +"%(chap_secret)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:415 +msgid "_create_host: Cannot clean host name. 
Host name is not unicode or string" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:483 +#, python-format +msgid "enter: _get_host_from_connector: prefix %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:511 +#, python-format +msgid "leave: _get_host_from_connector: host %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:523 +#, python-format +msgid "enter: _create_host: host %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:538 +msgid "_create_host: No connector ports" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:551 +#, python-format +msgid "leave: _create_host: host %(host)s - %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:574 +#, python-format +msgid "enter: _map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:609 +msgid "" +"storwize_svc_multihostmap_enabled is set to Flase, Not allow multi host " +"mapping" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:619 +#, python-format +msgid "volume %s mapping to multi host" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:625 +#, python-format +msgid "" +"leave: _map_vol_to_host: LUN %(result_lun)s, volume %(volume_name)s, host" +" %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:635 +#, python-format +msgid "enter: _delete_host: host %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:643 +#, python-format +msgid "leave: _delete_host: host %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:675 +#, python-format +msgid "enter: initialize_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:691 +msgid "_create_host failed to return the host name." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:702 +#, python-format +msgid "initialize_connection: Failed to get attributes for volume %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:709 +#, python-format +msgid "Did not find expected column name in lsvdisk: %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:711 +#, python-format +msgid "initialize_connection: Missing volume attribute for volume %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:728 +#, python-format +msgid "" +"initialize_connection: No node found in I/O group %(gid)s for volume " +"%(vol)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:736 +#, python-format +msgid "initialize_connection: Did not find a preferred node for volume %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:769 +#, python-format +msgid "" +"initialize_connection: Failed to collect return properties for volume " +"%(vol)s and connector %(conn)s.\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:774 +#, python-format +msgid "" +"leave: initialize_connection:\n" +" volume: %(vol)s\n" +" connector %(conn)s\n" +" properties: %(prop)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:792 +#, python-format +msgid "enter: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:802 +msgid "_get_host_from_connector failed to return the host name for connector" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:817 +#, python-format +msgid "" +"terminate_connection: No mapping of volume %(vol_name)s to host " +"%(host_name)s found" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:825 +#, python-format +msgid "leave: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: 
cinder/volume/drivers/storwize_svc.py:883 +msgid "protocol must be specified as ' iSCSI' or ' FC'" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:907 +#, python-format +msgid "enter: _create_vdisk: vdisk %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:945 +#, python-format +msgid "" +"_create_vdisk %(name)s - did not find success message in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:950 +#, python-format +msgid "leave: _create_vdisk: volume %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:962 +#: cinder/volume/drivers/storwize_svc.py:976 +#, python-format +msgid "" +"create FC mapping from %(source)s to %(target)s - did not find success " +"message in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:988 +#: cinder/volume/drivers/storwize_svc.py:998 +#, python-format +msgid "" +"create FC mapping from %(source)s to %(target)s - did not find mapping id" +" in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1012 +#, python-format +msgid "" +"_prepare_fc_map: Failed to prepare FlashCopy from %(source)s to " +"%(target)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1039 +#, python-format +msgid "" +"Unexpected mapping status %(status)s for mapping %(id)s. Attributes: " +"%(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1050 +#, python-format +msgid "" +"Mapping %(id)s prepare failed to complete within the allotted %(to)d " +"seconds timeout. Terminating." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1055 +#, python-format +msgid "" +"_prepare_fc_map: Failed to start FlashCopy from %(source)s to %(target)s " +"with exception %(ex)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1062 +#, python-format +msgid "_prepare_fc_map: %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1069 +#, python-format +msgid "" +"_start_fc_map: Failed to start FlashCopy from %(source)s to %(target)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1080 +#, python-format +msgid "" +"enter: _run_flashcopy: execute FlashCopy from source %(source)s to target" +" %(target)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1092 +#, python-format +msgid "leave: _run_flashcopy: FlashCopy started from %(source)s to %(target)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1100 +#, python-format +msgid "enter: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1107 +#, python-format +msgid "_create_copy: Source vdisk %s does not exist" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1119 +#, python-format +msgid "" +"_create_copy: cannot get source vdisk %(src)s capacity from vdisk " +"attributes %(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1129 +#, python-format +msgid "leave: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1134 +#, python-format +msgid "enter: _get_flashcopy_mapping_attributes: mapping %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1156 +#, python-format +msgid "" +"leave: _get_flashcopy_mapping_attributes: mapping %(fc_map_id)s, " +"attributes %(attributes)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1164 +#, python-format +msgid 
"enter: _is_vdisk_defined: vdisk %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1166 +#, python-format +msgid "leave: _is_vdisk_defined: vdisk %(vol)s with %(str)s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1194 +#, python-format +msgid "enter: _delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1199 +#, python-format +msgid "warning: Tried to delete vdisk %s but it does not exist." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1225 +#, python-format +msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1258 +#, python-format +msgid "leave: _delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1283 +msgid "create_volume_from_snapshot: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1297 +msgid "create_cloned_volume: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1361 +msgid "_update_volume_status: Could not get system name" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1373 +msgid "Could not get pool data from the storage" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1374 +msgid "_update_volume_status: Could not get storage pool data" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1412 +msgid "" +"Illegal value specified for storwize_svc_vol_rsize: set to either a " +"percentage (0-100) or -1" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1418 +msgid "" +"Illegal value specified for storwize_svc_vol_warning: set to a percentage" +" (0-100)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1425 +msgid "" +"Illegal value specified for storwize_svc_vol_grainsize: set to either 32," +" 64, 128, or 256" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1432 +msgid "System does not support compression" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1437 +msgid "If compression is set to True, rsize must also be set (not equal to -1)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1443 +#, python-format +msgid "" +"Illegal value %(prot)s specified for storwize_svc_connection_protocol: " +"valid values are %(enabled)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1452 +msgid "" +"Multipath is currently only supported for FC connections and not iSCSI. 
" +"(This is a Nova limitation.)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1464 +#, python-format +msgid "enter: _execute_command_and_parse_attributes: command %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1471 +#, python-format +msgid "" +"CLI Exception output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1488 +#, python-format +msgid "" +"leave: _execute_command_and_parse_attributes:\n" +"command: %(cmd)s\n" +"attributes: %(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1507 +#, python-format +msgid "" +"_get_hdr_dic: attribute headers and values do not match.\n" +" Headers: %(header)s\n" +" Values: %(row)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1517 +#, python-format +msgid "" +"%(fun)s: Failed with unexpected CLI output.\n" +" Command: %(cmd)s\n" +"stdout: %(out)s\n" +"stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1531 +#, python-format +msgid "" +"%(fun)s: Failed with unexpected CLI output.\n" +" Command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1539 +#, python-format +msgid "Did not find expected column in %(fun)s: %(hdr)s" +msgstr "" + +#: cinder/volume/drivers/windows.py:142 +#, python-format +msgid "Creating folder %s " +msgstr "" + +#: cinder/volume/drivers/windows.py:207 +#: cinder/volume/drivers/nexenta/volume.py:203 +#, python-format +msgid "Ignored target creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/windows.py:213 +#, python-format +msgid "Disk not found: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:218 +#, python-format +msgid "Sending %(method)s to %(url)s. Body \"%(body)s\"" +msgstr "" + +#: cinder/volume/drivers/zadara.py:240 +#, python-format +msgid "Operation completed. %(data)s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:350 +#, python-format +msgid "Volume %(name)s could not be found. It might be already deleted" +msgstr "" + +#: cinder/volume/drivers/zadara.py:447 +#, python-format +msgid "Attach properties: %(properties)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:42 +msgid "" +"Module PyWBEM not installed. Install PyWBEM using the python-pywbem " +"package." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:78 +msgid "Entering create_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:82 +#, python-format +msgid "Create Volume: %(volume)s Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:90 +#, python-format +msgid "Create Volume: %(volume)s Storage type: %(storage_type)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:97 +#, python-format +msgid "" +"Create Volume: %(volume)s Pool: %(pool)s Storage System: " +"%(storage_system)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:106 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Storage Configuration Service not " +"found for pool %(storage_type)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:114 +#, python-format +msgid "" +"Create Volume: %(name)s Method: CreateOrModifyElementFromStoragePool " +"ConfigServicie: %(service)s ElementName: %(name)s InPool: %(pool)s " +"ElementType: 5 Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:129 +#, python-format +msgid "Create Volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:136 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:143 +#, python-format +msgid "Leaving create_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:151 +msgid "Entering create_volume_from_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:156 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:166 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Snapshot Instance: %(snapshotinstance)s Storage " +"System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:176 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Create Volume from Snapshot is NOT supported on VMAX." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:187 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Cannot find Replication Service to create volume from " +"snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:196 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Method: CreateElementReplica ReplicationService: " +"%(service)s ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:217 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s " +"Snapshot:%(snapshotname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:229 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Successfully clone volume from snapshot. Finding the " +"clone relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:240 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Remove the clone relationship. Method: " +"ModifyReplicaSynchronization ReplicationService: %(service)s Operation: " +"8 Synchronization: %(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:256 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:265 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. 
Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:277 +#, python-format +msgid "" +"Leaving create_volume_from_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:286 +msgid "Entering create_cloned_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:291 +#, python-format +msgid "" +"Create a Clone from Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:301 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Source Instance: %(src_instance)s Storage System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:311 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Cannot find Replication Service to create cloned volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:320 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Method: CreateElementReplica ReplicationService: %(service)s " +"ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:341 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source " +"Volume:%(srcname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:353 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Successfully cloned volume from source volume. Finding the clone " +"relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:364 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Remove the clone relationship. Method: ModifyReplicaSynchronization " +"ReplicationService: %(service)s Operation: 8 Synchronization: " +"%(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:380 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:389 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:401 +#, python-format +msgid "" +"Leaving create_cloned_volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:410 +msgid "Entering delete_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:412 +#, python-format +msgid "Delete Volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:419 +#, python-format +msgid "Volume %(name)s not found on the array. No volume to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:429 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Storage Configuration Service not " +"found." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:437 +#, python-format +msgid "Delete Volume: %(name)s DeviceID: %(deviceid)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:441 +#, python-format +msgid "" +"Delete Volume: %(name)s Method: EMCReturnToStoragePool ConfigServic: " +"%(service)s TheElement: %(vol_instance)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:454 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:463 +#, python-format +msgid "Leaving delete_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:470 +msgid "Entering create_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:474 +#, python-format +msgid "Create snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:486 +#, python-format +msgid "Device ID: %(deviceid)s: Storage System: %(storagesystem)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:493 +#: cinder/volume/drivers/emc/emc_smis_common.py:495 +#: cinder/volume/drivers/emc/emc_smis_common.py:565 +#, python-format +msgid "Cannot find Replication Service to create snapshot for volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:500 +#, python-format +msgid "" +"Create Snapshot: Method: CreateElementReplica: Target: %(snapshot)s " +"Source: %(volume)s Replication Service: %(service)s ElementName: " +"%(elementname)s Sync Type: 7 SourceElement: %(sourceelement)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:516 +#, python-format +msgid "" +"Create Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:525 +#, python-format +msgid "Error Create Snapshot: (snapshot)s Volume: %(volume)s Error: %(errordesc)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:533 +#, python-format +msgid "" +"Leaving create_snapshot: Snapshot: %(snapshot)s Volume: %(volume)s " +"Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:539 +msgid "Entering delete_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:543 +#, python-format +msgid "Delete Snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:549 +#, python-format +msgid "" +"Delete Snapshot: %(snapshot)s: volume: %(volume)s. Finding " +"StorageSychronization_SV_SV." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:557 +#, python-format +msgid "" +"Snapshot: %(snapshot)s: volume: %(volume)s not found on the array. No " +"snapshot to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:572 +#, python-format +msgid "" +"Delete Snapshot: Target: %(snapshot)s Source: %(volume)s. Method: " +"ModifyReplicaSynchronization: Replication Service: %(service)s " +"Operation: 19 Synchronization: %(sync_name)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:588 +#, python-format +msgid "" +"Delete Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:597 +#, python-format +msgid "" +"Error Delete Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. 
Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:609 +#, python-format +msgid "" +"Leaving delete_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:619 +#, python-format +msgid "Create export: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:624 +#, python-format +msgid "create_export: Volume: %(volume)s Device ID: %(device_id)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:646 +#, python-format +msgid "" +"ExposePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(lun_name)s" +" InitiatorPortIDs: %(initiator)s DeviceAccesses: 2" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:661 +#, python-format +msgid "ExposePaths parameter LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:672 +#, python-format +msgid "Error mapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:676 +#, fuzzy, python-format +msgid "ExposePaths for volume %s completed successfully." +msgstr "bind %s: slettet" + +#: cinder/volume/drivers/emc/emc_smis_common.py:692 +#, python-format +msgid "" +"HidePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(device_id)s " +"LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:705 +#, python-format +msgid "Error unmapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:709 +#, fuzzy, python-format +msgid "HidePaths for volume %s completed successfully." +msgstr "bind %s: slettet" + +#: cinder/volume/drivers/emc/emc_smis_common.py:722 +#, python-format +msgid "" +"AddMembers: ConfigServicie: %(service)s MaskingGroup: %(masking_group)s" +" Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:735 +#, python-format +msgid "Error mapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:740 +#, fuzzy, python-format +msgid "AddMembers for volume %s completed successfully." +msgstr "bind %s: slettet" + +#: cinder/volume/drivers/emc/emc_smis_common.py:753 +#, python-format +msgid "" +"RemoveMembers: ConfigServicie: %(service)s MaskingGroup: " +"%(masking_group)s Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:766 +#, python-format +msgid "Error unmapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:771 +#, fuzzy, python-format +msgid "RemoveMembers for volume %s completed successfully." +msgstr "bind %s: slettet" + +#: cinder/volume/drivers/emc/emc_smis_common.py:777 +#, python-format +msgid "Map volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:786 +#: cinder/volume/drivers/emc/emc_smis_common.py:816 +#, python-format +msgid "Cannot find Controller Configuration Service for storage system %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:800 +#, python-format +msgid "Unmap volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:806 +#, python-format +msgid "Volume %s is not mapped. No volume to unmap." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:830 +#, python-format +msgid "Initialize connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:836 +#, python-format +msgid "Volume %s is already mapped." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:848 +#, python-format +msgid "Terminate connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:880 +#, python-format +msgid "Found Storage Type: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:883 +msgid "Storage type not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:899 +#, python-format +msgid "Found Masking View: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:902 +msgid "Masking View not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:924 +msgid "Ecom user not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:944 +#, python-format +msgid "Ecom IP: %(ecomIp)s Port: %(ecomPort)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:947 +msgid "Ecom server not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:954 +msgid "Cannot connect to ECOM server" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:966 +#, python-format +msgid "Found Replication Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:979 +#, python-format +msgid "Found Storage Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:992 +#, python-format +msgid "Found Controller Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1005 +#, python-format +msgid "Found Storage Hardware ID Management Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1049 +#, python-format +msgid "Pool %(storage_type)s is not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1055 +#, python-format +msgid "Storage system not found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1061 +#, python-format +msgid "Pool: %(pool)s SystemName: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1077 +#, python-format +msgid "Pool name: %(poolname)s System name: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1109 +#, python-format +msgid "Volume %(volumename)s not found on the array." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1112 +#, python-format +msgid "Volume name: %(volumename)s Volume instance: %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1125 +#, python-format +msgid "Source: %(volumename)s Target: %(snapshotname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1148 +#, python-format +msgid "" +"Source: %(volumename)s Target: %(snapshotname)s. Storage Synchronized " +"not found. " +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1153 +#, python-format +msgid "" +"Storage system: %(storage_system)s Storage Synchronized instance: " +"%(sync)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1179 +#, python-format +msgid "Error finding %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1183 +#, python-format +msgid "Found %(name)s: %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1243 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage_system)s " +"and initiator %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1282 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage volume %(vol)s and initiator" +" %(initiator)s is %(ctrl)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1295 +#, python-format +msgid "" +"Volume %(name)s not found on the array. Cannot determine if there are " +"volumes mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1307 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage)s and " +"%(connector)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1319 +#, python-format +msgid "" +"Found %(numVolumesMapped)d volumes on storage system %(storage)s mapped " +"to %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1354 +#, python-format +msgid "Available device number on %(storage)s: %(device)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1397 +#, python-format +msgid "Device number not found for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1402 +#, python-format +msgid "Found device number %(device)d for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1412 +#, python-format +msgid "Device info: %(data)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1434 +#, python-format +msgid "Masking view: %(view)s DeviceMaskingGroup: %(masking)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1456 +#, python-format +msgid "Found Storage Processor System: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1484 +#, python-format +msgid "" +"iSCSIProtocolEndpoint for storage system %(storage_system)s and SP %(sp)s" +" is %(endpoint)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1513 +msgid "Error finding Storage Hardware ID Service." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1519 +#, python-format +msgid "" +"EMCGetTargetEndpoints: Service: %(service)s Storage HardwareIDs: " +"%(hardwareids)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1531 +msgid "Error finding Target WWNs." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1541 +#, python-format +msgid "Add target WWN: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1543 +#, python-format +msgid "Target WWNs: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1559 +#, python-format +msgid "Storage Hardware IDs for %(wwpns)s is %(foundInstances)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:158 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:167 +#, python-format +msgid "Cannot find device number for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:197 +#, python-format +msgid "Found iSCSI endpoint: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:204 +#, python-format +msgid "ISCSI endpoint not found for SP %(sp)s on storage system %(storage)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:221 +#, python-format +msgid "ISCSI properties: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:113 +msgid "read timed out" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:138 +msgid "do_setup." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:144 +msgid "check_for_setup_error." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:149 +msgid "check_for_setup_error: Can not get device type." 
+msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:153 +#, python-format +msgid "check_for_setup_error: Device type is:%(type)s, version is:%(version)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:160 +msgid "" +"check_for_setup_error: Product version not right. Please make sure the " +"product version is V1." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:171 +msgid "_get_device_type: Storage Pool must be configured." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:180 +#, python-format +msgid "create_volume:volume name: %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:194 +#, python-format +msgid "delete_volume: volume name: %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:201 +#, python-format +msgid "delete_volume:No need to delete volume. Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:210 +#, python-format +msgid "create_export: volume name:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:214 +#, python-format +msgid "create_export:Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:234 +#, python-format +msgid "initialize_connection: volume name: %(volume)s. initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:249 +#, python-format +msgid "" +"initialize_connection:Failed to find target ip for " +"initiator:%(initiatorname)s, please check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:259 +#, python-format +msgid "initialize_connection:Failed to find target iSCSI iqn. Target IP:%(ip)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:297 +#, python-format +msgid "" +"initialize_connection:host name: %(host)s, initiator name: %(ini)s, " +"hostport name: %(port)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:307 +#, python-format +msgid "" +"initialize_connection:Failed to find the given volume. volume " +"name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:359 +#, python-format +msgid "terminate_connection:volume name: %(volume)s, initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:368 +#, python-format +msgid "terminate_connection:Host does not exist. Host name:%(host)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:377 +#, python-format +msgid "terminate_connection:volume does not exist. volume name:%(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:396 +#, python-format +msgid "" +"terminate_connection:No map between host and volume. Host " +"name:%(hostname)s, volume name:%(volumename)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:414 +#, python-format +msgid "" +"terminate_connection:No initiator is added to the host. Host " +"name:%(hostname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:427 +#, python-format +msgid "create_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:434 +msgid "create_snapshot:Device does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:440 +msgid "create_snapshot:Resource pool needs 1GB valid size at least." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:447 +#, python-format +msgid "create_snapshot:Volume does not exist. 
Volume name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:456 +#, python-format +msgid "create_snapshot:Snapshot does not exist. Snapshot name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:468 +#, python-format +msgid "delete_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:475 +msgid "delete_snapshot:Device does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:484 +#, python-format +msgid "delete_snapshot:Snapshot does not exist. snapshot name:%(snap)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:497 +#, python-format +msgid "" +"create_volume_from_snapshot:snapshot name:%(snapshot)s, volume " +"name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:505 +#, python-format +msgid "" +"create_volume_from_snapshot:Device does not support create volume from " +"snapshot. Volume name:%(volume)s, snapshot name:%(snapshot)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:516 +#, python-format +msgid "" +"create_volume_from_snapshot:Snapshot does not exist. Snapshot " +"name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:567 +msgid "Config file is wrong. Controler IP, UserName and UserPassword must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:573 +#, python-format +msgid "_check_conf_file: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:584 +#, python-format +msgid "_read_xml:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:612 +#, python-format +msgid "Write login information to xml error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:616 +#, python-format +msgid "_get_login_info error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:638 +#, python-format +msgid "" +"Config file is wrong. LUNType must be \"Thin\" or \"Thick\". " +"LUNType:%(type)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:665 +msgid "_get_lun_set_info:Use default prefetch type. Prefetch type:Intelligent." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:676 +msgid "" +"_get_lun_set_info:No available pools! Please check whether storage pool " +"is created." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:686 +#, python-format +msgid "_get_lun_set_info:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:736 +msgid "" +"_get_maximum_pool:maxpoolid is None. Please check config file and make " +"sure the \"Name\" in \"StoragePool\" is right." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:754 +#, python-format +msgid "_get_iscsi_info:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:764 +#, python-format +msgid "CLI command:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:831 +#, python-format +msgid "_execute_cli:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:838 +#, python-format +msgid "" +"_name_translate:Name in cinder: %(old)s, new name in storage system: " +"%(new)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:872 +#, python-format +msgid "" +"_create_hostgroup:Failed to Create hostgroup. Hostgroup name: %(name)s. " +"out:%(out)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:901 +#, python-format +msgid "" +"_add_host:Failed to add host to hostgroup. 
host name:%(host)s hostgroup " +"id:%(hostgroup)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:927 +#, python-format +msgid "" +"_add_initiator:Failed to add initiator. initiator name:%(name)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:941 +#, python-format +msgid "" +"_delete_initiator:ERROE:Failed to delete initiator. initiator " +"name:%(name)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:991 +#, python-format +msgid "" +"_add_hostport:Failed to add hostport. port name:%(port)s port " +"information:%(info)s host id:%(host)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1009 +#, python-format +msgid "_delete_hostport:Failed to delete host port. port id:%(portid)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1016 +#, python-format +msgid "_get_tgt_iqn:iSCSI IP is %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1049 +#, python-format +msgid "_get_tgt_iqn:iSCSI target iqn is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1086 +#, python-format +msgid "" +"_map_lun:Failed to add hostmap. hostid:%(host)s lunid:%(lun)s " +"hostlunid:%(hostlunid)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1122 +#, python-format +msgid "" +"_delete_map:There are IOs accessing the system. Retry to delete host map." +" map id:%(mapid)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1128 +#, python-format +msgid "_delete_map:Failed to delete host map. mapid:%(mapid)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1142 +#, python-format +msgid "_delete_host: Failed delete host. host id:%(hostid)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1223 +#, python-format +msgid "" +"_active_snapshot:Failed to active snapshot. snapshot id:%(name)s. " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1237 +#, python-format +msgid "" +"_disable_snapshot:Failed to disable snapshot. snapshot id:%(id)s. " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1251 +#, python-format +msgid "" +"_delete_snapshot:Failed to delete snapshot. snapshot id:%(id)s. " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1308 +#, python-format +msgid "_create_volume:Failed to Create volume. volume name:%(name)s. out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1321 +#, python-format +msgid "_delete_volume:Failed to delete volume. Volume name:%(name)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1338 +#, python-format +msgid "" +"_create_luncopy:Failed to Create LUNcopy. LUNcopy name:%(name)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1352 +#, python-format +msgid "" +"_start_luncopy:Failed to start LUNcopy. LUNcopy id:%(luncopyid)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1382 +#, python-format +msgid "" +"_wait_for_luncopy:LUNcopy status is not normal. LUNcopy " +"name:%(luncopyname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1415 +#, python-format +msgid "" +"_delete_luncopy:Failed to delete LUNcopy. LUNcopy id:%(luncopyid)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1430 +#, python-format +msgid "" +"_create_snapshot:Failed to Create snapshot. 
Snapshot name:%(name)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1468 +#, python-format +msgid "" +"_change_lun_controller:Failed to change lun owning controller. lun " +"id:%(lunid)s. new controller:%(controller)s. out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1484 +msgid "_is_resource_pool_enough:Resource pool for snapshot not be added." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:123 +#, python-format +msgid "API %(name)s failed: %(reason)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:134 +#: cinder/volume/drivers/netapp/iscsi.py:1167 +#, python-format +msgid "Using WSDL: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:144 +#, python-format +msgid "Using DFM server: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:149 +#, python-format +msgid "Using storage service: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:154 +#, python-format +msgid "Using storage service prefix: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:160 +#, python-format +msgid "Using vfiler: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:173 +msgid "Either netapp_storage_service or netapp_storage_service_prefix must be set" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:204 +msgid "Connected to DFM server" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:282 +#, python-format +msgid "Discovered %(dataset_count)s datasets and %(lun_count)s LUNs" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:317 +#, python-format +msgid "Job failed: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:346 +msgid "" +"Attempt to use volume_type without specifying " +"netapp_storage_service_prefix flag." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:350 +msgid "" +"You must set the netapp_storage_service flag in order to create volumes " +"with no volume_type." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:420 +msgid "Failed to provision dataset member" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:435 +msgid "No LUN was created by the provision job" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:463 +#: cinder/volume/drivers/netapp/iscsi.py:1250 +#, python-format +msgid "No entry in LUN table for volume %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:481 +msgid "Failed to remove and delete dataset LUN member" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:497 +msgid "Failed to remove and delete dataset Qtree member" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:538 +#, python-format +msgid "No entry in LUN table for volume %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:558 +#, python-format +msgid "Failed to get LUN details for LUN ID %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:575 +#, python-format +msgid "Failed to get host details for host ID %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:812 +#: cinder/volume/drivers/netapp/iscsi.py:859 +#, python-format +msgid "No LUN ID for volume %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:821 +#, python-format +msgid "Failed to get target portal for filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:826 +#, python-format +msgid "Failed to get target IQN for filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:930 +msgid "" +"Failed to get LUN list. Is the DFM host time-synchronized with Cinder " +"host?" 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1042 +#: cinder/volume/drivers/netapp/iscsi.py:1381 +#: cinder/volume/drivers/netapp/iscsi.py:1699 +#: cinder/volume/drivers/netapp/nfs.py:74 +#, python-format +msgid "" +"Cannot create volume of size %(vol_size)s from snapshot of size " +"%(snap_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1054 +#, python-format +msgid "" +"Cannot create volume of type %(new_type)s from snapshot of type " +"%(old_type)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1075 +#: cinder/volume/drivers/netapp/nfs.py:273 +#, python-format +msgid "" +"Cannot create clone of size %(vol_size)s from volume of size " +"%(src_vol_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1086 +#, python-format +msgid "Cannot create clone of type %(new_type)s from volume of type %(old_type)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1143 +#, python-format +msgid "No metadata property %(prop)s defined for the LUN %(name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1216 +#: cinder/volume/drivers/netapp/iscsi.py:1561 +msgid "Success getting LUN list from server" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1238 +#: cinder/volume/drivers/netapp/iscsi.py:1576 +#, python-format +msgid "Created LUN with name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1254 +#: cinder/volume/drivers/netapp/iscsi.py:1369 +#: cinder/volume/drivers/netapp/iscsi.py:1593 +#, python-format +msgid "Destroyed LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1291 +#, python-format +msgid "Mapped LUN %(handle)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1298 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(handle)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1303 +#: cinder/volume/drivers/netapp/iscsi.py:1637 +#, python-format +msgid "Failed to get LUN target details for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1307 +#: cinder/volume/drivers/netapp/iscsi.py:1648 +#, python-format +msgid "Failed to get target portal for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1311 +#: cinder/volume/drivers/netapp/iscsi.py:1651 +#, python-format +msgid "Failed to get target IQN for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1344 +#, python-format +msgid "Unmapped LUN %(handle)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1365 +#, python-format +msgid "No entry in LUN table for snapshot %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1409 +#: cinder/volume/drivers/netapp/iscsi.py:1907 +msgid "Object is not a NetApp LUN." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1419 +#: cinder/volume/drivers/netapp/iscsi.py:2188 +#, python-format +msgid "Cloned LUN with new name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1439 +#, python-format +msgid "Could not find handle for LUN named %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1458 +#: cinder/volume/drivers/netapp/iscsi.py:1936 +#, python-format +msgid "" +"Cannot clone volume of size %(vol_size)s from src volume of size " +"%(src_vol_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1518 +#, python-format +msgid "Using NetApp filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1585 +#, python-format +msgid "No entry in LUN table for volume/snapshot %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1628 +#, python-format +msgid "Mapped LUN %(name)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1632 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(name)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1688 +#, fuzzy, python-format +msgid "Snapshot %s deletion successful" +msgstr "bind %s: slettet" + +#: cinder/volume/drivers/netapp/iscsi.py:1717 +#, python-format +msgid "Unmapped LUN %(name)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1735 +#, python-format +msgid "Failed to get vol with required size for volume: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1811 +#, python-format +msgid "Error mapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1829 +#, python-format +msgid "Error unmapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1923 +#, python-format +msgid "Could not find attribute for LUN named %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2057 +#, python-format +msgid "No iscsi service found for vserver %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2192 +#, python-format +msgid "No clonned lun named %s found on the filer" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2377 +#, python-format +msgid "Error finding luns for volume %(vol)s. Verify volume exists." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2481 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s completed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2484 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s failed" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:312 +#, python-format +msgid "" +"Cloning with params ip %(host_ip)s, exp_path\n" +" %(export_path)s, vol %(volume_name)s,\n" +" clone_name %(clone_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:461 +#, python-format +msgid "No interface found on cluster for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:487 +#, python-format +msgid "" +"No volume on cluster with vserver\n" +" %(vserver)s and junction path " +"%(junction)s\n" +" " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:493 +#, python-format +msgid "" +"Cloning with params volume %(volume)s,src %(src_path)s,\n" +" dest %(dest_path)s, vserver %(vserver)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:547 +#, python-format +msgid "No storage path found for export path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:555 +#, python-format +msgid "Cloning with src %(src_path)s, dest %(dest_path)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/__init__.py:27 +msgid "Nexenta SA returned the error" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:64 +#, python-format +msgid "Sending JSON data: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:69 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:75 +msgid "No headers in server response" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:76 +msgid "Bad response from server" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:79 +#, python-format +msgid "Got response: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:99 +#, python-format +msgid "Volume %s does not exist in Nexenta SA" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:211 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:220 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:228 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:238 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:274 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:282 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:74 +#, python-format +msgid "CLIQ command returned %s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:80 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:88 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. 
Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:118 +#, python-format +msgid "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. " +"Result=%(_xml)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:171 +#, python-format +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:235 +msgid "local_path not supported" +msgstr "" + +#: cinder/volume/drivers/san/san.py:148 +#, python-format +msgid "Error running SSH command: %s" +msgstr "" + +#: cinder/volume/drivers/san/san.py:169 +msgid "Specify san_password or san_private_key" +msgstr "" + +#: cinder/volume/drivers/san/san.py:173 +msgid "san_ip must be set" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:80 +#, python-format +msgid "Cannot parse list-view output: %s" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:174 +#, python-format +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:257 +#, python-format +msgid "SSH Command failed after '%(total_attempts)r' attempts: '%(command)s'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:261 +#, python-format +msgid "Error running ssh command: %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:446 +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:104 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:103 +#, python-format +msgid "CPG (%s) doesn't exist on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:493 +#, python-format +msgid "Must specify a valid persona %(valid)s, value '%(persona)s' is invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:532 +#, python-format +msgid "" +"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " +"invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:569 +#, python-format +msgid "Volume (%s) already exists on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:622 +#, python-format +msgid "Unexpected state while cloning %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:97 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:96 +msgid "Login to 3PAR array invalid" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:218 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:237 +#, python-format +msgid "The hostname must be called '%s'" +msgstr "" + diff --git a/cinder/locale/de/LC_MESSAGES/cinder.po b/cinder/locale/de/LC_MESSAGES/cinder.po new file mode 100644 index 0000000000..d0ded78bb7 --- /dev/null +++ b/cinder/locale/de/LC_MESSAGES/cinder.po @@ -0,0 +1,5584 @@ +# German translation for cinder +# Copyright (c) 2011 Rosetta Contributors and Canonical Ltd 2011 +# This file is distributed under the same license as the cinder package. +# FIRST AUTHOR , 2011. 
+#
+msgid ""
+msgstr ""
+"Project-Id-Version: cinder\n"
+"Report-Msgid-Bugs-To: FULL NAME <EMAIL@ADDRESS>\n"
+"POT-Creation-Date: 2013-05-21 18:10+0000\n"
+"PO-Revision-Date: 2011-08-23 11:23+0000\n"
+"Last-Translator: Thierry Carrez \n"
+"Language-Team: German <de@li.org>\n"
+"Plural-Forms: nplurals=2; plural=(n != 1)\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=utf-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Generated-By: Babel 0.9.6\n"
+
+#: cinder/context.py:61
+#, python-format
+msgid "Arguments dropped when creating context: %s"
+msgstr ""
+
+#: cinder/context.py:92
+#, python-format
+msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r"
+msgstr ""
+
+#: cinder/exception.py:63
+msgid "Unexpected error while running command."
+msgstr "Unerwarteter Fehler bei Ausführung des Kommandos."
+
+#: cinder/exception.py:66
+#, python-format
+msgid ""
+"%(description)s\n"
+"Command: %(cmd)s\n"
+"Exit code: %(exit_code)s\n"
+"Stdout: %(stdout)r\n"
+"Stderr: %(stderr)r"
+msgstr ""
+"%(description)s\n"
+"Befehl: %(cmd)s\n"
+"Exit-Code: %(exit_code)s\n"
+"Stdout: %(stdout)r\n"
+"Stderr: %(stderr)r"
+
+#: cinder/exception.py:90
+msgid "DB exception wrapped."
+msgstr ""
+
+#: cinder/exception.py:104
+msgid "An unknown exception occurred."
+msgstr ""
+
+#: cinder/exception.py:125 cinder/openstack/common/rpc/common.py:86
+msgid "Exception in string format operation"
+msgstr ""
+
+#: cinder/exception.py:138
+msgid "Connection to glance failed"
+msgstr ""
+
+#: cinder/exception.py:142
+msgid "Not authorized."
+msgstr ""
+
+#: cinder/exception.py:147
+msgid "User does not have admin privileges"
+msgstr ""
+
+#: cinder/exception.py:151
+#, python-format
+msgid "Policy doesn't allow %(action)s to be performed."
+msgstr ""
+
+#: cinder/exception.py:155
+#, fuzzy, python-format
+msgid "Not authorized for image %(image_id)s."
+msgstr "keine Methode für diese Nachricht gefunden: %s"
+
+#: cinder/exception.py:159
+msgid "Unacceptable parameters."
+msgstr ""
+
+#: cinder/exception.py:164
+msgid "Invalid snapshot"
+msgstr ""
+
+#: cinder/exception.py:168
+#, python-format
+msgid "Volume %(volume_id)s is still attached, detach volume first."
+msgstr ""
+
+#: cinder/exception.py:172
+msgid "Failed to load data into json format"
+msgstr ""
+
+#: cinder/exception.py:176
+msgid "The request is invalid."
+msgstr ""
+
+#: cinder/exception.py:180
+msgid "The results are invalid."
+msgstr ""
+
+#: cinder/exception.py:184
+msgid "Invalid input received"
+msgstr ""
+
+#: cinder/exception.py:188
+msgid "Invalid volume type"
+msgstr ""
+
+#: cinder/exception.py:192
+msgid "Invalid volume"
+msgstr ""
+
+#: cinder/exception.py:196
+#, python-format
+msgid "Invalid content type %(content_type)s."
+msgstr ""
+
+#: cinder/exception.py:200
+msgid "Invalid Parameter: Unicode is not supported by the current database."
+msgstr ""
+
+#: cinder/exception.py:207
+#, python-format
+msgid "%(err)s"
+msgstr ""
+
+#: cinder/exception.py:211
+msgid "Service is unavailable at this time."
+msgstr ""
+
+#: cinder/exception.py:215
+#, python-format
+msgid "Image %(image_id)s is unacceptable: %(reason)s"
+msgstr ""
+
+#: cinder/exception.py:219
+msgid "Expected a uuid but received %(uuid)s."
+msgstr ""
+
+#: cinder/exception.py:223
+msgid "Resource could not be found."
+msgstr ""
+
+#: cinder/exception.py:229
+#, python-format
+msgid "Volume %(volume_id)s persistence file could not be found."
+msgstr ""
+
+#: cinder/exception.py:233
+#, python-format
+msgid "Volume %(volume_id)s could not be found."
+msgstr "" + +#: cinder/exception.py:237 +#, python-format +msgid "Unable to locate account %(account_name)s on Solidfire device" +msgstr "" + +#: cinder/exception.py:242 +#, python-format +msgid "Volume not found for instance %(instance_id)s." +msgstr "" + +#: cinder/exception.py:246 +#, python-format +msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:251 cinder/exception.py:264 +msgid "Invalid metadata" +msgstr "" + +#: cinder/exception.py:255 cinder/exception.py:268 +msgid "Invalid metadata size" +msgstr "" + +#: cinder/exception.py:259 +#, python-format +msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:272 +#, python-format +msgid "Volume type %(volume_type_id)s could not be found." +msgstr "" + +#: cinder/exception.py:276 +#, python-format +msgid "Volume type with name %(volume_type_name)s could not be found." +msgstr "" + +#: cinder/exception.py:281 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s has no extra specs with key " +"%(extra_specs_key)s." +msgstr "" + +#: cinder/exception.py:286 +#, python-format +msgid "Snapshot %(snapshot_id)s could not be found." +msgstr "" + +#: cinder/exception.py:290 +#, python-format +msgid "deleting volume %(volume_name)s that has snapshot" +msgstr "" + +#: cinder/exception.py:294 +#, python-format +msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" +msgstr "" + +#: cinder/exception.py:299 +#, python-format +msgid "No target id found for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:303 +#, python-format +msgid "Failed to create iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:307 +#, python-format +msgid "Failed to attach iSCSI target for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:311 +#, python-format +msgid "Failed to remove iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:315 +#, python-format +msgid "No disk at %(location)s" +msgstr "" + +#: cinder/exception.py:319 +#, python-format +msgid "Invalid image href %(image_href)s." +msgstr "" + +#: cinder/exception.py:323 +#, python-format +msgid "Image %(image_id)s could not be found." +msgstr "" + +#: cinder/exception.py:327 +#, python-format +msgid "Service %(service_id)s could not be found." +msgstr "" + +#: cinder/exception.py:331 +#, python-format +msgid "Host %(host)s could not be found." +msgstr "" + +#: cinder/exception.py:335 +#, python-format +msgid "Scheduler Host Filter %(filter_name)s could not be found." +msgstr "" + +#: cinder/exception.py:339 +#, python-format +msgid "Scheduler Host Weigher %(weigher_name)s could not be found." +msgstr "" + +#: cinder/exception.py:343 +#, python-format +msgid "Could not find binary %(binary)s on host %(host)s." +msgstr "" + +#: cinder/exception.py:347 +#, python-format +msgid "Invalid reservation expiration %(expire)s." +msgstr "" + +#: cinder/exception.py:351 +#, python-format +msgid "" +"Change would make usage less than 0 for the following resources: " +"%(unders)s" +msgstr "" + +#: cinder/exception.py:356 +msgid "Quota could not be found" +msgstr "" + +#: cinder/exception.py:360 +#, python-format +msgid "Unknown quota resources %(unknown)s." +msgstr "" + +#: cinder/exception.py:364 +#, python-format +msgid "Quota for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:368 +#, python-format +msgid "Quota class %(class_name)s could not be found." 
+msgstr "" + +#: cinder/exception.py:372 +#, python-format +msgid "Quota usage for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:376 +#, python-format +msgid "Quota reservation %(uuid)s could not be found." +msgstr "" + +#: cinder/exception.py:380 +#, python-format +msgid "Quota exceeded for resources: %(overs)s" +msgstr "" + +#: cinder/exception.py:384 +#, python-format +msgid "Migration %(migration_id)s could not be found." +msgstr "" + +#: cinder/exception.py:388 +#, python-format +msgid "Migration not found for instance %(instance_id)s with status %(status)s." +msgstr "" + +#: cinder/exception.py:393 +#, python-format +msgid "File %(file_path)s could not be found." +msgstr "" + +#: cinder/exception.py:397 +#, python-format +msgid "Class %(class_name)s could not be found: %(exception)s" +msgstr "" + +#: cinder/exception.py:401 +msgid "Action not allowed." +msgstr "" + +#: cinder/exception.py:410 +#, python-format +msgid "Key pair %(key_name)s already exists." +msgstr "" + +#: cinder/exception.py:414 +#, python-format +msgid "Volume Type %(id)s already exists." +msgstr "" + +#: cinder/exception.py:418 +msgid "Migration error" +msgstr "" + +#: cinder/exception.py:422 +#, python-format +msgid "Malformed message body: %(reason)s" +msgstr "" + +#: cinder/exception.py:426 +#, python-format +msgid "Could not find config at %(path)s" +msgstr "" + +#: cinder/exception.py:430 +#, python-format +msgid "Could not load paste app '%(name)s' from %(path)s" +msgstr "" + +#: cinder/exception.py:434 +#, python-format +msgid "No valid host was found. %(reason)s" +msgstr "" + +#: cinder/exception.py:438 +#, python-format +msgid "Host %(host)s is not up or doesn't exist." +msgstr "" + +#: cinder/exception.py:442 +msgid "Quota exceeded" +msgstr "" + +#: cinder/exception.py:449 +msgid "Requested volume or snapshot exceeds allowed Gigabytes quota" +msgstr "" + +#: cinder/exception.py:454 +msgid "Maximum volume/snapshot size exceeded" +msgstr "" + +#: cinder/exception.py:458 +#, python-format +msgid "Maximum number of volumes allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:462 +#, python-format +msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:466 +#, python-format +msgid "Detected more than one volume with name %(vol_name)s" +msgstr "" + +#: cinder/exception.py:470 +#, python-format +msgid "3PAR Host already exists: %(err)s. %(info)s" +msgstr "" + +#: cinder/exception.py:474 +#, python-format +msgid "Invalid 3PAR Domain: %(err)s" +msgstr "" + +#: cinder/exception.py:478 +#, python-format +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +msgstr "" + +#: cinder/exception.py:483 +msgid "Bad response from SolidFire API" +msgstr "" + +#: cinder/exception.py:487 +#, python-format +msgid "Error in SolidFire API response: data=%(data)s" +msgstr "" + +#: cinder/exception.py:491 +#, python-format +msgid "Unknown or unsupported command %(cmd)s" +msgstr "" + +#: cinder/exception.py:495 +#, python-format +msgid "Malformed response to command %(cmd)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:499 +#, python-format +msgid "Bad HTTP response status %(status)s" +msgstr "" + +#: cinder/exception.py:503 +#, python-format +msgid "Operation failed with status=%(status)s. 
Full dump: %(data)s" +msgstr "" + +#: cinder/exception.py:507 +#, python-format +msgid "Unable to create server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:511 +#, python-format +msgid "Unable to find server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:515 +msgid "Unable to find any active VPSA controller" +msgstr "" + +#: cinder/exception.py:519 +#, python-format +msgid "Failed to retrieve attachments for volume %(name)s" +msgstr "" + +#: cinder/exception.py:523 +#, python-format +msgid "Invalid attachment info for volume %(name)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:527 +#, python-format +msgid "Instance %(instance_id)s could not be found." +msgstr "" + +#: cinder/exception.py:531 +#, python-format +msgid "Bad or unexpected response from the storage volume backend API: %(data)s" +msgstr "" + +#: cinder/exception.py:536 +msgid "Unknown NFS exception" +msgstr "" + +#: cinder/exception.py:540 +msgid "No mounted NFS shares found" +msgstr "" + +#: cinder/exception.py:544 cinder/exception.py:556 +#, python-format +msgid "There is no share which can host %(volume_size)sG" +msgstr "" + +#: cinder/exception.py:548 +msgid "Unknown Gluster exception" +msgstr "" + +#: cinder/exception.py:552 +msgid "No mounted Gluster shares found" +msgstr "" + +#: cinder/exception.py:560 +#, python-format +msgid "" +"Glance metadata cannot be updated, key %(key)s exists for volume id " +"%(volume_id)s" +msgstr "" + +#: cinder/exception.py:565 +msgid "Failed to copy image to volume" +msgstr "" + +#: cinder/exception.py:569 +#, python-format +msgid "Backup %(backup_id)s could not be found." +msgstr "" + +#: cinder/exception.py:573 +#, python-format +msgid "Invalid backup: %(reason)s" +msgstr "" + +#: cinder/exception.py:577 +msgid "Connection to swift failed" +msgstr "" + +#: cinder/flags.py:145 +msgid "Deploy v1 of the Cinder API. " +msgstr "" + +#: cinder/flags.py:148 +msgid "Deploy v2 of the Cinder API. " +msgstr "" + +#: cinder/manager.py:158 +#, python-format +msgid "Skipping %(full_task_name)s, %(ticks_to_skip)s ticks left until next run" +msgstr "" + +#: cinder/manager.py:164 +#, python-format +msgid "Running periodic task %(full_task_name)s" +msgstr "" + +#: cinder/manager.py:171 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "" + +#: cinder/manager.py:216 +msgid "Notifying Schedulers of capabilities ..." 
+msgstr "" + +#: cinder/policy.py:30 +msgid "JSON file representing policy" +msgstr "" + +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" +msgstr "" + +#: cinder/quota.py:704 +#, python-format +msgid "Created reservations %(reservations)s" +msgstr "" + +#: cinder/quota.py:726 +#, python-format +msgid "Failed to commit reservations %(reservations)s" +msgstr "" + +#: cinder/quota.py:747 +#, python-format +msgid "Failed to roll back reservations %(reservations)s" +msgstr "" + +#: cinder/service.py:124 +msgid "SIGTERM received" +msgstr "" + +#: cinder/service.py:174 cinder/openstack/common/service.py:164 +msgid "Parent process has died unexpectedly, exiting" +msgstr "" + +#: cinder/service.py:213 cinder/openstack/common/service.py:200 +msgid "Forking too fast, sleeping" +msgstr "" + +#: cinder/service.py:231 cinder/openstack/common/service.py:121 +#: cinder/openstack/common/service.py:218 +#, python-format +msgid "Caught %s, exiting" +msgstr "" + +#: cinder/service.py:236 cinder/openstack/common/service.py:223 +msgid "Unhandled exception" +msgstr "" + +#: cinder/service.py:243 cinder/openstack/common/service.py:230 +#, python-format +msgid "Started child %d" +msgstr "" + +#: cinder/service.py:253 cinder/openstack/common/service.py:240 +#, python-format +msgid "Starting %d workers" +msgstr "" + +#: cinder/service.py:272 cinder/openstack/common/service.py:257 +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "" + +#: cinder/service.py:275 +#, python-format +msgid "Child %(pid)d exited with status %(code)d" +msgstr "" + +#: cinder/service.py:278 cinder/openstack/common/service.py:265 +#, python-format +msgid "pid %d not in child list" +msgstr "" + +#: cinder/service.py:286 +#, python-format +msgid "_wait_child %d" +msgstr "" + +#: cinder/service.py:302 +#, python-format +msgid "wait wrap.failed %s" +msgstr "" + +#: cinder/service.py:310 cinder/openstack/common/service.py:293 +#, python-format +msgid "Caught %s, stopping children" +msgstr "" + +#: cinder/service.py:321 cinder/openstack/common/service.py:304 +#, python-format +msgid "Waiting on %d children to exit" +msgstr "" + +#: cinder/service.py:353 +#, python-format +msgid "Starting %(topic)s node (version %(version_string)s)" +msgstr "" + +#: cinder/service.py:367 cinder/openstack/common/rpc/service.py:47 +#, python-format +msgid "Creating Consumer connection for Service %s" +msgstr "" + +#: cinder/service.py:458 +msgid "Service killed that has no database entry" +msgstr "" + +#: cinder/service.py:495 +msgid "The service database object disappeared, Recreating it." +msgstr "Das Service-Datenbank-Objekt ist verschwunden, es wird erneut erzeugt." + +#: cinder/service.py:510 +msgid "Recovered model server connection!" +msgstr "" + +#: cinder/service.py:516 +msgid "model server went away" +msgstr "" + +#: cinder/service.py:608 +msgid "Full set of FLAGS:" +msgstr "Alle vorhandenen FLAGS:" + +#: cinder/service.py:615 +#, python-format +msgid "%(flag)s : FLAG SET " +msgstr "" + +#: cinder/utils.py:96 +#, python-format +msgid "Fetching %s" +msgstr "Hole %s" + +#: cinder/utils.py:147 cinder/openstack/common/processutils.py:122 +#, python-format +msgid "Got unknown keyword args to utils.execute: %r" +msgstr "" + +#: cinder/utils.py:153 +msgid "" +"The root_helper option (which lets you specify a root wrapper different " +"from cinder-rootwrap, and defaults to using sudo) is now deprecated. You " +"should use the rootwrap_config option instead." 
+msgstr "" + +#: cinder/utils.py:169 cinder/openstack/common/processutils.py:137 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "Führe Kommando (subprocess) aus: %s" + +#: cinder/utils.py:186 cinder/utils.py:267 +#: cinder/openstack/common/processutils.py:162 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:224 +#, python-format +msgid "Result was %s" +msgstr "Ergebnis war %s" + +#: cinder/utils.py:199 cinder/openstack/common/processutils.py:174 +#, python-format +msgid "%r failed. Retrying." +msgstr "" + +#: cinder/utils.py:241 cinder/volume/drivers/san/hp/hp_3par_common.py:197 +#, python-format +msgid "Running cmd (SSH): %s" +msgstr "" + +#: cinder/utils.py:243 +msgid "Environment not supported over SSH" +msgstr "" + +#: cinder/utils.py:247 +msgid "process_input not supported over SSH" +msgstr "" + +#: cinder/utils.py:316 cinder/volume/drivers/huawei/huawei_iscsi.py:91 +msgid "Specify a password or private_key" +msgstr "" + +#: cinder/utils.py:332 cinder/volume/drivers/huawei/huawei_iscsi.py:99 +#, python-format +msgid "Error connecting via ssh: %s" +msgstr "" + +#: cinder/utils.py:373 +#, python-format +msgid "debug in callback: %s" +msgstr "" + +#: cinder/utils.py:532 +#, python-format +msgid "Link Local address is not found.:%s" +msgstr "" + +#: cinder/utils.py:535 +#, python-format +msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +msgstr "" + +#: cinder/utils.py:569 +#, python-format +msgid "Invalid backend: %s" +msgstr "" + +#: cinder/utils.py:580 +#, python-format +msgid "backend %s" +msgstr "" + +#: cinder/utils.py:630 +msgid "in looping call" +msgstr "" + +#: cinder/utils.py:813 +#, python-format +msgid "Expected object of type: %s" +msgstr "" + +#: cinder/utils.py:903 +#, python-format +msgid "timefunc: '%(name)s' took %(total_time).2f secs" +msgstr "" + +#: cinder/utils.py:1105 +#, python-format +msgid "Could not remove tmpdir: %s" +msgstr "" + +#: cinder/utils.py:1213 +#, python-format +msgid "Unknown byte multiplier: %s" +msgstr "" + +#: cinder/wsgi.py:121 +#, python-format +msgid "Unable to find cert_file : %s" +msgstr "" + +#: cinder/wsgi.py:124 +#, fuzzy, python-format +msgid "Unable to find ca_file : %s" +msgstr "Nicht möglich volume %s zufinden" + +#: cinder/wsgi.py:127 +#, python-format +msgid "Unable to find key_file : %s" +msgstr "" + +#: cinder/wsgi.py:130 +msgid "" +"When running server in SSL mode, you must specify both a cert_file and " +"key_file option value in your configuration file" +msgstr "" + +#: cinder/wsgi.py:163 +#, python-format +msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" +msgstr "" + +#: cinder/wsgi.py:207 +#, python-format +msgid "Started %(name)s on %(_host)s:%(_port)s" +msgstr "" + +#: cinder/wsgi.py:226 +msgid "Stopping WSGI server." +msgstr "" + +#: cinder/wsgi.py:240 +msgid "WSGI server has stopped." +msgstr "" + +#: cinder/wsgi.py:309 +msgid "You must implement __call__" +msgstr "" + +#: cinder/api/auth.py:26 +msgid "" +"cinder.api.auth:CinderKeystoneContext is deprecated. Please use " +"cinder.api.middleware.auth:CinderKeystoneContext instead." +msgstr "" + +#: cinder/api/auth.py:34 +msgid "" +"cinder.api.auth:pipeline_factory is deprecated. Please use " +"cinder.api.middleware.auth:pipeline_factory instead." 
+msgstr "" + +#: cinder/api/common.py:63 cinder/api/common.py:97 cinder/volume/api.py:359 +msgid "limit param must be an integer" +msgstr "" + +#: cinder/api/common.py:66 cinder/api/common.py:101 cinder/volume/api.py:356 +msgid "limit param must be positive" +msgstr "" + +#: cinder/api/common.py:91 +msgid "offset param must be an integer" +msgstr "" + +#: cinder/api/common.py:105 +msgid "offset param must be positive" +msgstr "" + +#: cinder/api/common.py:133 +#, python-format +msgid "marker [%s] not found" +msgstr "" + +#: cinder/api/common.py:160 +#, python-format +msgid "href %s does not contain version" +msgstr "" + +#: cinder/api/extensions.py:184 +msgid "Initializing extension manager." +msgstr "" + +#: cinder/api/extensions.py:199 +#, python-format +msgid "Loaded extension: %s" +msgstr "" + +#: cinder/api/extensions.py:237 +#, python-format +msgid "Ext name: %s" +msgstr "" + +#: cinder/api/extensions.py:238 +#, python-format +msgid "Ext alias: %s" +msgstr "" + +#: cinder/api/extensions.py:239 +#, python-format +msgid "Ext description: %s" +msgstr "" + +#: cinder/api/extensions.py:241 +#, python-format +msgid "Ext namespace: %s" +msgstr "" + +#: cinder/api/extensions.py:242 +#, python-format +msgid "Ext updated: %s" +msgstr "" + +#: cinder/api/extensions.py:244 +#, python-format +msgid "Exception loading extension: %s" +msgstr "" + +#: cinder/api/extensions.py:258 +#, python-format +msgid "Loading extension %s" +msgstr "" + +#: cinder/api/extensions.py:264 +#, python-format +msgid "Calling extension factory %s" +msgstr "" + +#: cinder/api/extensions.py:278 +#, python-format +msgid "osapi_volume_extension is set to deprecated path: %s" +msgstr "" + +#: cinder/api/extensions.py:280 +#, python-format +msgid "" +"Please set your flag or cinder.conf settings for osapi_volume_extension " +"to: %s" +msgstr "" + +#: cinder/api/extensions.py:289 +#, python-format +msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:357 +#, python-format +msgid "Failed to load extension %(classpath)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:381 +#, python-format +msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgstr "" + +#: cinder/api/sizelimit.py:25 +msgid "" +"cinder.api.sizelimit:RequestBodySizeLimiter is deprecated. Please use " +"cinder.api.middleware.sizelimit:RequestBodySizeLimiter instead" +msgstr "" + +#: cinder/api/xmlutil.py:268 +msgid "element is not a child" +msgstr "" + +#: cinder/api/xmlutil.py:417 +msgid "root element selecting a list" +msgstr "" + +#: cinder/api/xmlutil.py:742 +#, python-format +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgstr "" + +#: cinder/api/xmlutil.py:861 +msgid "subclasses must implement construct()!" 
+msgstr "" + +#: cinder/api/contrib/admin_actions.py:80 +#, python-format +msgid "Updating %(resource)s '%(id)s' with '%(update)r'" +msgstr "" + +#: cinder/api/contrib/backups.py:128 +#, python-format +msgid "show called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:140 +#, python-format +msgid "delete called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:143 +#, python-format +msgid "Delete backup with id: %s" +msgstr "" + +#: cinder/api/contrib/backups.py:185 +#, python-format +msgid "Creating new backup %s" +msgstr "" + +#: cinder/api/contrib/backups.py:195 cinder/api/contrib/backups.py:230 +msgid "Incorrect request body format" +msgstr "" + +#: cinder/api/contrib/backups.py:201 +#, python-format +msgid "Creating backup of volume %(volume_id)s in container %(container)s" +msgstr "" + +#: cinder/api/contrib/backups.py:221 +#, python-format +msgid "Restoring backup %(backup_id)s (%(body)s)" +msgstr "" + +#: cinder/api/contrib/backups.py:234 +#, python-format +msgid "Restoring backup %(backup_id)s to volume %(volume_id)s" +msgstr "" + +#: cinder/api/contrib/extended_snapshot_attributes.py:61 +msgid "Snapshot not found." +msgstr "" + +#: cinder/api/contrib/hosts.py:83 cinder/api/openstack/wsgi.py:158 +msgid "cannot understand XML" +msgstr "" + +#: cinder/api/contrib/hosts.py:133 +#, python-format +msgid "Host '%s' could not be found." +msgstr "" + +#: cinder/api/contrib/hosts.py:162 +#, python-format +msgid "Invalid status: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:165 +#, python-format +msgid "Invalid update setting: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:177 +#, python-format +msgid "Setting host %(host)s to %(state)s." +msgstr "" + +#: cinder/api/contrib/hosts.py:202 +msgid "Describe-resource is admin only functionality" +msgstr "" + +#: cinder/api/contrib/hosts.py:210 +msgid "Host not found" +msgstr "" + +#: cinder/api/contrib/quotas.py:63 +msgid "Quota limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:102 +msgid "Request body empty" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:106 +#: cinder/api/v1/snapshot_metadata.py:77 cinder/api/v1/volume_metadata.py:77 +#: cinder/api/v2/snapshot_metadata.py:77 +msgid "Request body and URI mismatch" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:109 +#: cinder/api/v1/snapshot_metadata.py:81 cinder/api/v1/volume_metadata.py:81 +#: cinder/api/v2/snapshot_metadata.py:81 +msgid "Request body contains too many items" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:159 +msgid "Invalid request body" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:163 +msgid "No image_name was specified in request." +msgstr "" + +#: cinder/api/middleware/fault.py:45 +#, python-format +msgid "Caught error: %s" +msgstr "" + +#: cinder/api/middleware/fault.py:54 cinder/api/openstack/wsgi.py:886 +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "" + +#: cinder/api/middleware/sizelimit.py:54 cinder/api/middleware/sizelimit.py:63 +#: cinder/api/middleware/sizelimit.py:77 +msgid "Request is too large." 
+msgstr "" + +#: cinder/api/openstack/__init__.py:74 +msgid "Must specify an ExtensionManager class" +msgstr "" + +#: cinder/api/openstack/__init__.py:85 +#, python-format +msgid "Extended resource: %s" +msgstr "" + +#: cinder/api/openstack/__init__.py:110 +#, python-format +msgid "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" +msgstr "" + +#: cinder/api/openstack/__init__.py:115 +#, python-format +msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgstr "" + +#: cinder/api/openstack/__init__.py:128 +msgid "" +"cinder.api.openstack:FaultWrapper is deprecated. Please use " +"cinder.api.middleware.fault:FaultWrapper instead." +msgstr "" + +#: cinder/api/openstack/urlmap.py:25 +msgid "" +"cinder.api.openstack.urlmap:urlmap_factory is deprecated. Please use " +"cinder.api.urlmap:urlmap_factory instead." +msgstr "" + +#: cinder/api/openstack/wsgi.py:134 cinder/api/openstack/wsgi.py:537 +msgid "cannot understand JSON" +msgstr "" + +#: cinder/api/openstack/wsgi.py:542 +msgid "too many body keys" +msgstr "" + +#: cinder/api/openstack/wsgi.py:581 +#, python-format +msgid "Exception handling resource: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:586 +#, python-format +msgid "Fault thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:589 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:697 +msgid "Unrecognized Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:701 +msgid "No Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:705 +msgid "Empty body provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:816 +#, python-format +msgid "There is no such action: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:819 cinder/api/openstack/wsgi.py:832 +#: cinder/api/v1/snapshot_metadata.py:55 cinder/api/v1/snapshot_metadata.py:73 +#: cinder/api/v1/snapshot_metadata.py:98 cinder/api/v1/snapshot_metadata.py:123 +#: cinder/api/v1/volume_metadata.py:55 cinder/api/v1/volume_metadata.py:73 +#: cinder/api/v1/volume_metadata.py:98 cinder/api/v1/volume_metadata.py:123 +#: cinder/api/v2/snapshot_metadata.py:55 cinder/api/v2/snapshot_metadata.py:73 +#: cinder/api/v2/snapshot_metadata.py:98 cinder/api/v2/snapshot_metadata.py:123 +msgid "Malformed request body" +msgstr "" + +#: cinder/api/openstack/wsgi.py:829 +msgid "Unsupported Content-Type" +msgstr "" + +#: cinder/api/openstack/wsgi.py:841 +msgid "Malformed request url" +msgstr "" + +#: cinder/api/openstack/wsgi.py:889 +#, python-format +msgid "%(url)s returned a fault: %(e)s" +msgstr "" + +#: cinder/api/openstack/volume/__init__.py:25 +msgid "" +"cinder.api.openstack.volume:APIRouter is deprecated. Please use " +"cinder.api.v1.router:APIRouter instead." +msgstr "" + +#: cinder/api/openstack/volume/versions.py:26 +msgid "" +"cinder.api.openstack.volume.versions.Versions is deprecated. Please use " +"cinder.api.versions.Versions instead." +msgstr "" + +#: cinder/api/v1/limits.py:144 cinder/api/v2/limits.py:144 +#, python-format +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." +msgstr "" + +#: cinder/api/v1/limits.py:270 cinder/api/v2/limits.py:270 +msgid "This request was rate-limited." 
+msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:39 cinder/api/v1/snapshot_metadata.py:119 +#: cinder/api/v1/snapshot_metadata.py:158 cinder/api/v2/snapshot_metadata.py:39 +#: cinder/api/v2/snapshot_metadata.py:119 +#: cinder/api/v2/snapshot_metadata.py:158 +msgid "snapshot does not exist" +msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:141 +#: cinder/api/v1/snapshot_metadata.py:151 cinder/api/v1/volume_metadata.py:141 +#: cinder/api/v1/volume_metadata.py:151 cinder/api/v2/snapshot_metadata.py:141 +#: cinder/api/v2/snapshot_metadata.py:151 +msgid "Metadata item was not found" +msgstr "" + +#: cinder/api/v1/snapshots.py:123 cinder/api/v2/snapshots.py:123 +#, python-format +msgid "Delete snapshot with id: %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:173 cinder/api/v2/snapshots.py:179 +#, python-format +msgid "Create snapshot from volume %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:177 cinder/api/v2/snapshots.py:188 +#, python-format +msgid "Invalid value '%s' for force. " +msgstr "" + +#: cinder/api/v1/volume_metadata.py:39 cinder/api/v1/volume_metadata.py:119 +#: cinder/api/v1/volume_metadata.py:158 +msgid "volume does not exist" +msgstr "" + +#: cinder/api/v1/volumes.py:106 +#, python-format +msgid "vol=%s" +msgstr "" + +#: cinder/api/v1/volumes.py:236 cinder/api/v2/volumes.py:153 +#, python-format +msgid "Delete volume with id: %s" +msgstr "" + +#: cinder/api/v1/volumes.py:278 cinder/api/v1/volumes.py:282 +#: cinder/api/v2/volumes.py:209 cinder/api/v2/volumes.py:213 +msgid "Invalid imageRef provided." +msgstr "" + +#: cinder/api/v1/volumes.py:339 cinder/api/v2/volumes.py:272 +#, python-format +msgid "Create volume of %s GB" +msgstr "" + +#: cinder/api/v1/volumes.py:418 +#, python-format +msgid "Removing options '%(bad_options)s' from query" +msgstr "" + +#: cinder/api/v2/volumes.py:359 +#, python-format +msgid "Removing options '%s' from query" +msgstr "" + +#: cinder/backup/api.py:53 +msgid "Backup status must be available or error" +msgstr "" + +#: cinder/backup/api.py:78 +msgid "Volume to be backed up must be available" +msgstr "" + +#: cinder/backup/api.py:112 +msgid "Backup status must be available" +msgstr "" + +#: cinder/backup/api.py:117 +msgid "Backup to be restored has invalid size" +msgstr "" + +#: cinder/backup/api.py:126 +#, python-format +msgid "Creating volume of %(size)s GB for restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/api.py:140 +#, python-format +msgid "" +"volume size %(volume_size)d is too small to restore backup of size " +"%(size)d." 
+msgstr "" + +#: cinder/backup/api.py:145 +msgid "Volume to be restored to must be available" +msgstr "" + +#: cinder/backup/api.py:151 +msgid "Volume to be restored to is smaller than the backup to be restored" +msgstr "" + +#: cinder/backup/api.py:155 +#, python-format +msgid "Overwriting volume %(volume_id)s with restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/manager.py:79 +msgid "Cleaning up incomplete backup operations" +msgstr "" + +#: cinder/backup/manager.py:83 +#, python-format +msgid "Resetting volume %s to available (was backing-up)" +msgstr "" + +#: cinder/backup/manager.py:87 +#, python-format +msgid "Resetting volume %s to error_restoring (was restoring-backup)" +msgstr "" + +#: cinder/backup/manager.py:98 +#, python-format +msgid "Resetting backup %s to error (was creating)" +msgstr "" + +#: cinder/backup/manager.py:104 +#, python-format +msgid "Resetting backup %s to available (was restoring)" +msgstr "" + +#: cinder/backup/manager.py:109 +#, python-format +msgid "Resuming delete on backup: %s" +msgstr "" + +#: cinder/backup/manager.py:119 +#, python-format +msgid "create_backup started, backup: %(backup_id)s for volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:128 +#, python-format +msgid "" +"create_backup aborted, expected volume status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:137 +#, python-format +msgid "" +"create_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:160 +#, python-format +msgid "create_backup finished. backup: %s" +msgstr "" + +#: cinder/backup/manager.py:166 +#, python-format +msgid "" +"restore_backup started, restoring backup: %(backup_id)s to volume: " +"%(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:175 +#, python-format +msgid "" +"restore_backup aborted, expected volume status %(expected_status)s but " +"got %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:183 +#, python-format +msgid "" +"restore_backup aborted, expected backup status %(expected_status)s but " +"got %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:199 +#, python-format +msgid "" +"restore_backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]" +msgstr "" + +#: cinder/backup/manager.py:220 +#, python-format +msgid "" +"restore_backup finished, backup: %(backup_id)s restored to volume: " +"%(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:228 +#, python-format +msgid "delete_backup started, backup: %s" +msgstr "" + +#: cinder/backup/manager.py:234 +#, python-format +msgid "" +"delete_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:244 +#, python-format +msgid "" +"delete_backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]" +msgstr "" + +#: cinder/backup/manager.py:264 +#, python-format +msgid "delete_backup finished, backup %s deleted" +msgstr "" + +#: cinder/backup/services/swift.py:96 +#, python-format +msgid "unsupported compression algorithm: %s" +msgstr "" + +#: cinder/backup/services/swift.py:117 +#, python-format +msgid "_check_container_exists: container: %s" +msgstr "" + +#: cinder/backup/services/swift.py:122 +#, python-format +msgid "container %s does 
not exist" +msgstr "" + +#: cinder/backup/services/swift.py:127 +#, python-format +msgid "container %s exists" +msgstr "" + +#: cinder/backup/services/swift.py:133 +#, python-format +msgid "_create_container started, container: %(container)s,backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:148 +#, python-format +msgid "_generate_swift_object_name_prefix: %s" +msgstr "" + +#: cinder/backup/services/swift.py:159 +#, python-format +msgid "generated object list: %s" +msgstr "" + +#: cinder/backup/services/swift.py:169 +#, python-format +msgid "" +"_write_metadata started, container name: %(container)s, metadata " +"filename: %(filename)s" +msgstr "" + +#: cinder/backup/services/swift.py:184 +#, python-format +msgid "" +"error writing metadata file to swift, MD5 of metadata file in swift " +"[%(etag)s] is not the same as MD5 of metadata file sent to swift " +"[%(md5)s]" +msgstr "" + +#: cinder/backup/services/swift.py:188 +msgid "_write_metadata finished" +msgstr "" + +#: cinder/backup/services/swift.py:193 +#, python-format +msgid "" +"_read_metadata started, container name: %(container)s, metadata filename:" +" %(filename)s" +msgstr "" + +#: cinder/backup/services/swift.py:197 +#, python-format +msgid "_read_metadata finished (%s)" +msgstr "" + +#: cinder/backup/services/swift.py:207 +#, python-format +msgid "volume size %d is invalid." +msgstr "" + +#: cinder/backup/services/swift.py:221 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to swift, volume size: " +"%(volume_size_bytes)d, swift object names prefix %(object_prefix)s, " +"availability zone: %(availability_zone)s" +msgstr "" + +#: cinder/backup/services/swift.py:237 +msgid "reading chunk of data from volume" +msgstr "" + +#: cinder/backup/services/swift.py:244 +#, python-format +msgid "" +"compressed %(data_size_bytes)d bytes of data to %(comp_size_bytes)d bytes" +" using %(algorithm)s" +msgstr "" + +#: cinder/backup/services/swift.py:248 +msgid "not compressing data" +msgstr "" + +#: cinder/backup/services/swift.py:252 +msgid "About to put_object" +msgstr "" + +#: cinder/backup/services/swift.py:257 +#, python-format +msgid "swift MD5 for %(object_name)s: %(etag)s" +msgstr "" + +#: cinder/backup/services/swift.py:260 +#, python-format +msgid "backup MD5 for %(object_name)s: %(md5)s" +msgstr "" + +#: cinder/backup/services/swift.py:262 +#, python-format +msgid "" +"error writing object to swift, MD5 of object in swift %(etag)s is not the" +" same as MD5 of object sent to swift %(md5)s" +msgstr "" + +#: cinder/backup/services/swift.py:268 +msgid "Calling eventlet.sleep(0)" +msgstr "" + +#: cinder/backup/services/swift.py:276 +#, python-format +msgid "backup %s finished." +msgstr "" + +#: cinder/backup/services/swift.py:281 +#, python-format +msgid "v1 swift volume backup restore of %s started" +msgstr "" + +#: cinder/backup/services/swift.py:287 +#, python-format +msgid "metadata_object_names = %s" +msgstr "" + +#: cinder/backup/services/swift.py:293 +msgid "" +"restore_backup aborted, actual swift object list in swift does not match " +"object list stored in metadata" +msgstr "" + +#: cinder/backup/services/swift.py:299 +#, python-format +msgid "" +"restoring object from swift. 
backup: %(backup_id)s, container: " +"%(container)s, swift object name: %(object_name)s, volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:309 +#, python-format +msgid "decompressing data using %s algorithm" +msgstr "" + +#: cinder/backup/services/swift.py:323 +#, python-format +msgid "v1 swift volume backup restore of %s finished" +msgstr "" + +#: cinder/backup/services/swift.py:331 +#, python-format +msgid "" +"starting restore of backup %(object_prefix)s from swift container: " +"%(container)s, to volume %(volume_id)s, backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:339 +#, python-format +msgid "Restoring swift backup version %s" +msgstr "" + +#: cinder/backup/services/swift.py:344 +#, python-format +msgid "No support to restore swift backup version %s" +msgstr "" + +#: cinder/backup/services/swift.py:348 +#, python-format +msgid "restore %(backup_id)s to %(volume_id)s finished." +msgstr "" + +#: cinder/backup/services/swift.py:362 +msgid "swift error while listing objects, continuing with delete" +msgstr "" + +#: cinder/backup/services/swift.py:371 +#, python-format +msgid "swift error while deleting object %s, continuing with delete" +msgstr "" + +#: cinder/backup/services/swift.py:374 +#, python-format +msgid "deleted swift object: %(swift_object_name)s in container: %(container)s" +msgstr "" + +#: cinder/backup/services/swift.py:380 +#, python-format +msgid "delete %s finished" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:152 +#, python-format +msgid "Creating iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:171 cinder/brick/iscsi/iscsi.py:264 +#: cinder/brick/iscsi/iscsi.py:400 cinder/brick/iscsi/iscsi.py:409 +#, python-format +msgid "Failed to create iscsi target for volume id:%(vol_id)s." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:181 +#, python-format +msgid "" +"Failed to create iscsi target for volume id:%(vol_id)s. Please ensure " +"your tgtd config file contains 'include %(volumes_dir)s/*'" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:192 +#, python-format +msgid "Removing iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:209 cinder/brick/iscsi/iscsi.py:426 +#, python-format +msgid "Failed to remove iscsi target for volume id:%(vol_id)s." 
+msgstr "" + +#: cinder/brick/iscsi/iscsi.py:218 cinder/brick/iscsi/iscsi.py:434 +msgid "valid iqn needed for show_target" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:270 +#, python-format +msgid "Removing iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:356 +msgid "rtstool is not installed correctly" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:376 +#, python-format +msgid "Creating iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:416 +#, python-format +msgid "Removing iscsi_target: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:455 +#, python-format +msgid "Failed to add initiator iqn %s to target" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:35 +#, fuzzy, python-format +msgid "Unable to find Volume Group: %s" +msgstr "Nicht möglich volume %s zufinden" + +#: cinder/brick/local_dev/lvm.py:41 +#, fuzzy, python-format +msgid "Failed to create Volume Group: %s" +msgstr "Nicht möglich volume %s zufinden" + +#: cinder/brick/local_dev/lvm.py:75 +msgid "Error creating Volume Group" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:76 +#, python-format +msgid "Cmd :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:77 +#, python-format +msgid "StdOut :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:78 +#, python-format +msgid "StdErr :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:82 +#, fuzzy, python-format +msgid "Unable to locate Volume Group %s" +msgstr "Nicht möglich volume %s zufinden" + +#: cinder/brick/local_dev/lvm.py:265 +#, python-format +msgid "Unable to find VG: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:284 +msgid "" +"Requested to setup thin provisioning, however current LVM version does " +"not support it." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:337 +#, python-format +msgid "Unable to find LV: %s" +msgstr "" + +#: cinder/common/sqlalchemyutils.py:68 +msgid "Id not in sort_keys; is sort_keys unique?" +msgstr "" + +#: cinder/common/sqlalchemyutils.py:116 +msgid "Unknown sort direction, must be 'desc' or 'asc'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:51 +msgid "Use of empty request context is deprecated" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:178 +#, python-format +msgid "Unrecognized read_deleted value '%s'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:786 +#, python-format +msgid "Change will make usage less than 0 for the following resources: %(unders)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1797 cinder/db/sqlalchemy/api.py:1823 +#, python-format +msgid "No backend config with id %(sm_backend_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1880 +#, python-format +msgid "No sm_flavor called %(sm_flavor)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1924 +#, python-format +msgid "No sm_volume with id %(volume_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1987 +#, python-format +msgid "No backup with id %(backup_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:74 +msgid "version should be an integer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:101 +msgid "Upgrade DB using Essex release first." +msgstr "" + +#: cinder/db/sqlalchemy/session.py:71 +#, python-format +msgid "Got mysql server has gone away: %s" +msgstr "" + +#: cinder/db/sqlalchemy/session.py:132 +#, python-format +msgid "SQL connection failed. %s attempts left." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:243 +msgid "Exception while creating table." 
+msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:272 +msgid "Downgrade from initial Cinder install is unsupported." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:85 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:123 +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:82 +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:47 +#, python-format +msgid "Table |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:135 +msgid "quota_classes table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:142 +msgid "quota_usages table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:149 +msgid "reservations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:62 +msgid "Exception while creating table 'volume_glance_metedata'" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:77 +msgid "volume_glance_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:94 +msgid "backups table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:60 +msgid "snapshot_metadata table not dropped" +msgstr "" + +#: cinder/image/glance.py:151 +#, python-format +msgid "" +"Error contacting glance server '%(host)s:%(port)s' for '%(method)s', " +"%(extra)s." +msgstr "" + +#: cinder/image/image_utils.py:116 +msgid "Snapshot list encountered but no header found!" +msgstr "" + +#: cinder/image/image_utils.py:224 +msgid "'qemu-img info' parsing failed." +msgstr "" + +#: cinder/image/image_utils.py:231 +#, python-format +msgid "fmt=%(fmt)s backed by:%(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:248 +#, python-format +msgid "Converted to raw, but format is now %s" +msgstr "" + +#: cinder/image/image_utils.py:278 +#, python-format +msgid "Converted to %(f1)s, but format is now %(f2)s" +msgstr "" + +#: cinder/openstack/common/exception.py:104 +msgid "Uncaught exception" +msgstr "" + +#: cinder/openstack/common/excutils.py:48 +#, python-format +msgid "Original exception being dropped: %s" +msgstr "" + +#: cinder/openstack/common/lockutils.py:102 +#, python-format +msgid "Could not release the acquired lock `%s`" +msgstr "" + +#: cinder/openstack/common/lockutils.py:188 +#, python-format +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:199 +#, python-format +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:226 +#, python-format +msgid "Got file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:234 +#, python-format +msgid "Released file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." 
+msgstr "" + +#: cinder/openstack/common/log.py:226 +#, python-format +msgid "Deprecated: %s" +msgstr "" + +#: cinder/openstack/common/log.py:329 +#, python-format +msgid "Error loading logging config %(log_config)s: %(err_msg)s" +msgstr "" + +#: cinder/openstack/common/log.py:379 +#, python-format +msgid "syslog facility must be one of: %s" +msgstr "" + +#: cinder/openstack/common/log.py:537 +#, python-format +msgid "Fatal call to deprecated config: %(msg)s" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:84 +#, python-format +msgid "task run outlasted interval by %s sec" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:91 +msgid "in fixed duration looping call" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:131 +#, python-format +msgid "Dynamic looping call sleeping for %.02f seconds" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:138 +msgid "in dynamic looping call" +msgstr "" + +#: cinder/openstack/common/policy.py:149 +#, python-format +msgid "" +"Inheritance-based rules are deprecated; use the default brain instead of " +"%s." +msgstr "" + +#: cinder/openstack/common/policy.py:163 +#, python-format +msgid "Failed to understand rule %(match)r" +msgstr "" + +#: cinder/openstack/common/policy.py:173 +#, python-format +msgid "Inheritance-based rules are deprecated; update _check_%s" +msgstr "" + +#: cinder/openstack/common/policy.py:180 +#, python-format +msgid "No handler for matches of kind %s" +msgstr "" + +#: cinder/openstack/common/service.py:112 +#: cinder/openstack/common/service.py:275 +#, fuzzy +msgid "Full set of CONF:" +msgstr "Alle vorhandenen FLAGS:" + +#: cinder/openstack/common/service.py:261 +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "" + +#: cinder/openstack/common/strutils.py:72 +#, python-format +msgid "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:125 +#, python-format +msgid "%s not in valid priorities" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:141 +#, python-format +msgid "" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:171 +#, python-format +msgid "Failed to load notifier %s. These notifications will not be sent." +msgstr "" + +#: cinder/openstack/common/notifier/rabbit_notifier.py:27 +msgid "The rabbit_notifier is now deprecated. Please use rpc_notifier instead." +msgstr "" + +#: cinder/openstack/common/notifier/rpc_notifier.py:45 +#: cinder/openstack/common/notifier/rpc_notifier2.py:51 +#, python-format +msgid "Could not send notification to %(topic)s. Payload=%(message)s" +msgstr "" + +#: cinder/openstack/common/rpc/__init__.py:106 +#, python-format +msgid "" +"A RPC is being made while holding a lock. The locks currently held are " +"%(locks)s. This is probably a bug. Please report it. Include the " +"following: [%(stack)s]." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:75 +msgid "Pool creating new connection" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:200 +#, python-format +msgid "no calling threads waiting for msg_id : %s, message : %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:208 +#, python-format +msgid "" +"Number of call waiters is greater than warning threshhold: %d. There " +"could be a MulticallProxyWaiter leak." 
+msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:291 +#, python-format +msgid "unpacked context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:337 +#, python-format +msgid "UNIQUE_ID is %s." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:406 +#, python-format +msgid "received %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:414 +#, python-format +msgid "no method for message: %s" +msgstr "keine Methode für diese Nachricht gefunden: %s" + +#: cinder/openstack/common/rpc/amqp.py:415 +#, python-format +msgid "No method for message: %s" +msgstr "keine Methode für diese Nachricht gefunden: %s" + +#: cinder/openstack/common/rpc/amqp.py:443 +#: cinder/openstack/common/rpc/impl_zmq.py:286 +#, python-format +msgid "Expected exception during message handling (%s)" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:451 +#: cinder/openstack/common/rpc/impl_zmq.py:292 +msgid "Exception during message handling" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:586 +#, python-format +msgid "Making synchronous call on %s ..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:589 +#, python-format +msgid "MSG_ID is %s" +msgstr "MSG_ID ist %s" + +#: cinder/openstack/common/rpc/amqp.py:623 +#, python-format +msgid "Making asynchronous cast on %s..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:632 +msgid "Making asynchronous fanout cast..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:660 +#, python-format +msgid "Sending %(event_type)s on %(topic)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:74 +msgid "An unknown RPC related exception occurred." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:104 +#, python-format +msgid "" +"Remote error: %(exc_type)s %(value)s\n" +"%(traceback)s." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:121 +#, python-format +msgid "" +"Timeout while waiting on RPC response - topic: \"%(topic)s\", RPC method:" +" \"%(method)s\" info: \"%(info)s\"" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:137 +#: cinder/openstack/common/rpc/common.py:138 +#: cinder/openstack/common/rpc/common.py:139 +msgid "" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:143 +#, python-format +msgid "Found duplicate message(%(msg_id)s). Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:147 +msgid "Invalid reuse of an RPC connection." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:151 +#, python-format +msgid "Specified RPC version, %(version)s, not supported by this endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:156 +#, python-format +msgid "" +"Specified RPC envelope version, %(version)s, not supported by this " +"endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:280 +#, python-format +msgid "Failed to sanitize %(item)s. Key error %(err)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:302 +#, python-format +msgid "Returning exception %s to caller" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:169 +#: cinder/openstack/common/rpc/impl_qpid.py:133 +msgid "Failed to process message... skipping it." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:480 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:502 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:539 +#, python-format +msgid "" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:555 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:609 +#: cinder/openstack/common/rpc/impl_qpid.py:403 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:627 +#: cinder/openstack/common/rpc/impl_qpid.py:418 +#, python-format +msgid "Timed out waiting for RPC response: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:631 +#: cinder/openstack/common/rpc/impl_qpid.py:422 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:670 +#: cinder/openstack/common/rpc/impl_qpid.py:457 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:351 +#, python-format +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:357 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:370 +msgid "Re-established AMQP queues" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:430 +msgid "Error processing message. Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:97 +msgid "JSON serialization failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:104 +#, python-format +msgid "Deserializing: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:140 +#, python-format +msgid "Connecting to %(addr)s with %(type)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:141 +#, python-format +msgid "-> Subscribed to %(subscribe)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:142 +#, python-format +msgid "-> bind: %(bind)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:150 +msgid "Could not open socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:162 +#, python-format +msgid "Subscribing to %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:204 +msgid "You cannot recv on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:209 +msgid "You cannot send on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:273 +#, python-format +msgid "Running func with context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:311 +msgid "Sending reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:345 +msgid "RPC message did not include method." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:380 +msgid "Registering reactor" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:392 +msgid "In reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:407 +msgid "Out reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:411 +msgid "Consuming socket" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:453 +#, python-format +msgid "CONSUMER GOT %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:465 +#, python-format +msgid "Creating proxy for topic: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:471 +msgid "Topic contained dangerous characters." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:496 +#, python-format +msgid "ROUTER RELAY-OUT SUCCEEDED %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:505 +msgid "Topic socket file creation failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:510 +#, python-format +msgid "ROUTER RELAY-OUT QUEUED %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:513 +#, python-format +msgid "Local per-topic backlog buffer full for topic %(topic)s. Dropping message." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:532 +#, python-format +msgid "Could not create IPC directory %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:542 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:576 +#, python-format +msgid "CONSUMER RECEIVED DATA: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:578 +#, python-format +msgid "ROUTER RELAY-OUT %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:600 +msgid "ZMQ Envelope version unsupported or unknown." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:628 +msgid "Skipping topic registration. Already registered." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:635 +#, python-format +msgid "Consumer is a zmq.%s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:687 +msgid "Creating payload" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:700 +msgid "Creating queue socket for reply waiter" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:713 +msgid "Sending cast" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:716 +msgid "Cast sent; Waiting reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:719 +#, fuzzy, python-format +msgid "Received message: %s" +msgstr "keine Methode für diese Nachricht gefunden: %s" + +#: cinder/openstack/common/rpc/impl_zmq.py:720 +msgid "Unpacking response" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:729 +msgid "Unsupported or unknown ZMQ envelope returned." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:736 +msgid "RPC Message Invalid." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:760 +#, python-format +msgid "%(msg)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:763 +#, python-format +msgid "Sending message(s) to: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:767 +msgid "No matchmaker results. Not casting." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:770 +msgid "No match from matchmaker." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:53 +msgid "Match not found by MatchMaker." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:89 +msgid "Matchmaker does not implement registration or heartbeat." 
+msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:239 +#, python-format +msgid "Matchmaker unregistered: %s, %s" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:250 +msgid "Register before starting heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:343 +#: cinder/openstack/common/rpc/matchmaker.py:361 +#, python-format +msgid "No key defining hosts for topic '%s', see ringfile" +msgstr "" + +#: cinder/scheduler/chance.py:51 cinder/scheduler/simple.py:90 +msgid "Is the appropriate service running?" +msgstr "" + +#: cinder/scheduler/chance.py:56 +msgid "Could not find another host" +msgstr "" + +#: cinder/scheduler/driver.py:89 +msgid "Must implement a fallback schedule" +msgstr "" + +#: cinder/scheduler/driver.py:93 +msgid "Must implement schedule_create_volume" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:113 +msgid "Invalid value for 'scheduler_max_attempts', must be >=1" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:131 +#, python-format +msgid "" +"Error scheduling %(volume_id)s from last vol-service: %(last_host)s : " +"%(exc)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:160 +#, python-format +msgid "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:208 +#, python-format +msgid "Filtered %(hosts)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:214 +#, python-format +msgid "Choosing %(best_host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:240 +#, python-format +msgid "Ignoring %(service_name)s service update from %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:244 +#, python-format +msgid "Received %(service_name)s service update from %(host)s." +msgstr "" + +#: cinder/scheduler/host_manager.py:266 +msgid "service is down or disabled." +msgstr "" + +#: cinder/scheduler/manager.py:119 +#, python-format +msgid "Failed to schedule_%(method)s: %(ex)s" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:69 +#, python-format +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:78 +#, python-format +msgid "Could not decode scheduler options: '%(e)s'" +msgstr "" + +#: cinder/scheduler/simple.py:79 +msgid "Not enough allocatable volume gigabytes remaining" +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:37 +msgid "Free capacity not set: volume node info collection broken." +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:51 +#, python-format +msgid "" +"Insufficient free space for volume creation (requested / avail): " +"%(requested)s/%(available)s" +msgstr "" + +#: cinder/scheduler/filters/retry_filter.py:41 +#, python-format +msgid "Host %(host)s %(pass_msg)s. 
Previously tried hosts: %(hosts)s" +msgstr "" + +#: cinder/tests/fake_driver.py:45 cinder/volume/driver.py:506 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "" + +#: cinder/tests/fake_driver.py:93 cinder/tests/fake_driver.py:98 +#, python-format +msgid "LoggingVolumeDriver: %s" +msgstr "" + +#: cinder/tests/fake_utils.py:72 +#, python-format +msgid "Faking execution of cmd (subprocess): %s" +msgstr "" + +#: cinder/tests/fake_utils.py:80 +#, python-format +msgid "Faked command matched %s" +msgstr "" + +#: cinder/tests/fake_utils.py:96 +#, python-format +msgid "Faked command raised an exception %s" +msgstr "" + +#: cinder/tests/fake_utils.py:101 +#, python-format +msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" +msgstr "" + +#: cinder/tests/test_misc.py:59 +#, python-format +msgid "" +"The following migrations are missing a downgrade:\n" +"\t%s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:243 +#, python-format +msgid "unrecognized argument %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1217 +#, python-format +msgid "Run CLI command: %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1220 +#, python-format +msgid "" +"CLI output:\n" +" stdout: %(stdout)s\n" +" stderr: %(stderr)s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1225 +#, python-format +msgid "" +"CLI Exception output:\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/tests/test_volume_types.py:58 +#, python-format +msgid "Given data: %s" +msgstr "" + +#: cinder/tests/test_volume_types.py:59 +#, python-format +msgid "Result data: %s" +msgstr "" + +#: cinder/tests/api/contrib/test_backups.py:638 +msgid "Invalid input" +msgstr "" + +#: cinder/tests/integrated/test_login.py:31 +#, fuzzy, python-format +msgid "volume: %s" +msgstr "Volume %s: wird entfernt" + +#: cinder/tests/integrated/api/client.py:37 +#, python-format +msgid "" +"%(message)s\n" +"Status Code: %(_status)s\n" +"Body: %(_body)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:46 +msgid "Authentication error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:54 +msgid "Authorization error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:62 +msgid "Item not found" +msgstr "" + +#: cinder/tests/integrated/api/client.py:104 +#, python-format +msgid "Doing %(method)s on %(relative_url)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:106 +#, python-format +msgid "Body: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:124 +#, python-format +msgid "%(auth_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:150 +#, python-format +msgid "%(relative_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:160 +msgid "Unexpected status code" +msgstr "" + +#: cinder/tests/integrated/api/client.py:167 +#, python-format +msgid "Decoding JSON: %s" +msgstr "" + +#: cinder/volume/api.py:96 +msgid "May specify only one of snapshot, imageRef or source volume" +msgstr "" + +#: cinder/volume/api.py:103 cinder/volume/api.py:449 +#: cinder/volume/manager.py:554 +msgid "status must be available" +msgstr "" + +#: cinder/volume/api.py:108 +msgid "Volume size cannot be lesser than the Snapshot size" +msgstr "" + +#: cinder/volume/api.py:117 +msgid "Unable to clone volumes that are in an error state" +msgstr "" + +#: cinder/volume/api.py:123 +msgid "Clones currently must be >= original volume size." 
+msgstr "" + +#: cinder/volume/api.py:140 +#, python-format +msgid "Volume size '%s' must be an integer and greater than 0" +msgstr "" + +#: cinder/volume/api.py:150 +msgid "Size of specified image is larger than volume size." +msgstr "" + +#: cinder/volume/api.py:154 +msgid "Image minDisk size is larger than the volume size." +msgstr "" + +#: cinder/volume/api.py:168 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/api.py:177 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create volume (%(d_consumed)d " +"volumes already consumed)" +msgstr "" + +#: cinder/volume/api.py:308 +msgid "Failed to update quota for deleting volume" +msgstr "" + +#: cinder/volume/api.py:316 +msgid "Volume status must be available or error" +msgstr "" + +#: cinder/volume/api.py:321 +#, python-format +msgid "Volume still has %d dependent snapshots" +msgstr "" + +#: cinder/volume/api.py:374 cinder/volume/api.py:432 +#: cinder/volume/volume_types.py:65 +#, python-format +msgid "Searching by: %s" +msgstr "" + +#: cinder/volume/api.py:452 +msgid "already attached" +msgstr "" + +#: cinder/volume/api.py:459 +msgid "already detached" +msgstr "" + +#: cinder/volume/api.py:470 +msgid "Volume status must be available to reserve" +msgstr "" + +#: cinder/volume/api.py:519 +msgid "must be available" +msgstr "" + +#: cinder/volume/api.py:537 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG snapshot " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/api.py:546 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create snapshot (%(d_consumed)d " +"snapshots already consumed)" +msgstr "" + +#: cinder/volume/api.py:595 +msgid "Volume Snapshot status must be available or error" +msgstr "" + +#: cinder/volume/api.py:623 +msgid "Metadata property key blank" +msgstr "" + +#: cinder/volume/api.py:627 +msgid "Metadata property key greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:631 +msgid "Metadata property value greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:716 +msgid "Volume status must be available/in-use." +msgstr "" + +#: cinder/volume/api.py:719 +msgid "Volume status is in-use." +msgstr "" + +#: cinder/volume/driver.py:94 cinder/volume/drivers/netapp/nfs.py:253 +#, python-format +msgid "Recovering from a failed execute. Try number %s" +msgstr "" + +#: cinder/volume/driver.py:221 cinder/volume/drivers/emc/emc_smis_iscsi.py:119 +msgid "ISCSI provider_location not stored, using discovery" +msgstr "" + +#: cinder/volume/driver.py:269 +#, fuzzy, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "Nicht möglich volume %s zufinden" + +#: cinder/volume/driver.py:273 cinder/volume/drivers/emc/emc_smis_iscsi.py:162 +#, python-format +msgid "ISCSI Discovery: Found %s" +msgstr "" + +#: cinder/volume/driver.py:361 +#, python-format +msgid "copy_image_to_volume %s." +msgstr "" + +#: cinder/volume/driver.py:378 +#, python-format +msgid "copy_volume_to_image %s." +msgstr "" + +#: cinder/volume/driver.py:437 +#, python-format +msgid "iSCSI device not found at %s" +msgstr "" + +#: cinder/volume/driver.py:439 +#, python-format +msgid "" +"ISCSI volume not yet found at: %(host_device)s. Will rescan & retry. 
Try" +" number: %(tries)s" +msgstr "" + +#: cinder/volume/driver.py:451 +#, python-format +msgid "Found iSCSI node %(host_device)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/volume/driver.py:469 cinder/volume/manager.py:689 +#: cinder/volume/drivers/lvm.py:560 cinder/volume/drivers/lvm.py:676 +#: cinder/volume/drivers/storwize_svc.py:1344 +#: cinder/volume/drivers/emc/emc_smis_common.py:855 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:241 +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1503 +#: cinder/volume/drivers/netapp/iscsi.py:1114 +#: cinder/volume/drivers/netapp/iscsi.py:1484 +#: cinder/volume/drivers/netapp/iscsi.py:2236 +#: cinder/volume/drivers/netapp/iscsi.py:2515 +#: cinder/volume/drivers/nexenta/volume.py:317 +msgid "Updating volume status" +msgstr "" + +#: cinder/volume/driver.py:544 +msgid "Driver must implement initialize_connection" +msgstr "" + +#: cinder/volume/manager.py:122 +#, python-format +msgid "Driver path %s is deprecated, update your configuration to the new path." +msgstr "" + +#: cinder/volume/manager.py:144 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "" + +#: cinder/volume/manager.py:149 +#, python-format +msgid "volume %s stuck in a downloading state" +msgstr "" + +#: cinder/volume/manager.py:154 +#, python-format +msgid "volume %s: skipping export" +msgstr "" + +#: cinder/volume/manager.py:156 +msgid "Resuming any in progress delete operations" +msgstr "" + +#: cinder/volume/manager.py:159 +#, python-format +msgid "Resuming delete on volume: %s" +msgstr "" + +#: cinder/volume/manager.py:219 +#, python-format +msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +msgstr "" + +#: cinder/volume/manager.py:228 +#, fuzzy, python-format +msgid "volume %s: creating from snapshot" +msgstr "Volume %s: erstelle Export" + +#: cinder/volume/manager.py:232 +#, fuzzy, python-format +msgid "volume %s: creating from existing volume" +msgstr "Volume %s: erstelle Export" + +#: cinder/volume/manager.py:236 +#, fuzzy, python-format +msgid "volume %s: creating from image" +msgstr "Volume %s: wird erstellt" + +#: cinder/volume/manager.py:245 +#, python-format +msgid "volume %s: creating" +msgstr "Volume %s: wird erstellt" + +#: cinder/volume/manager.py:277 +#, python-format +msgid "volume %s: creating export" +msgstr "Volume %s: erstelle Export" + +#: cinder/volume/manager.py:286 +#, fuzzy, python-format +msgid "volume %s: create failed" +msgstr "Volume %s: wird erstellt" + +#: cinder/volume/manager.py:316 +#, python-format +msgid "volume %s: created successfully" +msgstr "Volume %s: erfolgreich erstellt" + +#: cinder/volume/manager.py:324 +#, python-format +msgid "Error: %s" +msgstr "" + +#: cinder/volume/manager.py:350 +#, python-format +msgid "volume %s: Error trying to reschedule create" +msgstr "" + +#: cinder/volume/manager.py:368 +msgid "Retry info not present, will not reschedule" +msgstr "" + +#: cinder/volume/manager.py:372 +msgid "No request spec, will not reschedule" +msgstr "" + +#: cinder/volume/manager.py:377 +#, python-format +msgid "volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d" +msgstr "" + +#: cinder/volume/manager.py:406 cinder/volume/manager.py:419 +#, python-format +msgid "volume %s: deleting" +msgstr "Volume %s: wird entfernt" + +#: cinder/volume/manager.py:412 +msgid "volume is not local to this node" +msgstr "" + +#: cinder/volume/manager.py:417 +#, python-format +msgid "volume %s: removing export" +msgstr "Volume %s: entferne Export" + +#: cinder/volume/manager.py:422 +#, python-format +msgid "volume 
%s: volume is busy" +msgstr "" + +#: cinder/volume/manager.py:441 +msgid "Failed to update usages deleting volume" +msgstr "" + +#: cinder/volume/manager.py:445 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "Volume %s: erfolgreich entfernt" + +#: cinder/volume/manager.py:460 +#, python-format +msgid "snapshot %s: creating" +msgstr "" + +#: cinder/volume/manager.py:466 +#, python-format +msgid "snapshot %(snap_name)s: creating" +msgstr "" + +#: cinder/volume/manager.py:484 +#, python-format +msgid "snapshot %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:492 cinder/volume/manager.py:502 +#, python-format +msgid "snapshot %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:505 +#, python-format +msgid "snapshot %s: snapshot is busy" +msgstr "" + +#: cinder/volume/manager.py:530 +msgid "Failed to update usages deleting snapshot" +msgstr "" + +#: cinder/volume/manager.py:533 +#, python-format +msgid "snapshot %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:551 +msgid "being attached by another instance" +msgstr "" + +#: cinder/volume/manager.py:608 +#, python-format +msgid "Downloaded image %(image_id)s to %(volume_id)s successfully" +msgstr "" + +#: cinder/volume/manager.py:626 +#, python-format +msgid "Uploaded volume %(volume_id)s to image (%(image_id)s) successfully" +msgstr "" + +#: cinder/volume/manager.py:702 +msgid "Clear capabilities" +msgstr "" + +#: cinder/volume/manager.py:706 +#, python-format +msgid "Notification {%s} received" +msgstr "" + +#: cinder/volume/volume_types.py:41 +#, python-format +msgid "DB error: %s" +msgstr "" + +#: cinder/volume/volume_types.py:50 cinder/volume/volume_types.py:97 +msgid "id cannot be None" +msgstr "" + +#: cinder/volume/volume_types.py:109 +msgid "name cannot be None" +msgstr "" + +#: cinder/volume/volume_types.py:128 +#, python-format +msgid "" +"Default volume type is not found; please check default_volume_type " +"config: %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:91 +msgid "Running with CoraidDriver for ESM EtherCloud" +msgstr "" + +#: cinder/volume/drivers/coraid.py:102 +#, python-format +msgid "Update session cookie %(session)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:108 cinder/volume/drivers/coraid.py:183 +#, python-format +msgid "Message: %(message)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:125 +#, python-format +msgid "Error while trying to set group: %(message)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:128 +#, python-format +msgid "Unable to find group: %(group)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:161 +msgid "ESM urlOpen error" +msgstr "" + +#: cinder/volume/drivers/coraid.py:166 +msgid "JSON Error" +msgstr "" + +#: cinder/volume/drivers/coraid.py:170 +msgid "Request without URL" +msgstr "" + +#: cinder/volume/drivers/coraid.py:175 +#, python-format +msgid "Configure data: %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:177 +#, python-format +msgid "Configure response: %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:199 +#, python-format +msgid "Unable to retrieve volume info for volume %(volname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:289 +msgid "Cannot log in to Coraid ESM" +msgstr "" + +#: cinder/volume/drivers/coraid.py:308 +#, fuzzy, python-format +msgid "Failed to create volume %(volname)s" +msgstr "Nicht möglich volume %s zu finden" + +#: cinder/volume/drivers/coraid.py:321 +#, fuzzy, python-format +msgid "Failed to delete volume %(volname)s" +msgstr "Nicht möglich volume %s zu finden" + +#:
cinder/volume/drivers/coraid.py:335 +#, python-format +msgid "Failed to Create Snapshot %(snapname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:347 +#, python-format +msgid "Failed to Delete Snapshot %(snapname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:362 +#, python-format +msgid "Failed to Create Volume from Snapshot %(snapname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:383 +#, python-format +msgid "" +"Failed to Initialize Connection. Volume Name: %(volname)s Shelf: " +"%(shelf)s, Lun: %(lun)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:65 +#, python-format +msgid "There's no Gluster config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:70 +#, python-format +msgid "Gluster config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:80 +msgid "mount.glusterfs is not installed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:98 cinder/volume/drivers/nfs.py:168 +#, python-format +msgid "cast to %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:108 cinder/volume/drivers/nfs.py:178 +#, python-format +msgid "Volume %s does not have provider_location specified, skipping" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:168 cinder/volume/drivers/nfs.py:237 +#, python-format +msgid "Exception during mounting %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:247 cinder/volume/drivers/nfs.py:321 +#, python-format +msgid "%s is already mounted" +msgstr "" + +#: cinder/volume/drivers/lvm.py:82 +#, python-format +msgid "volume group %s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/lvm.py:197 +#, python-format +msgid "Size for volume: %s not found, skipping secure delete." +msgstr "" + +#: cinder/volume/drivers/lvm.py:204 +#, python-format +msgid "Performing secure delete on volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:218 +#, python-format +msgid "Error: unrecognized volume_clear option: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:238 +#, python-format +msgid "snapshot: %s not found, skipping delete operations" +msgstr "" + +#: cinder/volume/drivers/lvm.py:268 cinder/volume/drivers/lvm.py:656 +#, python-format +msgid "Creating clone of volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:345 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provision for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:364 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:378 +msgid "Detected inconsistency in provider_location id" +msgstr "" + +#: cinder/volume/drivers/lvm.py:430 +#, python-format +msgid "Symbolic link %s not found" +msgstr "" + +#: cinder/volume/drivers/lvm.py:510 cinder/volume/drivers/lvm.py:524 +#, python-format +msgid "Skipping remove_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:542 +#, python-format +msgid "" +"Skipping remove_export.
No iscsi_target is presently exported for volume:" +" %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:583 +msgid "Error retrieving volume status: " +msgstr "" + +#: cinder/volume/drivers/nfs.py:141 +#, python-format +msgid "There's no NFS config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/nfs.py:146 +#, python-format +msgid "NFS config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/rbd.py:64 +#, python-format +msgid "rbd has no pool %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:87 cinder/volume/drivers/sheepdog.py:134 +msgid "error refreshing volume stats" +msgstr "" + +#: cinder/volume/drivers/rbd.py:220 +msgid "Not stored in rbd" +msgstr "" + +#: cinder/volume/drivers/rbd.py:224 +msgid "Blank components" +msgstr "" + +#: cinder/volume/drivers/rbd.py:227 +msgid "Not an rbd snapshot" +msgstr "" + +#: cinder/volume/drivers/rbd.py:242 +#, python-format +msgid "%s is in a different ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:253 +#, fuzzy, python-format +msgid "Unable to read image %s" +msgstr "Nicht möglich volume %s zu finden" + +#: cinder/volume/drivers/scality.py:63 +msgid "Value required for 'scality_sofs_config'" +msgstr "" + +#: cinder/volume/drivers/scality.py:74 +#, python-format +msgid "Cannot access 'scality_sofs_config': %s" +msgstr "" + +#: cinder/volume/drivers/scality.py:80 +msgid "Cannot execute /sbin/mount.sofs" +msgstr "" + +#: cinder/volume/drivers/scality.py:101 +msgid "Cannot mount Scality SOFS, check syslog for errors" +msgstr "" + +#: cinder/volume/drivers/scality.py:135 +#, python-format +msgid "Cannot find volume dir for Scality SOFS at '%s'" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:47 +#, python-format +msgid "Sheepdog is not working: %s" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:52 +msgid "Sheepdog is not working" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:129 +#, python-format +msgid "Payload for SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:146 +#, python-format +msgid "Call to json.loads() raised an exception: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:152 +#, python-format +msgid "Results of SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:156 +#, python-format +msgid "Clone operation encountered: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:158 +#, python-format +msgid "Waiting for outstanding operation before retrying snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:164 +#, python-format +msgid "Detected xDBVersionMismatch, retry %s of 5" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:171 +#, python-format +msgid "API response: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:191 +#, python-format +msgid "Found solidfire account: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:220 +#, python-format +msgid "solidfire account: %s does not exist, creating it..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:281 +#, python-format +msgid "Failed to retrieve volume SolidFire-ID: %s in get_by_account!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:338 +msgid "Failed to get model update from clone" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:364 +#, python-format +msgid "More than one valid preset was detected, using %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:397 +#, python-format +msgid "Mapped SolidFire volumeID %(sfid)s to cinder ID %(uuid)s."
+msgstr "" + +#: cinder/volume/drivers/solidfire.py:406 +#, python-format +msgid "Volume %s, not found on SF Cluster." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:409 +#, python-format +msgid "Found %(count)s volumes mapped to id: %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:476 +msgid "Enter SolidFire delete_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:480 +#, python-format +msgid "Account for Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:482 +msgid "This usually means the volume was never succesfully created." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:497 +#, python-format +msgid "Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:500 +msgid "Leaving SolidFire delete_volume" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:504 +msgid "Executing SolidFire ensure_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:509 +msgid "Executing SolidFire create_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:558 +msgid "Updating cluster status info" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:566 +msgid "Failed to get updated stats" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:173 +#, python-format +msgid "WWPN on node %(node)s: %(wwpn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:179 +msgid "enter: do_setup" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:190 +#, python-format +msgid "pool %s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:206 +msgid "Failed to get license information." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:265 +msgid "do_setup: No configured nodes" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:267 +msgid "leave: do_setup" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:289 +msgid "enter: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:295 +#: cinder/volume/drivers/netapp/iscsi.py:169 +#: cinder/volume/drivers/netapp/iscsi.py:1182 +#: cinder/volume/drivers/netapp/iscsi.py:1535 +#: cinder/volume/drivers/netapp/nfs.py:109 +#: cinder/volume/drivers/netapp/nfs.py:328 +#: cinder/volume/drivers/netapp/nfs.py:379 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:118 +#, python-format +msgid "%s is not set" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:301 +msgid "" +"Password or SSH private key is required for authentication: set either " +"san_password or san_private_key option" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:309 +#, python-format +msgid "" +"Illegal value %d specified for storwize_svc_flashcopy_timeout: valid " +"values are between 0 and 600" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:317 +msgid "leave: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:331 +#, python-format +msgid "ensure_export: Volume %s not found on storage" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:356 +#, python-format +msgid "enter: _get_chap_secret_for_host: host name %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:392 +#, python-format +msgid "" +"leave: _get_chap_secret_for_host: host name %(host_name)s with secret " +"%(chap_secret)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:415 +msgid "_create_host: Cannot clean host name. 
Host name is not unicode or string" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:483 +#, python-format +msgid "enter: _get_host_from_connector: prefix %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:511 +#, python-format +msgid "leave: _get_host_from_connector: host %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:523 +#, python-format +msgid "enter: _create_host: host %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:538 +msgid "_create_host: No connector ports" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:551 +#, python-format +msgid "leave: _create_host: host %(host)s - %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:574 +#, python-format +msgid "enter: _map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:609 +msgid "" +"storwize_svc_multihostmap_enabled is set to False, multi-host mapping is " +"not allowed" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:619 +#, python-format +msgid "volume %s is mapped to multiple hosts" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:625 +#, python-format +msgid "" +"leave: _map_vol_to_host: LUN %(result_lun)s, volume %(volume_name)s, host" +" %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:635 +#, python-format +msgid "enter: _delete_host: host %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:643 +#, python-format +msgid "leave: _delete_host: host %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:675 +#, python-format +msgid "enter: initialize_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:691 +msgid "_create_host failed to return the host name." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:702 +#, python-format +msgid "initialize_connection: Failed to get attributes for volume %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:709 +#, python-format +msgid "Did not find expected column name in lsvdisk: %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:711 +#, python-format +msgid "initialize_connection: Missing volume attribute for volume %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:728 +#, python-format +msgid "" +"initialize_connection: No node found in I/O group %(gid)s for volume " +"%(vol)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:736 +#, python-format +msgid "initialize_connection: Did not find a preferred node for volume %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:769 +#, python-format +msgid "" +"initialize_connection: Failed to collect return properties for volume " +"%(vol)s and connector %(conn)s.\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:774 +#, python-format +msgid "" +"leave: initialize_connection:\n" +" volume: %(vol)s\n" +" connector %(conn)s\n" +" properties: %(prop)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:792 +#, python-format +msgid "enter: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:802 +msgid "_get_host_from_connector failed to return the host name for connector" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:817 +#, python-format +msgid "" +"terminate_connection: No mapping of volume %(vol_name)s to host " +"%(host_name)s found" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:825 +#, python-format +msgid "leave: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#:
cinder/volume/drivers/storwize_svc.py:883 +msgid "protocol must be specified as 'iSCSI' or 'FC'" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:907 +#, python-format +msgid "enter: _create_vdisk: vdisk %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:945 +#, python-format +msgid "" +"_create_vdisk %(name)s - did not find success message in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:950 +#, python-format +msgid "leave: _create_vdisk: volume %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:962 +#: cinder/volume/drivers/storwize_svc.py:976 +#, python-format +msgid "" +"create FC mapping from %(source)s to %(target)s - did not find success " +"message in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:988 +#: cinder/volume/drivers/storwize_svc.py:998 +#, python-format +msgid "" +"create FC mapping from %(source)s to %(target)s - did not find mapping id" +" in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1012 +#, python-format +msgid "" +"_prepare_fc_map: Failed to prepare FlashCopy from %(source)s to " +"%(target)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1039 +#, python-format +msgid "" +"Unexpected mapping status %(status)s for mapping %(id)s. Attributes: " +"%(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1050 +#, python-format +msgid "" +"Mapping %(id)s prepare failed to complete within the allotted %(to)d " +"seconds timeout. Terminating." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1055 +#, python-format +msgid "" +"_prepare_fc_map: Failed to start FlashCopy from %(source)s to %(target)s " +"with exception %(ex)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1062 +#, python-format +msgid "_prepare_fc_map: %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1069 +#, python-format +msgid "" +"_start_fc_map: Failed to start FlashCopy from %(source)s to %(target)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1080 +#, python-format +msgid "" +"enter: _run_flashcopy: execute FlashCopy from source %(source)s to target" +" %(target)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1092 +#, python-format +msgid "leave: _run_flashcopy: FlashCopy started from %(source)s to %(target)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1100 +#, python-format +msgid "enter: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1107 +#, python-format +msgid "_create_copy: Source vdisk %s does not exist" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1119 +#, python-format +msgid "" +"_create_copy: cannot get source vdisk %(src)s capacity from vdisk " +"attributes %(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1129 +#, python-format +msgid "leave: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1134 +#, python-format +msgid "enter: _get_flashcopy_mapping_attributes: mapping %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1156 +#, python-format +msgid "" +"leave: _get_flashcopy_mapping_attributes: mapping %(fc_map_id)s, " +"attributes %(attributes)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1164 +#, python-format +msgid
"enter: _is_vdisk_defined: vdisk %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1166 +#, python-format +msgid "leave: _is_vdisk_defined: vdisk %(vol)s with %(str)s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1194 +#, python-format +msgid "enter: _delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1199 +#, python-format +msgid "warning: Tried to delete vdisk %s but it does not exist." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1225 +#, python-format +msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1258 +#, python-format +msgid "leave: _delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1283 +msgid "create_volume_from_snapshot: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1297 +msgid "create_cloned_volume: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1361 +msgid "_update_volume_status: Could not get system name" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1373 +msgid "Could not get pool data from the storage" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1374 +msgid "_update_volume_status: Could not get storage pool data" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1412 +msgid "" +"Illegal value specified for storwize_svc_vol_rsize: set to either a " +"percentage (0-100) or -1" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1418 +msgid "" +"Illegal value specified for storwize_svc_vol_warning: set to a percentage" +" (0-100)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1425 +msgid "" +"Illegal value specified for storwize_svc_vol_grainsize: set to either 32," +" 64, 128, or 256" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1432 +msgid "System does not support compression" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1437 +msgid "If compression is set to True, rsize must also be set (not equal to -1)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1443 +#, python-format +msgid "" +"Illegal value %(prot)s specified for storwize_svc_connection_protocol: " +"valid values are %(enabled)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1452 +msgid "" +"Multipath is currently only supported for FC connections and not iSCSI. 
" +"(This is a Nova limitation.)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1464 +#, python-format +msgid "enter: _execute_command_and_parse_attributes: command %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1471 +#, fuzzy, python-format +msgid "" +"CLI Exception output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" +"%(description)s\n" +"Befehl: %(cmd)s\n" +"Exit-Code: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" + +#: cinder/volume/drivers/storwize_svc.py:1488 +#, python-format +msgid "" +"leave: _execute_command_and_parse_attributes:\n" +"command: %(cmd)s\n" +"attributes: %(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1507 +#, python-format +msgid "" +"_get_hdr_dic: attribute headers and values do not match.\n" +" Headers: %(header)s\n" +" Values: %(row)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1517 +#, python-format +msgid "" +"%(fun)s: Failed with unexpected CLI output.\n" +" Command: %(cmd)s\n" +"stdout: %(out)s\n" +"stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1531 +#, python-format +msgid "" +"%(fun)s: Failed with unexpected CLI output.\n" +" Command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1539 +#, python-format +msgid "Did not find expected column in %(fun)s: %(hdr)s" +msgstr "" + +#: cinder/volume/drivers/windows.py:142 +#, python-format +msgid "Creating folder %s " +msgstr "" + +#: cinder/volume/drivers/windows.py:207 +#: cinder/volume/drivers/nexenta/volume.py:203 +#, python-format +msgid "Ignored target creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/windows.py:213 +#, python-format +msgid "Disk not found: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:218 +#, python-format +msgid "Sending %(method)s to %(url)s. Body \"%(body)s\"" +msgstr "" + +#: cinder/volume/drivers/zadara.py:240 +#, python-format +msgid "Operation completed. %(data)s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:350 +#, python-format +msgid "Volume %(name)s could not be found. It might be already deleted" +msgstr "" + +#: cinder/volume/drivers/zadara.py:447 +#, python-format +msgid "Attach properties: %(properties)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:42 +msgid "" +"Module PyWBEM not installed. Install PyWBEM using the python-pywbem " +"package." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:78 +msgid "Entering create_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:82 +#, python-format +msgid "Create Volume: %(volume)s Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:90 +#, python-format +msgid "Create Volume: %(volume)s Storage type: %(storage_type)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:97 +#, python-format +msgid "" +"Create Volume: %(volume)s Pool: %(pool)s Storage System: " +"%(storage_system)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:106 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Storage Configuration Service not " +"found for pool %(storage_type)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:114 +#, python-format +msgid "" +"Create Volume: %(name)s Method: CreateOrModifyElementFromStoragePool " +"ConfigServicie: %(service)s ElementName: %(name)s InPool: %(pool)s " +"ElementType: 5 Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:129 +#, python-format +msgid "Create Volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:136 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:143 +#, python-format +msgid "Leaving create_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:151 +msgid "Entering create_volume_from_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:156 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:166 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Snapshot Instance: %(snapshotinstance)s Storage " +"System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:176 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Create Volume from Snapshot is NOT supported on VMAX." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:187 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Cannot find Replication Service to create volume from " +"snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:196 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Method: CreateElementReplica ReplicationService: " +"%(service)s ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:217 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s " +"Snapshot:%(snapshotname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:229 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Successfully clone volume from snapshot. Finding the " +"clone relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:240 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Remove the clone relationship. Method: " +"ModifyReplicaSynchronization ReplicationService: %(service)s Operation: " +"8 Synchronization: %(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:256 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:265 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. 
Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:277 +#, python-format +msgid "" +"Leaving create_volume_from_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:286 +msgid "Entering create_cloned_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:291 +#, python-format +msgid "" +"Create a Clone from Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:301 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Source Instance: %(src_instance)s Storage System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:311 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Cannot find Replication Service to create cloned volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:320 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Method: CreateElementReplica ReplicationService: %(service)s " +"ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:341 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source " +"Volume:%(srcname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:353 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Successfully cloned volume from source volume. Finding the clone " +"relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:364 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Remove the clone relationship. Method: ModifyReplicaSynchronization " +"ReplicationService: %(service)s Operation: 8 Synchronization: " +"%(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:380 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:389 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:401 +#, python-format +msgid "" +"Leaving create_cloned_volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:410 +msgid "Entering delete_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:412 +#, python-format +msgid "Delete Volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:419 +#, python-format +msgid "Volume %(name)s not found on the array. No volume to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:429 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Storage Configuration Service not " +"found." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:437 +#, python-format +msgid "Delete Volume: %(name)s DeviceID: %(deviceid)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:441 +#, python-format +msgid "" +"Delete Volume: %(name)s Method: EMCReturnToStoragePool ConfigServic: " +"%(service)s TheElement: %(vol_instance)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:454 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:463 +#, python-format +msgid "Leaving delete_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:470 +msgid "Entering create_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:474 +#, python-format +msgid "Create snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:486 +#, python-format +msgid "Device ID: %(deviceid)s: Storage System: %(storagesystem)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:493 +#: cinder/volume/drivers/emc/emc_smis_common.py:495 +#: cinder/volume/drivers/emc/emc_smis_common.py:565 +#, python-format +msgid "Cannot find Replication Service to create snapshot for volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:500 +#, python-format +msgid "" +"Create Snapshot: Method: CreateElementReplica: Target: %(snapshot)s " +"Source: %(volume)s Replication Service: %(service)s ElementName: " +"%(elementname)s Sync Type: 7 SourceElement: %(sourceelement)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:516 +#, python-format +msgid "" +"Create Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:525 +#, python-format +msgid "Error Create Snapshot: (snapshot)s Volume: %(volume)s Error: %(errordesc)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:533 +#, python-format +msgid "" +"Leaving create_snapshot: Snapshot: %(snapshot)s Volume: %(volume)s " +"Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:539 +msgid "Entering delete_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:543 +#, python-format +msgid "Delete Snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:549 +#, python-format +msgid "" +"Delete Snapshot: %(snapshot)s: volume: %(volume)s. Finding " +"StorageSychronization_SV_SV." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:557 +#, python-format +msgid "" +"Snapshot: %(snapshot)s: volume: %(volume)s not found on the array. No " +"snapshot to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:572 +#, python-format +msgid "" +"Delete Snapshot: Target: %(snapshot)s Source: %(volume)s. Method: " +"ModifyReplicaSynchronization: Replication Service: %(service)s " +"Operation: 19 Synchronization: %(sync_name)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:588 +#, python-format +msgid "" +"Delete Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:597 +#, python-format +msgid "" +"Error Delete Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. 
Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:609 +#, python-format +msgid "" +"Leaving delete_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:619 +#, python-format +msgid "Create export: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:624 +#, python-format +msgid "create_export: Volume: %(volume)s Device ID: %(device_id)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:646 +#, python-format +msgid "" +"ExposePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(lun_name)s" +" InitiatorPortIDs: %(initiator)s DeviceAccesses: 2" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:661 +#, python-format +msgid "ExposePaths parameter LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:672 +#, python-format +msgid "Error mapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:676 +#, fuzzy, python-format +msgid "ExposePaths for volume %s completed successfully." +msgstr "Volume %s: erfolgreich entfernt" + +#: cinder/volume/drivers/emc/emc_smis_common.py:692 +#, python-format +msgid "" +"HidePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(device_id)s " +"LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:705 +#, python-format +msgid "Error unmapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:709 +#, fuzzy, python-format +msgid "HidePaths for volume %s completed successfully." +msgstr "Volume %s: erfolgreich entfernt" + +#: cinder/volume/drivers/emc/emc_smis_common.py:722 +#, python-format +msgid "" +"AddMembers: ConfigServicie: %(service)s MaskingGroup: %(masking_group)s" +" Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:735 +#, python-format +msgid "Error mapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:740 +#, fuzzy, python-format +msgid "AddMembers for volume %s completed successfully." +msgstr "Volume %s: erfolgreich entfernt" + +#: cinder/volume/drivers/emc/emc_smis_common.py:753 +#, python-format +msgid "" +"RemoveMembers: ConfigServicie: %(service)s MaskingGroup: " +"%(masking_group)s Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:766 +#, python-format +msgid "Error unmapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:771 +#, fuzzy, python-format +msgid "RemoveMembers for volume %s completed successfully." +msgstr "Volume %s: erfolgreich entfernt" + +#: cinder/volume/drivers/emc/emc_smis_common.py:777 +#, python-format +msgid "Map volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:786 +#: cinder/volume/drivers/emc/emc_smis_common.py:816 +#, python-format +msgid "Cannot find Controller Configuration Service for storage system %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:800 +#, python-format +msgid "Unmap volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:806 +#, python-format +msgid "Volume %s is not mapped. No volume to unmap." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:830 +#, python-format +msgid "Initialize connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:836 +#, python-format +msgid "Volume %s is already mapped." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:848 +#, python-format +msgid "Terminate connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:880 +#, python-format +msgid "Found Storage Type: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:883 +msgid "Storage type not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:899 +#, python-format +msgid "Found Masking View: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:902 +msgid "Masking View not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:924 +msgid "Ecom user not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:944 +#, python-format +msgid "Ecom IP: %(ecomIp)s Port: %(ecomPort)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:947 +msgid "Ecom server not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:954 +msgid "Cannot connect to ECOM server" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:966 +#, python-format +msgid "Found Replication Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:979 +#, python-format +msgid "Found Storage Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:992 +#, python-format +msgid "Found Controller Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1005 +#, python-format +msgid "Found Storage Hardware ID Management Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1049 +#, python-format +msgid "Pool %(storage_type)s is not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1055 +#, python-format +msgid "Storage system not found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1061 +#, python-format +msgid "Pool: %(pool)s SystemName: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1077 +#, python-format +msgid "Pool name: %(poolname)s System name: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1109 +#, python-format +msgid "Volume %(volumename)s not found on the array." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1112 +#, python-format +msgid "Volume name: %(volumename)s Volume instance: %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1125 +#, python-format +msgid "Source: %(volumename)s Target: %(snapshotname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1148 +#, python-format +msgid "" +"Source: %(volumename)s Target: %(snapshotname)s. Storage Synchronized " +"not found. " +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1153 +#, python-format +msgid "" +"Storage system: %(storage_system)s Storage Synchronized instance: " +"%(sync)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1179 +#, python-format +msgid "Error finding %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1183 +#, python-format +msgid "Found %(name)s: %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1243 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage_system)s " +"and initiator %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1282 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage volume %(vol)s and initiator" +" %(initiator)s is %(ctrl)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1295 +#, python-format +msgid "" +"Volume %(name)s not found on the array. Cannot determine if there are " +"volumes mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1307 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage)s and " +"%(connector)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1319 +#, python-format +msgid "" +"Found %(numVolumesMapped)d volumes on storage system %(storage)s mapped " +"to %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1354 +#, python-format +msgid "Available device number on %(storage)s: %(device)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1397 +#, python-format +msgid "Device number not found for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1402 +#, python-format +msgid "Found device number %(device)d for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1412 +#, python-format +msgid "Device info: %(data)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1434 +#, python-format +msgid "Masking view: %(view)s DeviceMaskingGroup: %(masking)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1456 +#, python-format +msgid "Found Storage Processor System: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1484 +#, python-format +msgid "" +"iSCSIProtocolEndpoint for storage system %(storage_system)s and SP %(sp)s" +" is %(endpoint)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1513 +msgid "Error finding Storage Hardware ID Service." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1519 +#, python-format +msgid "" +"EMCGetTargetEndpoints: Service: %(service)s Storage HardwareIDs: " +"%(hardwareids)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1531 +msgid "Error finding Target WWNs." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1541 +#, python-format +msgid "Add target WWN: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1543 +#, python-format +msgid "Target WWNs: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1559 +#, python-format +msgid "Storage Hardware IDs for %(wwpns)s is %(foundInstances)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:158 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:167 +#, fuzzy, python-format +msgid "Cannot find device number for volume %s" +msgstr "Nicht möglich volume %s zufinden" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:197 +#, python-format +msgid "Found iSCSI endpoint: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:204 +#, python-format +msgid "ISCSI endpoint not found for SP %(sp)s on storage system %(storage)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:221 +#, python-format +msgid "ISCSI properties: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:113 +msgid "read timed out" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:138 +msgid "do_setup." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:144 +msgid "check_for_setup_error." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:149 +msgid "check_for_setup_error: Can not get device type." 
+msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:153 +#, python-format +msgid "check_for_setup_error: Device type is:%(type)s, version is:%(version)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:160 +msgid "" +"check_for_setup_error: Product version not right. Please make sure the " +"product version is V1." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:171 +msgid "_get_device_type: Storage Pool must be configured." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:180 +#, python-format +msgid "create_volume:volume name: %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:194 +#, python-format +msgid "delete_volume: volume name: %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:201 +#, python-format +msgid "delete_volume:No need to delete volume. Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:210 +#, python-format +msgid "create_export: volume name:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:214 +#, python-format +msgid "create_export:Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:234 +#, python-format +msgid "initialize_connection: volume name: %(volume)s. initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:249 +#, python-format +msgid "" +"initialize_connection:Failed to find target ip for " +"initiator:%(initiatorname)s, please check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:259 +#, python-format +msgid "initialize_connection:Failed to find target iSCSI iqn. Target IP:%(ip)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:297 +#, python-format +msgid "" +"initialize_connection:host name: %(host)s, initiator name: %(ini)s, " +"hostport name: %(port)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:307 +#, python-format +msgid "" +"initialize_connection:Failed to find the given volume. volume " +"name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:359 +#, python-format +msgid "terminate_connection:volume name: %(volume)s, initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:368 +#, python-format +msgid "terminate_connection:Host does not exist. Host name:%(host)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:377 +#, python-format +msgid "terminate_connection:volume does not exist. volume name:%(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:396 +#, python-format +msgid "" +"terminate_connection:No map between host and volume. Host " +"name:%(hostname)s, volume name:%(volumename)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:414 +#, python-format +msgid "" +"terminate_connection:No initiator is added to the host. Host " +"name:%(hostname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:427 +#, python-format +msgid "create_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:434 +msgid "create_snapshot:Device does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:440 +msgid "create_snapshot:Resource pool needs 1GB valid size at least." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:447 +#, python-format +msgid "create_snapshot:Volume does not exist. 
Volume name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:456 +#, python-format +msgid "create_snapshot:Snapshot does not exist. Snapshot name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:468 +#, python-format +msgid "delete_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:475 +msgid "delete_snapshot:Device does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:484 +#, python-format +msgid "delete_snapshot:Snapshot does not exist. snapshot name:%(snap)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:497 +#, python-format +msgid "" +"create_volume_from_snapshot:snapshot name:%(snapshot)s, volume " +"name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:505 +#, python-format +msgid "" +"create_volume_from_snapshot:Device does not support create volume from " +"snapshot. Volume name:%(volume)s, snapshot name:%(snapshot)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:516 +#, python-format +msgid "" +"create_volume_from_snapshot:Snapshot does not exist. Snapshot " +"name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:567 +msgid "Config file is wrong. Controler IP, UserName and UserPassword must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:573 +#, python-format +msgid "_check_conf_file: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:584 +#, python-format +msgid "_read_xml:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:612 +#, python-format +msgid "Write login information to xml error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:616 +#, python-format +msgid "_get_login_info error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:638 +#, python-format +msgid "" +"Config file is wrong. LUNType must be \"Thin\" or \"Thick\". " +"LUNType:%(type)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:665 +msgid "_get_lun_set_info:Use default prefetch type. Prefetch type:Intelligent." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:676 +msgid "" +"_get_lun_set_info:No available pools! Please check whether storage pool " +"is created." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:686 +#, python-format +msgid "_get_lun_set_info:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:736 +msgid "" +"_get_maximum_pool:maxpoolid is None. Please check config file and make " +"sure the \"Name\" in \"StoragePool\" is right." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:754 +#, python-format +msgid "_get_iscsi_info:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:764 +#, python-format +msgid "CLI command:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:831 +#, python-format +msgid "_execute_cli:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:838 +#, python-format +msgid "" +"_name_translate:Name in cinder: %(old)s, new name in storage system: " +"%(new)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:872 +#, python-format +msgid "" +"_create_hostgroup:Failed to Create hostgroup. Hostgroup name: %(name)s. " +"out:%(out)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:901 +#, python-format +msgid "" +"_add_host:Failed to add host to hostgroup. 
host name:%(host)s hostgroup " +"id:%(hostgroup)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:927 +#, python-format +msgid "" +"_add_initiator:Failed to add initiator. initiator name:%(name)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:941 +#, python-format +msgid "" +"_delete_initiator:ERROR:Failed to delete initiator. initiator " +"name:%(name)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:991 +#, python-format +msgid "" +"_add_hostport:Failed to add hostport. port name:%(port)s port " +"information:%(info)s host id:%(host)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1009 +#, python-format +msgid "_delete_hostport:Failed to delete host port. port id:%(portid)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1016 +#, python-format +msgid "_get_tgt_iqn:iSCSI IP is %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1049 +#, python-format +msgid "_get_tgt_iqn:iSCSI target iqn is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1086 +#, python-format +msgid "" +"_map_lun:Failed to add hostmap. hostid:%(host)s lunid:%(lun)s " +"hostlunid:%(hostlunid)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1122 +#, python-format +msgid "" +"_delete_map:There are IOs accessing the system. Retrying delete of host " +"map. map id:%(mapid)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1128 +#, python-format +msgid "_delete_map:Failed to delete host map. mapid:%(mapid)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1142 +#, python-format +msgid "_delete_host: Failed to delete host. host id:%(hostid)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1223 +#, python-format +msgid "" +"_active_snapshot:Failed to activate snapshot. snapshot id:%(name)s. " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1237 +#, python-format +msgid "" +"_disable_snapshot:Failed to disable snapshot. snapshot id:%(id)s. " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1251 +#, python-format +msgid "" +"_delete_snapshot:Failed to delete snapshot. snapshot id:%(id)s. " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1308 +#, python-format +msgid "_create_volume:Failed to Create volume. volume name:%(name)s. out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1321 +#, python-format +msgid "_delete_volume:Failed to delete volume. Volume name:%(name)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1338 +#, python-format +msgid "" +"_create_luncopy:Failed to Create LUNcopy. LUNcopy name:%(name)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1352 +#, python-format +msgid "" +"_start_luncopy:Failed to start LUNcopy. LUNcopy id:%(luncopyid)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1382 +#, python-format +msgid "" +"_wait_for_luncopy:LUNcopy status is not normal. LUNcopy " +"name:%(luncopyname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1415 +#, python-format +msgid "" +"_delete_luncopy:Failed to delete LUNcopy. LUNcopy id:%(luncopyid)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1430 +#, python-format +msgid "" +"_create_snapshot:Failed to Create snapshot.
Snapshot name:%(name)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1468 +#, python-format +msgid "" +"_change_lun_controller:Failed to change lun owning controller. lun " +"id:%(lunid)s. new controller:%(controller)s. out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1484 +msgid "_is_resource_pool_enough:Resource pool for snapshot has not been added." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:123 +#, python-format +msgid "API %(name)s failed: %(reason)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:134 +#: cinder/volume/drivers/netapp/iscsi.py:1167 +#, python-format +msgid "Using WSDL: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:144 +#, python-format +msgid "Using DFM server: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:149 +#, python-format +msgid "Using storage service: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:154 +#, python-format +msgid "Using storage service prefix: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:160 +#, python-format +msgid "Using vfiler: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:173 +msgid "Either netapp_storage_service or netapp_storage_service_prefix must be set" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:204 +msgid "Connected to DFM server" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:282 +#, python-format +msgid "Discovered %(dataset_count)s datasets and %(lun_count)s LUNs" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:317 +#, python-format +msgid "Job failed: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:346 +msgid "" +"Attempt to use volume_type without specifying " +"netapp_storage_service_prefix flag." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:350 +msgid "" +"You must set the netapp_storage_service flag in order to create volumes " +"with no volume_type." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:420 +msgid "Failed to provision dataset member" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:435 +msgid "No LUN was created by the provision job" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:463 +#: cinder/volume/drivers/netapp/iscsi.py:1250 +#, python-format +msgid "No entry in LUN table for volume %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:481 +msgid "Failed to remove and delete dataset LUN member" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:497 +msgid "Failed to remove and delete dataset Qtree member" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:538 +#, python-format +msgid "No entry in LUN table for volume %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:558 +#, python-format +msgid "Failed to get LUN details for LUN ID %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:575 +#, python-format +msgid "Failed to get host details for host ID %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:812 +#: cinder/volume/drivers/netapp/iscsi.py:859 +#, fuzzy, python-format +msgid "No LUN ID for volume %s" +msgstr "Nicht möglich volume %s zufinden" + +#: cinder/volume/drivers/netapp/iscsi.py:821 +#, python-format +msgid "Failed to get target portal for filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:826 +#, python-format +msgid "Failed to get target IQN for filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:930 +msgid "" +"Failed to get LUN list. Is the DFM host time-synchronized with Cinder " +"host?" 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1042 +#: cinder/volume/drivers/netapp/iscsi.py:1381 +#: cinder/volume/drivers/netapp/iscsi.py:1699 +#: cinder/volume/drivers/netapp/nfs.py:74 +#, python-format +msgid "" +"Cannot create volume of size %(vol_size)s from snapshot of size " +"%(snap_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1054 +#, python-format +msgid "" +"Cannot create volume of type %(new_type)s from snapshot of type " +"%(old_type)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1075 +#: cinder/volume/drivers/netapp/nfs.py:273 +#, python-format +msgid "" +"Cannot create clone of size %(vol_size)s from volume of size " +"%(src_vol_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1086 +#, python-format +msgid "Cannot create clone of type %(new_type)s from volume of type %(old_type)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1143 +#, python-format +msgid "No metadata property %(prop)s defined for the LUN %(name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1216 +#: cinder/volume/drivers/netapp/iscsi.py:1561 +msgid "Success getting LUN list from server" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1238 +#: cinder/volume/drivers/netapp/iscsi.py:1576 +#, python-format +msgid "Created LUN with name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1254 +#: cinder/volume/drivers/netapp/iscsi.py:1369 +#: cinder/volume/drivers/netapp/iscsi.py:1593 +#, python-format +msgid "Destroyed LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1291 +#, python-format +msgid "Mapped LUN %(handle)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1298 +#, python-format +msgid "" +"Successfully fetched target details for LUN %(handle)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1303 +#: cinder/volume/drivers/netapp/iscsi.py:1637 +#, python-format +msgid "Failed to get LUN target details for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1307 +#: cinder/volume/drivers/netapp/iscsi.py:1648 +#, python-format +msgid "Failed to get target portal for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1311 +#: cinder/volume/drivers/netapp/iscsi.py:1651 +#, python-format +msgid "Failed to get target IQN for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1344 +#, python-format +msgid "Unmapped LUN %(handle)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1365 +#, python-format +msgid "No entry in LUN table for snapshot %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1409 +#: cinder/volume/drivers/netapp/iscsi.py:1907 +msgid "Object is not a NetApp LUN." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1419 +#: cinder/volume/drivers/netapp/iscsi.py:2188 +#, python-format +msgid "Cloned LUN with new name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1439 +#, fuzzy, python-format +msgid "Could not find handle for LUN named %s" +msgstr "Nicht möglich volume %s zufinden" + +#: cinder/volume/drivers/netapp/iscsi.py:1458 +#: cinder/volume/drivers/netapp/iscsi.py:1936 +#, python-format +msgid "" +"Cannot clone volume of size %(vol_size)s from src volume of size " +"%(src_vol_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1518 +#, python-format +msgid "Using NetApp filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1585 +#, python-format +msgid "No entry in LUN table for volume/snapshot %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1628 +#, python-format +msgid "Mapped LUN %(name)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1632 +#, python-format +msgid "" +"Successfully fetched target details for LUN %(name)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1688 +#, fuzzy, python-format +msgid "Snapshot %s deletion successful" +msgstr "Volume %s: erfolgreich entfernt" + +#: cinder/volume/drivers/netapp/iscsi.py:1717 +#, python-format +msgid "Unmapped LUN %(name)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1735 +#, fuzzy, python-format +msgid "Failed to get vol with required size for volume: %s" +msgstr "Nicht möglich volume %s zufinden" + +#: cinder/volume/drivers/netapp/iscsi.py:1811 +#, python-format +msgid "Error mapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1829 +#, python-format +msgid "Error unmapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1923 +#, python-format +msgid "Could not find attribute for LUN named %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2057 +#, python-format +msgid "No iscsi service found for vserver %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2192 +#, python-format +msgid "No cloned lun named %s found on the filer" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2377 +#, python-format +msgid "Error finding luns for volume %(vol)s. Verify volume exists." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2481 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s completed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2484 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s failed" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:312 +#, python-format +msgid "" +"Cloning with params ip %(host_ip)s, exp_path\n" +" %(export_path)s, vol %(volume_name)s,\n" +" clone_name %(clone_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:461 +#, python-format +msgid "No interface found on cluster for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:487 +#, python-format +msgid "" +"No volume on cluster with vserver\n" +" %(vserver)s and junction path " +"%(junction)s\n" +" " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:493 +#, python-format +msgid "" +"Cloning with params volume %(volume)s,src %(src_path)s,\n" +" dest %(dest_path)s, vserver %(vserver)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:547 +#, python-format +msgid "No storage path found for export path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:555 +#, python-format +msgid "Cloning with src %(src_path)s, dest %(dest_path)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/__init__.py:27 +msgid "Nexenta SA returned the error" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:64 +#, python-format +msgid "Sending JSON data: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:69 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:75 +msgid "No headers in server response" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:76 +msgid "Bad response from server" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:79 +#, python-format +msgid "Got response: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:99 +#, python-format +msgid "Volume %s does not exist in Nexenta SA" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:211 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:220 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:228 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:238 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:274 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:282 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:74 +#, python-format +msgid "CLIQ command returned %s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:80 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:88 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. 
Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:118 +#, python-format +msgid "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. " +"Result=%(_xml)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:171 +#, python-format +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:235 +msgid "local_path not supported" +msgstr "" + +#: cinder/volume/drivers/san/san.py:148 +#, fuzzy, python-format +msgid "Error running SSH command: %s" +msgstr "Unerwarteter Fehler bei Ausführung des Kommandos." + +#: cinder/volume/drivers/san/san.py:169 +msgid "Specify san_password or san_private_key" +msgstr "" + +#: cinder/volume/drivers/san/san.py:173 +msgid "san_ip must be set" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:80 +#, python-format +msgid "Cannot parse list-view output: %s" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:174 +#, python-format +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:257 +#, python-format +msgid "SSH Command failed after '%(total_attempts)r' attempts: '%(command)s'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:261 +#, fuzzy, python-format +msgid "Error running ssh command: %s" +msgstr "Unerwarteter Fehler bei Ausführung des Kommandos." + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:446 +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:104 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:103 +#, python-format +msgid "CPG (%s) doesn't exist on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:493 +#, python-format +msgid "Must specify a valid persona %(valid)s, value '%(persona)s' is invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:532 +#, python-format +msgid "" +"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " +"invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:569 +#, python-format +msgid "Volume (%s) already exists on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:622 +#, fuzzy, python-format +msgid "Unexpected state while cloning %s" +msgstr "Unerwarteter Fehler bei Ausführung des Kommandos." + +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:97 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:96 +msgid "Login to 3PAR array invalid" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:218 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:237 +#, python-format +msgid "The hostname must be called '%s'" +msgstr "" + diff --git a/cinder/locale/en_AU/LC_MESSAGES/cinder.po b/cinder/locale/en_AU/LC_MESSAGES/cinder.po new file mode 100644 index 0000000000..edf412a44e --- /dev/null +++ b/cinder/locale/en_AU/LC_MESSAGES/cinder.po @@ -0,0 +1,5605 @@ +# English (Australia) translation for cinder +# Copyright (c) 2011 Rosetta Contributors and Canonical Ltd 2011 +# This file is distributed under the same license as the cinder package. +# FIRST AUTHOR , 2011. 
+# +msgid "" +msgstr "" +"Project-Id-Version: cinder\n" +"Report-Msgid-Bugs-To: FULL NAME \n" +"POT-Creation-Date: 2013-05-21 18:10+0000\n" +"PO-Revision-Date: 2011-10-21 11:27+0000\n" +"Last-Translator: Tom Fifield \n" +"Language-Team: English (Australia) \n" +"Plural-Forms: nplurals=2; plural=(n != 1)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 0.9.6\n" + +#: cinder/context.py:61 +#, python-format +msgid "Arguments dropped when creating context: %s" +msgstr "" + +#: cinder/context.py:92 +#, python-format +msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" +msgstr "" + +#: cinder/exception.py:63 +msgid "Unexpected error while running command." +msgstr "Unexpected error while running command." + +#: cinder/exception.py:66 +#, python-format +msgid "" +"%(description)s\n" +"Command: %(cmd)s\n" +"Exit code: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" +msgstr "" +"%(description)s\n" +"Command: %(cmd)s\n" +"Exit code: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" + +#: cinder/exception.py:90 +msgid "DB exception wrapped." +msgstr "" + +#: cinder/exception.py:104 +msgid "An unknown exception occurred." +msgstr "" + +#: cinder/exception.py:125 cinder/openstack/common/rpc/common.py:86 +msgid "Exception in string format operation" +msgstr "" + +#: cinder/exception.py:138 +msgid "Connection to glance failed" +msgstr "" + +#: cinder/exception.py:142 +msgid "Not authorized." +msgstr "" + +#: cinder/exception.py:147 +msgid "User does not have admin privileges" +msgstr "" + +#: cinder/exception.py:151 +#, python-format +msgid "Policy doesn't allow %(action)s to be performed." +msgstr "" + +#: cinder/exception.py:155 +#, fuzzy, python-format +msgid "Not authorized for image %(image_id)s." +msgstr "no method for message: %s" + +#: cinder/exception.py:159 +msgid "Unacceptable parameters." +msgstr "" + +#: cinder/exception.py:164 +msgid "Invalid snapshot" +msgstr "" + +#: cinder/exception.py:168 +#, python-format +msgid "Volume %(volume_id)s is still attached, detach volume first." +msgstr "" + +#: cinder/exception.py:172 +msgid "Failed to load data into json format" +msgstr "" + +#: cinder/exception.py:176 +msgid "The request is invalid." +msgstr "" + +#: cinder/exception.py:180 +msgid "The results are invalid." +msgstr "" + +#: cinder/exception.py:184 +msgid "Invalid input received" +msgstr "" + +#: cinder/exception.py:188 +msgid "Invalid volume type" +msgstr "" + +#: cinder/exception.py:192 +msgid "Invalid volume" +msgstr "" + +#: cinder/exception.py:196 +#, python-format +msgid "Invalid content type %(content_type)s." +msgstr "" + +#: cinder/exception.py:200 +msgid "Invalid Parameter: Unicode is not supported by the current database." +msgstr "" + +#: cinder/exception.py:207 +#, python-format +msgid "%(err)s" +msgstr "" + +#: cinder/exception.py:211 +msgid "Service is unavailable at this time." +msgstr "" + +#: cinder/exception.py:215 +#, python-format +msgid "Image %(image_id)s is unacceptable: %(reason)s" +msgstr "" + +#: cinder/exception.py:219 +msgid "Expected a uuid but received %(uuid)." +msgstr "" + +#: cinder/exception.py:223 +msgid "Resource could not be found." +msgstr "" + +#: cinder/exception.py:229 +#, python-format +msgid "Volume %(volume_id)s persistence file could not be found." +msgstr "" + +#: cinder/exception.py:233 +#, python-format +msgid "Volume %(volume_id)s could not be found." 
+msgstr "" + +#: cinder/exception.py:237 +#, python-format +msgid "Unable to locate account %(account_name)s on Solidfire device" +msgstr "" + +#: cinder/exception.py:242 +#, python-format +msgid "Volume not found for instance %(instance_id)s." +msgstr "" + +#: cinder/exception.py:246 +#, python-format +msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:251 cinder/exception.py:264 +msgid "Invalid metadata" +msgstr "" + +#: cinder/exception.py:255 cinder/exception.py:268 +msgid "Invalid metadata size" +msgstr "" + +#: cinder/exception.py:259 +#, python-format +msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:272 +#, python-format +msgid "Volume type %(volume_type_id)s could not be found." +msgstr "" + +#: cinder/exception.py:276 +#, python-format +msgid "Volume type with name %(volume_type_name)s could not be found." +msgstr "" + +#: cinder/exception.py:281 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s has no extra specs with key " +"%(extra_specs_key)s." +msgstr "" + +#: cinder/exception.py:286 +#, python-format +msgid "Snapshot %(snapshot_id)s could not be found." +msgstr "" + +#: cinder/exception.py:290 +#, python-format +msgid "deleting volume %(volume_name)s that has snapshot" +msgstr "" + +#: cinder/exception.py:294 +#, python-format +msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" +msgstr "" + +#: cinder/exception.py:299 +#, python-format +msgid "No target id found for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:303 +#, fuzzy, python-format +msgid "Failed to create iscsi target for volume %(volume_id)s." +msgstr "Tried to remove non-existent console %(console_id)s." + +#: cinder/exception.py:307 +#, python-format +msgid "Failed to attach iSCSI target for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:311 +#, fuzzy, python-format +msgid "Failed to remove iscsi target for volume %(volume_id)s." +msgstr "Tried to remove non-existent console %(console_id)s." + +#: cinder/exception.py:315 +#, python-format +msgid "No disk at %(location)s" +msgstr "" + +#: cinder/exception.py:319 +#, python-format +msgid "Invalid image href %(image_href)s." +msgstr "" + +#: cinder/exception.py:323 +#, python-format +msgid "Image %(image_id)s could not be found." +msgstr "" + +#: cinder/exception.py:327 +#, python-format +msgid "Service %(service_id)s could not be found." +msgstr "" + +#: cinder/exception.py:331 +#, python-format +msgid "Host %(host)s could not be found." +msgstr "" + +#: cinder/exception.py:335 +#, python-format +msgid "Scheduler Host Filter %(filter_name)s could not be found." +msgstr "" + +#: cinder/exception.py:339 +#, python-format +msgid "Scheduler Host Weigher %(weigher_name)s could not be found." +msgstr "" + +#: cinder/exception.py:343 +#, python-format +msgid "Could not find binary %(binary)s on host %(host)s." +msgstr "" + +#: cinder/exception.py:347 +#, python-format +msgid "Invalid reservation expiration %(expire)s." +msgstr "" + +#: cinder/exception.py:351 +#, python-format +msgid "" +"Change would make usage less than 0 for the following resources: " +"%(unders)s" +msgstr "" + +#: cinder/exception.py:356 +msgid "Quota could not be found" +msgstr "" + +#: cinder/exception.py:360 +#, python-format +msgid "Unknown quota resources %(unknown)s." +msgstr "" + +#: cinder/exception.py:364 +#, python-format +msgid "Quota for project %(project_id)s could not be found." 
+msgstr "" + +#: cinder/exception.py:368 +#, python-format +msgid "Quota class %(class_name)s could not be found." +msgstr "" + +#: cinder/exception.py:372 +#, python-format +msgid "Quota usage for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:376 +#, python-format +msgid "Quota reservation %(uuid)s could not be found." +msgstr "" + +#: cinder/exception.py:380 +#, python-format +msgid "Quota exceeded for resources: %(overs)s" +msgstr "" + +#: cinder/exception.py:384 +#, python-format +msgid "Migration %(migration_id)s could not be found." +msgstr "" + +#: cinder/exception.py:388 +#, python-format +msgid "Migration not found for instance %(instance_id)s with status %(status)s." +msgstr "" + +#: cinder/exception.py:393 +#, python-format +msgid "File %(file_path)s could not be found." +msgstr "" + +#: cinder/exception.py:397 +#, python-format +msgid "Class %(class_name)s could not be found: %(exception)s" +msgstr "" + +#: cinder/exception.py:401 +msgid "Action not allowed." +msgstr "" + +#: cinder/exception.py:410 +#, python-format +msgid "Key pair %(key_name)s already exists." +msgstr "" + +#: cinder/exception.py:414 +#, fuzzy, python-format +msgid "Volume Type %(id)s already exists." +msgstr "group %s already exists" + +#: cinder/exception.py:418 +msgid "Migration error" +msgstr "" + +#: cinder/exception.py:422 +#, python-format +msgid "Malformed message body: %(reason)s" +msgstr "" + +#: cinder/exception.py:426 +#, python-format +msgid "Could not find config at %(path)s" +msgstr "" + +#: cinder/exception.py:430 +#, python-format +msgid "Could not load paste app '%(name)s' from %(path)s" +msgstr "" + +#: cinder/exception.py:434 +#, python-format +msgid "No valid host was found. %(reason)s" +msgstr "" + +#: cinder/exception.py:438 +#, python-format +msgid "Host %(host)s is not up or doesn't exist." +msgstr "" + +#: cinder/exception.py:442 +msgid "Quota exceeded" +msgstr "" + +#: cinder/exception.py:449 +msgid "Requested volume or snapshot exceeds allowed Gigabytes quota" +msgstr "" + +#: cinder/exception.py:454 +msgid "Maximum volume/snapshot size exceeded" +msgstr "" + +#: cinder/exception.py:458 +#, python-format +msgid "Maximum number of volumes allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:462 +#, python-format +msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:466 +#, python-format +msgid "Detected more than one volume with name %(vol_name)s" +msgstr "" + +#: cinder/exception.py:470 +#, python-format +msgid "3PAR Host already exists: %(err)s. %(info)s" +msgstr "" + +#: cinder/exception.py:474 +#, python-format +msgid "Invalid 3PAR Domain: %(err)s" +msgstr "" + +#: cinder/exception.py:478 +#, python-format +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +msgstr "" + +#: cinder/exception.py:483 +msgid "Bad response from SolidFire API" +msgstr "" + +#: cinder/exception.py:487 +#, python-format +msgid "Error in SolidFire API response: data=%(data)s" +msgstr "" + +#: cinder/exception.py:491 +#, python-format +msgid "Unknown or unsupported command %(cmd)s" +msgstr "" + +#: cinder/exception.py:495 +#, python-format +msgid "Malformed response to command %(cmd)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:499 +#, python-format +msgid "Bad HTTP response status %(status)s" +msgstr "" + +#: cinder/exception.py:503 +#, python-format +msgid "Operation failed with status=%(status)s. 
Full dump: %(data)s" +msgstr "" + +#: cinder/exception.py:507 +#, fuzzy, python-format +msgid "Unable to create server object for initiator %(name)s" +msgstr "Unable to create VDI on SR %(sr_ref)s for instance %(instance_name)s" + +#: cinder/exception.py:511 +#, python-format +msgid "Unable to find server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:515 +msgid "Unable to find any active VPSA controller" +msgstr "" + +#: cinder/exception.py:519 +#, python-format +msgid "Failed to retrieve attachments for volume %(name)s" +msgstr "" + +#: cinder/exception.py:523 +#, python-format +msgid "Invalid attachment info for volume %(name)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:527 +#, python-format +msgid "Instance %(instance_id)s could not be found." +msgstr "" + +#: cinder/exception.py:531 +#, python-format +msgid "Bad or unexpected response from the storage volume backend API: %(data)s" +msgstr "" + +#: cinder/exception.py:536 +msgid "Unknown NFS exception" +msgstr "" + +#: cinder/exception.py:540 +msgid "No mounted NFS shares found" +msgstr "" + +#: cinder/exception.py:544 cinder/exception.py:556 +#, python-format +msgid "There is no share which can host %(volume_size)sG" +msgstr "" + +#: cinder/exception.py:548 +msgid "Unknown Gluster exception" +msgstr "" + +#: cinder/exception.py:552 +msgid "No mounted Gluster shares found" +msgstr "" + +#: cinder/exception.py:560 +#, python-format +msgid "" +"Glance metadata cannot be updated, key %(key)s exists for volume id " +"%(volume_id)s" +msgstr "" + +#: cinder/exception.py:565 +msgid "Failed to copy image to volume" +msgstr "" + +#: cinder/exception.py:569 +#, python-format +msgid "Backup %(backup_id)s could not be found." +msgstr "" + +#: cinder/exception.py:573 +#, fuzzy, python-format +msgid "Invalid backup: %(reason)s" +msgstr "Invalid backend: %s" + +#: cinder/exception.py:577 +#, fuzzy +msgid "Connection to swift failed" +msgstr "Connection to libvirt broke" + +#: cinder/flags.py:145 +msgid "Deploy v1 of the Cinder API. " +msgstr "" + +#: cinder/flags.py:148 +msgid "Deploy v2 of the Cinder API. " +msgstr "" + +#: cinder/manager.py:158 +#, python-format +msgid "Skipping %(full_task_name)s, %(ticks_to_skip)s ticks left until next run" +msgstr "" + +#: cinder/manager.py:164 +#, python-format +msgid "Running periodic task %(full_task_name)s" +msgstr "" + +#: cinder/manager.py:171 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "" + +#: cinder/manager.py:216 +msgid "Notifying Schedulers of capabilities ..." 
+msgstr "" + +#: cinder/policy.py:30 +msgid "JSON file representing policy" +msgstr "" + +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" +msgstr "" + +#: cinder/quota.py:704 +#, python-format +msgid "Created reservations %(reservations)s" +msgstr "" + +#: cinder/quota.py:726 +#, python-format +msgid "Failed to commit reservations %(reservations)s" +msgstr "" + +#: cinder/quota.py:747 +#, python-format +msgid "Failed to roll back reservations %(reservations)s" +msgstr "" + +#: cinder/service.py:124 +msgid "SIGTERM received" +msgstr "" + +#: cinder/service.py:174 cinder/openstack/common/service.py:164 +msgid "Parent process has died unexpectedly, exiting" +msgstr "" + +#: cinder/service.py:213 cinder/openstack/common/service.py:200 +msgid "Forking too fast, sleeping" +msgstr "" + +#: cinder/service.py:231 cinder/openstack/common/service.py:121 +#: cinder/openstack/common/service.py:218 +#, python-format +msgid "Caught %s, exiting" +msgstr "" + +#: cinder/service.py:236 cinder/openstack/common/service.py:223 +#, fuzzy +msgid "Unhandled exception" +msgstr "Inner Exception: %s" + +#: cinder/service.py:243 cinder/openstack/common/service.py:230 +#, python-format +msgid "Started child %d" +msgstr "" + +#: cinder/service.py:253 cinder/openstack/common/service.py:240 +#, fuzzy, python-format +msgid "Starting %d workers" +msgstr "start address" + +#: cinder/service.py:272 cinder/openstack/common/service.py:257 +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "" + +#: cinder/service.py:275 +#, python-format +msgid "Child %(pid)d exited with status %(code)d" +msgstr "" + +#: cinder/service.py:278 cinder/openstack/common/service.py:265 +#, python-format +msgid "pid %d not in child list" +msgstr "" + +#: cinder/service.py:286 +#, python-format +msgid "_wait_child %d" +msgstr "" + +#: cinder/service.py:302 +#, fuzzy, python-format +msgid "wait wrap.failed %s" +msgstr "NotFound raised: %s" + +#: cinder/service.py:310 cinder/openstack/common/service.py:293 +#, python-format +msgid "Caught %s, stopping children" +msgstr "" + +#: cinder/service.py:321 cinder/openstack/common/service.py:304 +#, python-format +msgid "Waiting on %d children to exit" +msgstr "" + +#: cinder/service.py:353 +#, fuzzy, python-format +msgid "Starting %(topic)s node (version %(version_string)s)" +msgstr "Starting %(topic)s node (version %(vcs_string)s)" + +#: cinder/service.py:367 cinder/openstack/common/rpc/service.py:47 +#, python-format +msgid "Creating Consumer connection for Service %s" +msgstr "" + +#: cinder/service.py:458 +msgid "Service killed that has no database entry" +msgstr "Service killed that has no database entry" + +#: cinder/service.py:495 +msgid "The service database object disappeared, Recreating it." +msgstr "The service database object disappeared, Recreating it." + +#: cinder/service.py:510 +msgid "Recovered model server connection!" +msgstr "Recovered model server connection!" 
+ +#: cinder/service.py:516 +msgid "model server went away" +msgstr "model server went away" + +#: cinder/service.py:608 +msgid "Full set of FLAGS:" +msgstr "" + +#: cinder/service.py:615 +#, python-format +msgid "%(flag)s : FLAG SET " +msgstr "" + +#: cinder/utils.py:96 +#, python-format +msgid "Fetching %s" +msgstr "Fetching %s" + +#: cinder/utils.py:147 cinder/openstack/common/processutils.py:122 +#, python-format +msgid "Got unknown keyword args to utils.execute: %r" +msgstr "" + +#: cinder/utils.py:153 +msgid "" +"The root_helper option (which lets you specify a root wrapper different " +"from cinder-rootwrap, and defaults to using sudo) is now deprecated. You " +"should use the rootwrap_config option instead." +msgstr "" + +#: cinder/utils.py:169 cinder/openstack/common/processutils.py:137 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "Running cmd (subprocess): %s" + +#: cinder/utils.py:186 cinder/utils.py:267 +#: cinder/openstack/common/processutils.py:162 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:224 +#, python-format +msgid "Result was %s" +msgstr "Result was %s" + +#: cinder/utils.py:199 cinder/openstack/common/processutils.py:174 +#, python-format +msgid "%r failed. Retrying." +msgstr "" + +#: cinder/utils.py:241 cinder/volume/drivers/san/hp/hp_3par_common.py:197 +#, python-format +msgid "Running cmd (SSH): %s" +msgstr "Running cmd (SSH): %s" + +#: cinder/utils.py:243 +msgid "Environment not supported over SSH" +msgstr "" + +#: cinder/utils.py:247 +msgid "process_input not supported over SSH" +msgstr "" + +#: cinder/utils.py:316 cinder/volume/drivers/huawei/huawei_iscsi.py:91 +msgid "Specify a password or private_key" +msgstr "" + +#: cinder/utils.py:332 cinder/volume/drivers/huawei/huawei_iscsi.py:99 +#, fuzzy, python-format +msgid "Error connecting via ssh: %s" +msgstr "Connecting to libvirt: %s" + +#: cinder/utils.py:373 +#, python-format +msgid "debug in callback: %s" +msgstr "debug in callback: %s" + +#: cinder/utils.py:532 +#, python-format +msgid "Link Local address is not found.:%s" +msgstr "Link Local address is not found.:%s" + +#: cinder/utils.py:535 +#, python-format +msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +msgstr "Couldn't get Link Local IP of %(interface)s :%(ex)s" + +#: cinder/utils.py:569 +#, python-format +msgid "Invalid backend: %s" +msgstr "Invalid backend: %s" + +#: cinder/utils.py:580 +#, python-format +msgid "backend %s" +msgstr "backend %s" + +#: cinder/utils.py:630 +msgid "in looping call" +msgstr "" + +#: cinder/utils.py:813 +#, python-format +msgid "Expected object of type: %s" +msgstr "" + +#: cinder/utils.py:903 +#, python-format +msgid "timefunc: '%(name)s' took %(total_time).2f secs" +msgstr "" + +#: cinder/utils.py:1105 +#, python-format +msgid "Could not remove tmpdir: %s" +msgstr "" + +#: cinder/utils.py:1213 +#, python-format +msgid "Unknown byte multiplier: %s" +msgstr "" + +#: cinder/wsgi.py:121 +#, fuzzy, python-format +msgid "Unable to find cert_file : %s" +msgstr "Unable to find SR from VBD %s" + +#: cinder/wsgi.py:124 +#, fuzzy, python-format +msgid "Unable to find ca_file : %s" +msgstr "Unable to find SR from VBD %s" + +#: cinder/wsgi.py:127 +#, fuzzy, python-format +msgid "Unable to find key_file : %s" +msgstr "Unable to find SR from VBD %s" + +#: cinder/wsgi.py:130 +msgid "" +"When running server in SSL mode, you must specify both a cert_file and " +"key_file option value in your configuration file" +msgstr "" + +#: cinder/wsgi.py:163 +#, python-format +msgid "Could not bind to 
%(host)s:%(port)s after trying for 30 seconds" +msgstr "" + +#: cinder/wsgi.py:207 +#, python-format +msgid "Started %(name)s on %(_host)s:%(_port)s" +msgstr "" + +#: cinder/wsgi.py:226 +msgid "Stopping WSGI server." +msgstr "" + +#: cinder/wsgi.py:240 +msgid "WSGI server has stopped." +msgstr "" + +#: cinder/wsgi.py:309 +msgid "You must implement __call__" +msgstr "You must implement __call__" + +#: cinder/api/auth.py:26 +msgid "" +"cinder.api.auth:CinderKeystoneContext is deprecated. Please use " +"cinder.api.middleware.auth:CinderKeystoneContext instead." +msgstr "" + +#: cinder/api/auth.py:34 +msgid "" +"cinder.api.auth:pipeline_factory is deprecated. Please use " +"cinder.api.middleware.auth:pipeline_factory instead." +msgstr "" + +#: cinder/api/common.py:63 cinder/api/common.py:97 cinder/volume/api.py:359 +msgid "limit param must be an integer" +msgstr "" + +#: cinder/api/common.py:66 cinder/api/common.py:101 cinder/volume/api.py:356 +msgid "limit param must be positive" +msgstr "" + +#: cinder/api/common.py:91 +msgid "offset param must be an integer" +msgstr "" + +#: cinder/api/common.py:105 +msgid "offset param must be positive" +msgstr "" + +#: cinder/api/common.py:133 +#, python-format +msgid "marker [%s] not found" +msgstr "" + +#: cinder/api/common.py:160 +#, python-format +msgid "href %s does not contain version" +msgstr "" + +#: cinder/api/extensions.py:184 +msgid "Initializing extension manager." +msgstr "" + +#: cinder/api/extensions.py:199 +#, python-format +msgid "Loaded extension: %s" +msgstr "" + +#: cinder/api/extensions.py:237 +#, python-format +msgid "Ext name: %s" +msgstr "" + +#: cinder/api/extensions.py:238 +#, python-format +msgid "Ext alias: %s" +msgstr "" + +#: cinder/api/extensions.py:239 +#, python-format +msgid "Ext description: %s" +msgstr "" + +#: cinder/api/extensions.py:241 +#, python-format +msgid "Ext namespace: %s" +msgstr "" + +#: cinder/api/extensions.py:242 +#, python-format +msgid "Ext updated: %s" +msgstr "" + +#: cinder/api/extensions.py:244 +#, python-format +msgid "Exception loading extension: %s" +msgstr "" + +#: cinder/api/extensions.py:258 +#, python-format +msgid "Loading extension %s" +msgstr "" + +#: cinder/api/extensions.py:264 +#, python-format +msgid "Calling extension factory %s" +msgstr "" + +#: cinder/api/extensions.py:278 +#, python-format +msgid "osapi_volume_extension is set to deprecated path: %s" +msgstr "" + +#: cinder/api/extensions.py:280 +#, python-format +msgid "" +"Please set your flag or cinder.conf settings for osapi_volume_extension " +"to: %s" +msgstr "" + +#: cinder/api/extensions.py:289 +#, python-format +msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:357 +#, python-format +msgid "Failed to load extension %(classpath)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:381 +#, python-format +msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgstr "" + +#: cinder/api/sizelimit.py:25 +msgid "" +"cinder.api.sizelimit:RequestBodySizeLimiter is deprecated. Please use " +"cinder.api.middleware.sizelimit:RequestBodySizeLimiter instead" +msgstr "" + +#: cinder/api/xmlutil.py:268 +msgid "element is not a child" +msgstr "" + +#: cinder/api/xmlutil.py:417 +msgid "root element selecting a list" +msgstr "" + +#: cinder/api/xmlutil.py:742 +#, python-format +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgstr "" + +#: cinder/api/xmlutil.py:861 +msgid "subclasses must implement construct()!" 
+msgstr "" + +#: cinder/api/contrib/admin_actions.py:80 +#, python-format +msgid "Updating %(resource)s '%(id)s' with '%(update)r'" +msgstr "" + +#: cinder/api/contrib/backups.py:128 +#, python-format +msgid "show called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:140 +#, fuzzy, python-format +msgid "delete called for member %s" +msgstr "Secret Key change for user %s" + +#: cinder/api/contrib/backups.py:143 +#, python-format +msgid "Delete backup with id: %s" +msgstr "" + +#: cinder/api/contrib/backups.py:185 +#, python-format +msgid "Creating new backup %s" +msgstr "" + +#: cinder/api/contrib/backups.py:195 cinder/api/contrib/backups.py:230 +msgid "Incorrect request body format" +msgstr "" + +#: cinder/api/contrib/backups.py:201 +#, python-format +msgid "Creating backup of volume %(volume_id)s in container %(container)s" +msgstr "" + +#: cinder/api/contrib/backups.py:221 +#, python-format +msgid "Restoring backup %(backup_id)s (%(body)s)" +msgstr "" + +#: cinder/api/contrib/backups.py:234 +#, python-format +msgid "Restoring backup %(backup_id)s to volume %(volume_id)s" +msgstr "" + +#: cinder/api/contrib/extended_snapshot_attributes.py:61 +msgid "Snapshot not found." +msgstr "" + +#: cinder/api/contrib/hosts.py:83 cinder/api/openstack/wsgi.py:158 +msgid "cannot understand XML" +msgstr "" + +#: cinder/api/contrib/hosts.py:133 +#, python-format +msgid "Host '%s' could not be found." +msgstr "" + +#: cinder/api/contrib/hosts.py:162 +#, python-format +msgid "Invalid status: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:165 +#, python-format +msgid "Invalid update setting: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:177 +#, python-format +msgid "Setting host %(host)s to %(state)s." +msgstr "" + +#: cinder/api/contrib/hosts.py:202 +msgid "Describe-resource is admin only functionality" +msgstr "" + +#: cinder/api/contrib/hosts.py:210 +msgid "Host not found" +msgstr "" + +#: cinder/api/contrib/quotas.py:63 +msgid "Quota limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:102 +msgid "Request body empty" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:106 +#: cinder/api/v1/snapshot_metadata.py:77 cinder/api/v1/volume_metadata.py:77 +#: cinder/api/v2/snapshot_metadata.py:77 +msgid "Request body and URI mismatch" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:109 +#: cinder/api/v1/snapshot_metadata.py:81 cinder/api/v1/volume_metadata.py:81 +#: cinder/api/v2/snapshot_metadata.py:81 +msgid "Request body contains too many items" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:159 +msgid "Invalid request body" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:163 +msgid "No image_name was specified in request." +msgstr "" + +#: cinder/api/middleware/fault.py:45 +#, python-format +msgid "Caught error: %s" +msgstr "Caught error: %s" + +#: cinder/api/middleware/fault.py:54 cinder/api/openstack/wsgi.py:886 +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "" + +#: cinder/api/middleware/sizelimit.py:54 cinder/api/middleware/sizelimit.py:63 +#: cinder/api/middleware/sizelimit.py:77 +msgid "Request is too large." 
+msgstr "" + +#: cinder/api/openstack/__init__.py:74 +msgid "Must specify an ExtensionManager class" +msgstr "" + +#: cinder/api/openstack/__init__.py:85 +#, python-format +msgid "Extended resource: %s" +msgstr "" + +#: cinder/api/openstack/__init__.py:110 +#, python-format +msgid "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" +msgstr "" + +#: cinder/api/openstack/__init__.py:115 +#, python-format +msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgstr "" + +#: cinder/api/openstack/__init__.py:128 +msgid "" +"cinder.api.openstack:FaultWrapper is deprecated. Please use " +"cinder.api.middleware.fault:FaultWrapper instead." +msgstr "" + +#: cinder/api/openstack/urlmap.py:25 +msgid "" +"cinder.api.openstack.urlmap:urlmap_factory is deprecated. Please use " +"cinder.api.urlmap:urlmap_factory instead." +msgstr "" + +#: cinder/api/openstack/wsgi.py:134 cinder/api/openstack/wsgi.py:537 +msgid "cannot understand JSON" +msgstr "" + +#: cinder/api/openstack/wsgi.py:542 +msgid "too many body keys" +msgstr "" + +#: cinder/api/openstack/wsgi.py:581 +#, python-format +msgid "Exception handling resource: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:586 +#, python-format +msgid "Fault thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:589 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:697 +msgid "Unrecognized Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:701 +msgid "No Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:705 +msgid "Empty body provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:816 +#, python-format +msgid "There is no such action: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:819 cinder/api/openstack/wsgi.py:832 +#: cinder/api/v1/snapshot_metadata.py:55 cinder/api/v1/snapshot_metadata.py:73 +#: cinder/api/v1/snapshot_metadata.py:98 cinder/api/v1/snapshot_metadata.py:123 +#: cinder/api/v1/volume_metadata.py:55 cinder/api/v1/volume_metadata.py:73 +#: cinder/api/v1/volume_metadata.py:98 cinder/api/v1/volume_metadata.py:123 +#: cinder/api/v2/snapshot_metadata.py:55 cinder/api/v2/snapshot_metadata.py:73 +#: cinder/api/v2/snapshot_metadata.py:98 cinder/api/v2/snapshot_metadata.py:123 +msgid "Malformed request body" +msgstr "" + +#: cinder/api/openstack/wsgi.py:829 +msgid "Unsupported Content-Type" +msgstr "" + +#: cinder/api/openstack/wsgi.py:841 +msgid "Malformed request url" +msgstr "" + +#: cinder/api/openstack/wsgi.py:889 +#, python-format +msgid "%(url)s returned a fault: %(e)s" +msgstr "" + +#: cinder/api/openstack/volume/__init__.py:25 +msgid "" +"cinder.api.openstack.volume:APIRouter is deprecated. Please use " +"cinder.api.v1.router:APIRouter instead." +msgstr "" + +#: cinder/api/openstack/volume/versions.py:26 +msgid "" +"cinder.api.openstack.volume.versions.Versions is deprecated. Please use " +"cinder.api.versions.Versions instead." +msgstr "" + +#: cinder/api/v1/limits.py:144 cinder/api/v2/limits.py:144 +#, python-format +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." +msgstr "" + +#: cinder/api/v1/limits.py:270 cinder/api/v2/limits.py:270 +msgid "This request was rate-limited." 
+msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:39 cinder/api/v1/snapshot_metadata.py:119 +#: cinder/api/v1/snapshot_metadata.py:158 cinder/api/v2/snapshot_metadata.py:39 +#: cinder/api/v2/snapshot_metadata.py:119 +#: cinder/api/v2/snapshot_metadata.py:158 +#, fuzzy +msgid "snapshot does not exist" +msgstr "instance %s: snapshotting" + +#: cinder/api/v1/snapshot_metadata.py:141 +#: cinder/api/v1/snapshot_metadata.py:151 cinder/api/v1/volume_metadata.py:141 +#: cinder/api/v1/volume_metadata.py:151 cinder/api/v2/snapshot_metadata.py:141 +#: cinder/api/v2/snapshot_metadata.py:151 +msgid "Metadata item was not found" +msgstr "" + +#: cinder/api/v1/snapshots.py:123 cinder/api/v2/snapshots.py:123 +#, python-format +msgid "Delete snapshot with id: %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:173 cinder/api/v2/snapshots.py:179 +#, python-format +msgid "Create snapshot from volume %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:177 cinder/api/v2/snapshots.py:188 +#, python-format +msgid "Invalid value '%s' for force. " +msgstr "" + +#: cinder/api/v1/volume_metadata.py:39 cinder/api/v1/volume_metadata.py:119 +#: cinder/api/v1/volume_metadata.py:158 +#, fuzzy +msgid "volume does not exist" +msgstr "volume group %s doesn't exist" + +#: cinder/api/v1/volumes.py:106 +#, python-format +msgid "vol=%s" +msgstr "" + +#: cinder/api/v1/volumes.py:236 cinder/api/v2/volumes.py:153 +#, python-format +msgid "Delete volume with id: %s" +msgstr "" + +#: cinder/api/v1/volumes.py:278 cinder/api/v1/volumes.py:282 +#: cinder/api/v2/volumes.py:209 cinder/api/v2/volumes.py:213 +msgid "Invalid imageRef provided." +msgstr "" + +#: cinder/api/v1/volumes.py:339 cinder/api/v2/volumes.py:272 +#, python-format +msgid "Create volume of %s GB" +msgstr "Create volume of %s GB" + +#: cinder/api/v1/volumes.py:418 +#, python-format +msgid "Removing options '%(bad_options)s' from query" +msgstr "" + +#: cinder/api/v2/volumes.py:359 +#, python-format +msgid "Removing options '%s' from query" +msgstr "" + +#: cinder/backup/api.py:53 +#, fuzzy +msgid "Backup status must be available or error" +msgstr "Volume status must be available" + +#: cinder/backup/api.py:78 +#, fuzzy +msgid "Volume to be backed up must be available" +msgstr "Volume status must be available" + +#: cinder/backup/api.py:112 +#, fuzzy +msgid "Backup status must be available" +msgstr "Volume status must be available" + +#: cinder/backup/api.py:117 +msgid "Backup to be restored has invalid size" +msgstr "" + +#: cinder/backup/api.py:126 +#, python-format +msgid "Creating volume of %(size)s GB for restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/api.py:140 +#, python-format +msgid "" +"volume size %(volume_size)d is too small to restore backup of size " +"%(size)d." 
+msgstr "" + +#: cinder/backup/api.py:145 +#, fuzzy +msgid "Volume to be restored to must be available" +msgstr "Volume status must be available" + +#: cinder/backup/api.py:151 +msgid "Volume to be restored to is smaller than the backup to be restored" +msgstr "" + +#: cinder/backup/api.py:155 +#, python-format +msgid "Overwriting volume %(volume_id)s with restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/manager.py:79 +msgid "Cleaning up incomplete backup operations" +msgstr "" + +#: cinder/backup/manager.py:83 +#, python-format +msgid "Resetting volume %s to available (was backing-up)" +msgstr "" + +#: cinder/backup/manager.py:87 +#, python-format +msgid "Resetting volume %s to error_restoring (was restoring-backup)" +msgstr "" + +#: cinder/backup/manager.py:98 +#, python-format +msgid "Resetting backup %s to error (was creating)" +msgstr "" + +#: cinder/backup/manager.py:104 +#, python-format +msgid "Resetting backup %s to available (was restoring)" +msgstr "" + +#: cinder/backup/manager.py:109 +#, python-format +msgid "Resuming delete on backup: %s" +msgstr "" + +#: cinder/backup/manager.py:119 +#, python-format +msgid "create_backup started, backup: %(backup_id)s for volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:128 +#, python-format +msgid "" +"create_backup aborted, expected volume status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:137 +#, python-format +msgid "" +"create_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:160 +#, python-format +msgid "create_backup finished. backup: %s" +msgstr "" + +#: cinder/backup/manager.py:166 +#, python-format +msgid "" +"restore_backup started, restoring backup: %(backup_id)s to volume: " +"%(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:175 +#, python-format +msgid "" +"restore_backup aborted, expected volume status %(expected_status)s but " +"got %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:183 +#, python-format +msgid "" +"restore_backup aborted, expected backup status %(expected_status)s but " +"got %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:199 +#, python-format +msgid "" +"restore_backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]" +msgstr "" + +#: cinder/backup/manager.py:220 +#, python-format +msgid "" +"restore_backup finished, backup: %(backup_id)s restored to volume: " +"%(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:228 +#, python-format +msgid "delete_backup started, backup: %s" +msgstr "" + +#: cinder/backup/manager.py:234 +#, python-format +msgid "" +"delete_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:244 +#, python-format +msgid "" +"delete_backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]" +msgstr "" + +#: cinder/backup/manager.py:264 +#, python-format +msgid "delete_backup finished, backup %s deleted" +msgstr "" + +#: cinder/backup/services/swift.py:96 +#, python-format +msgid "unsupported compression algorithm: %s" +msgstr "" + +#: cinder/backup/services/swift.py:117 +#, python-format +msgid "_check_container_exists: container: %s" +msgstr "" + +#: cinder/backup/services/swift.py:122 +#, 
fuzzy, python-format +msgid "container %s does not exist" +msgstr "volume group %s doesn't exist" + +#: cinder/backup/services/swift.py:127 +#, python-format +msgid "container %s exists" +msgstr "" + +#: cinder/backup/services/swift.py:133 +#, python-format +msgid "_create_container started, container: %(container)s,backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:148 +#, python-format +msgid "_generate_swift_object_name_prefix: %s" +msgstr "" + +#: cinder/backup/services/swift.py:159 +#, python-format +msgid "generated object list: %s" +msgstr "" + +#: cinder/backup/services/swift.py:169 +#, python-format +msgid "" +"_write_metadata started, container name: %(container)s, metadata " +"filename: %(filename)s" +msgstr "" + +#: cinder/backup/services/swift.py:184 +#, python-format +msgid "" +"error writing metadata file to swift, MD5 of metadata file in swift " +"[%(etag)s] is not the same as MD5 of metadata file sent to swift " +"[%(md5)s]" +msgstr "" + +#: cinder/backup/services/swift.py:188 +msgid "_write_metadata finished" +msgstr "" + +#: cinder/backup/services/swift.py:193 +#, python-format +msgid "" +"_read_metadata started, container name: %(container)s, metadata filename:" +" %(filename)s" +msgstr "" + +#: cinder/backup/services/swift.py:197 +#, python-format +msgid "_read_metadata finished (%s)" +msgstr "" + +#: cinder/backup/services/swift.py:207 +#, python-format +msgid "volume size %d is invalid." +msgstr "" + +#: cinder/backup/services/swift.py:221 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to swift, volume size: " +"%(volume_size_bytes)d, swift object names prefix %(object_prefix)s, " +"availability zone: %(availability_zone)s" +msgstr "" + +#: cinder/backup/services/swift.py:237 +msgid "reading chunk of data from volume" +msgstr "" + +#: cinder/backup/services/swift.py:244 +#, python-format +msgid "" +"compressed %(data_size_bytes)d bytes of data to %(comp_size_bytes)d bytes" +" using %(algorithm)s" +msgstr "" + +#: cinder/backup/services/swift.py:248 +msgid "not compressing data" +msgstr "" + +#: cinder/backup/services/swift.py:252 +msgid "About to put_object" +msgstr "" + +#: cinder/backup/services/swift.py:257 +#, python-format +msgid "swift MD5 for %(object_name)s: %(etag)s" +msgstr "" + +#: cinder/backup/services/swift.py:260 +#, python-format +msgid "backup MD5 for %(object_name)s: %(md5)s" +msgstr "" + +#: cinder/backup/services/swift.py:262 +#, python-format +msgid "" +"error writing object to swift, MD5 of object in swift %(etag)s is not the" +" same as MD5 of object sent to swift %(md5)s" +msgstr "" + +#: cinder/backup/services/swift.py:268 +msgid "Calling eventlet.sleep(0)" +msgstr "" + +#: cinder/backup/services/swift.py:276 +#, python-format +msgid "backup %s finished." +msgstr "" + +#: cinder/backup/services/swift.py:281 +#, python-format +msgid "v1 swift volume backup restore of %s started" +msgstr "" + +#: cinder/backup/services/swift.py:287 +#, python-format +msgid "metadata_object_names = %s" +msgstr "" + +#: cinder/backup/services/swift.py:293 +msgid "" +"restore_backup aborted, actual swift object list in swift does not match " +"object list stored in metadata" +msgstr "" + +#: cinder/backup/services/swift.py:299 +#, python-format +msgid "" +"restoring object from swift. 
backup: %(backup_id)s, container: " +"%(container)s, swift object name: %(object_name)s, volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:309 +#, python-format +msgid "decompressing data using %s algorithm" +msgstr "" + +#: cinder/backup/services/swift.py:323 +#, python-format +msgid "v1 swift volume backup restore of %s finished" +msgstr "" + +#: cinder/backup/services/swift.py:331 +#, python-format +msgid "" +"starting restore of backup %(object_prefix)s from swift container: " +"%(container)s, to volume %(volume_id)s, backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:339 +#, python-format +msgid "Restoring swift backup version %s" +msgstr "" + +#: cinder/backup/services/swift.py:344 +#, python-format +msgid "No support to restore swift backup version %s" +msgstr "" + +#: cinder/backup/services/swift.py:348 +#, python-format +msgid "restore %(backup_id)s to %(volume_id)s finished." +msgstr "" + +#: cinder/backup/services/swift.py:362 +msgid "swift error while listing objects, continuing with delete" +msgstr "" + +#: cinder/backup/services/swift.py:371 +#, python-format +msgid "swift error while deleting object %s, continuing with delete" +msgstr "" + +#: cinder/backup/services/swift.py:374 +#, python-format +msgid "deleted swift object: %(swift_object_name)s in container: %(container)s" +msgstr "" + +#: cinder/backup/services/swift.py:380 +#, python-format +msgid "delete %s finished" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:152 +#, python-format +msgid "Creating iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:171 cinder/brick/iscsi/iscsi.py:264 +#: cinder/brick/iscsi/iscsi.py:400 cinder/brick/iscsi/iscsi.py:409 +#, python-format +msgid "Failed to create iscsi target for volume id:%(vol_id)s." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:181 +#, python-format +msgid "" +"Failed to create iscsi target for volume id:%(vol_id)s. Please ensure " +"your tgtd config file contains 'include %(volumes_dir)s/*'" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:192 +#, python-format +msgid "Removing iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:209 cinder/brick/iscsi/iscsi.py:426 +#, fuzzy, python-format +msgid "Failed to remove iscsi target for volume id:%(vol_id)s." +msgstr "Tried to remove non-existent console %(console_id)s." 
+ +#: cinder/brick/iscsi/iscsi.py:218 cinder/brick/iscsi/iscsi.py:434 +msgid "valid iqn needed for show_target" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:270 +#, python-format +msgid "Removing iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:356 +msgid "rtstool is not installed correctly" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:376 +#, python-format +msgid "Creating iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:416 +#, fuzzy, python-format +msgid "Removing iscsi_target: %s" +msgstr "Rebooting instance %s" + +#: cinder/brick/iscsi/iscsi.py:455 +#, python-format +msgid "Failed to add initiator iqn %s to target" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:35 +#, fuzzy, python-format +msgid "Unable to find Volume Group: %s" +msgstr "Unable to detach volume %s" + +#: cinder/brick/local_dev/lvm.py:41 +#, fuzzy, python-format +msgid "Failed to create Volume Group: %s" +msgstr "Unable to locate volume %s" + +#: cinder/brick/local_dev/lvm.py:75 +msgid "Error creating Volume Group" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:76 +#, python-format +msgid "Cmd :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:77 +#, python-format +msgid "StdOut :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:78 +#, python-format +msgid "StdErr :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:82 +#, fuzzy, python-format +msgid "Unable to locate Volume Group %s" +msgstr "Unable to locate volume %s" + +#: cinder/brick/local_dev/lvm.py:265 +#, fuzzy, python-format +msgid "Unable to find VG: %s" +msgstr "Unable to find SR from VBD %s" + +#: cinder/brick/local_dev/lvm.py:284 +msgid "" +"Requested to setup thin provisioning, however current LVM version does " +"not support it." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:337 +#, fuzzy, python-format +msgid "Unable to find LV: %s" +msgstr "Unable to find SR from VBD %s" + +#: cinder/common/sqlalchemyutils.py:68 +msgid "Id not in sort_keys; is sort_keys unique?" +msgstr "" + +#: cinder/common/sqlalchemyutils.py:116 +msgid "Unknown sort direction, must be 'desc' or 'asc'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:51 +msgid "Use of empty request context is deprecated" +msgstr "Use of empty request context is deprecated" + +#: cinder/db/sqlalchemy/api.py:178 +#, python-format +msgid "Unrecognized read_deleted value '%s'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:786 +#, python-format +msgid "Change will make usage less than 0 for the following resources: %(unders)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1797 cinder/db/sqlalchemy/api.py:1823 +#, python-format +msgid "No backend config with id %(sm_backend_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1880 +#, python-format +msgid "No sm_flavor called %(sm_flavor)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1924 +#, python-format +msgid "No sm_volume with id %(volume_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1987 +#, python-format +msgid "No backup with id %(backup_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:74 +msgid "version should be an integer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:101 +msgid "Upgrade DB using Essex release first." +msgstr "" + +#: cinder/db/sqlalchemy/session.py:71 +#, python-format +msgid "Got mysql server has gone away: %s" +msgstr "" + +#: cinder/db/sqlalchemy/session.py:132 +#, python-format +msgid "SQL connection failed. %s attempts left." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:243 +msgid "Exception while creating table." 
+msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:272 +msgid "Downgrade from initial Cinder install is unsupported." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:85 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:123 +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:82 +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:47 +#, python-format +msgid "Table |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:135 +msgid "quota_classes table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:142 +msgid "quota_usages table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:149 +msgid "reservations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:62 +msgid "Exception while creating table 'volume_glance_metedata'" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:77 +msgid "volume_glance_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:94 +msgid "backups table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:60 +msgid "snapshot_metadata table not dropped" +msgstr "" + +#: cinder/image/glance.py:151 +#, python-format +msgid "" +"Error contacting glance server '%(host)s:%(port)s' for '%(method)s', " +"%(extra)s." +msgstr "" + +#: cinder/image/image_utils.py:116 +msgid "Snapshot list encountered but no header found!" +msgstr "" + +#: cinder/image/image_utils.py:224 +msgid "'qemu-img info' parsing failed." +msgstr "" + +#: cinder/image/image_utils.py:231 +#, python-format +msgid "fmt=%(fmt)s backed by:%(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:248 +#, python-format +msgid "Converted to raw, but format is now %s" +msgstr "" + +#: cinder/image/image_utils.py:278 +#, python-format +msgid "Converted to %(f1)s, but format is now %(f2)s" +msgstr "" + +#: cinder/openstack/common/exception.py:104 +#, fuzzy +msgid "Uncaught exception" +msgstr "Got exception: %s" + +#: cinder/openstack/common/excutils.py:48 +#, python-format +msgid "Original exception being dropped: %s" +msgstr "" + +#: cinder/openstack/common/lockutils.py:102 +#, python-format +msgid "Could not release the acquired lock `%s`" +msgstr "" + +#: cinder/openstack/common/lockutils.py:188 +#, python-format +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:199 +#, python-format +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:226 +#, python-format +msgid "Got file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:234 +#, python-format +msgid "Released file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." 
+msgstr "" + +#: cinder/openstack/common/log.py:226 +#, fuzzy, python-format +msgid "Deprecated: %s" +msgstr "received %s" + +#: cinder/openstack/common/log.py:329 +#, python-format +msgid "Error loading logging config %(log_config)s: %(err_msg)s" +msgstr "" + +#: cinder/openstack/common/log.py:379 +#, python-format +msgid "syslog facility must be one of: %s" +msgstr "" + +#: cinder/openstack/common/log.py:537 +#, python-format +msgid "Fatal call to deprecated config: %(msg)s" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:84 +#, python-format +msgid "task run outlasted interval by %s sec" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:91 +msgid "in fixed duration looping call" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:131 +#, python-format +msgid "Dynamic looping call sleeping for %.02f seconds" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:138 +msgid "in dynamic looping call" +msgstr "" + +#: cinder/openstack/common/policy.py:149 +#, python-format +msgid "" +"Inheritance-based rules are deprecated; use the default brain instead of " +"%s." +msgstr "" + +#: cinder/openstack/common/policy.py:163 +#, python-format +msgid "Failed to understand rule %(match)r" +msgstr "" + +#: cinder/openstack/common/policy.py:173 +#, python-format +msgid "Inheritance-based rules are deprecated; update _check_%s" +msgstr "" + +#: cinder/openstack/common/policy.py:180 +#, python-format +msgid "No handler for matches of kind %s" +msgstr "" + +#: cinder/openstack/common/service.py:112 +#: cinder/openstack/common/service.py:275 +msgid "Full set of CONF:" +msgstr "" + +#: cinder/openstack/common/service.py:261 +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "" + +#: cinder/openstack/common/strutils.py:72 +#, python-format +msgid "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:125 +#, python-format +msgid "%s not in valid priorities" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:141 +#, python-format +msgid "" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:171 +#, python-format +msgid "Failed to load notifier %s. These notifications will not be sent." +msgstr "" + +#: cinder/openstack/common/notifier/rabbit_notifier.py:27 +msgid "The rabbit_notifier is now deprecated. Please use rpc_notifier instead." +msgstr "" + +#: cinder/openstack/common/notifier/rpc_notifier.py:45 +#: cinder/openstack/common/notifier/rpc_notifier2.py:51 +#, python-format +msgid "Could not send notification to %(topic)s. Payload=%(message)s" +msgstr "" + +#: cinder/openstack/common/rpc/__init__.py:106 +#, python-format +msgid "" +"A RPC is being made while holding a lock. The locks currently held are " +"%(locks)s. This is probably a bug. Please report it. Include the " +"following: [%(stack)s]." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:75 +msgid "Pool creating new connection" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:200 +#, python-format +msgid "no calling threads waiting for msg_id : %s, message : %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:208 +#, python-format +msgid "" +"Number of call waiters is greater than warning threshhold: %d. There " +"could be a MulticallProxyWaiter leak." 
+msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:291 +#, python-format +msgid "unpacked context: %s" +msgstr "unpacked context: %s" + +#: cinder/openstack/common/rpc/amqp.py:337 +#, python-format +msgid "UNIQUE_ID is %s." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:406 +#, python-format +msgid "received %s" +msgstr "received %s" + +#: cinder/openstack/common/rpc/amqp.py:414 +#, python-format +msgid "no method for message: %s" +msgstr "no method for message: %s" + +#: cinder/openstack/common/rpc/amqp.py:415 +#, python-format +msgid "No method for message: %s" +msgstr "No method for message: %s" + +#: cinder/openstack/common/rpc/amqp.py:443 +#: cinder/openstack/common/rpc/impl_zmq.py:286 +#, python-format +msgid "Expected exception during message handling (%s)" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:451 +#: cinder/openstack/common/rpc/impl_zmq.py:292 +msgid "Exception during message handling" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:586 +#, python-format +msgid "Making synchronous call on %s ..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:589 +#, python-format +msgid "MSG_ID is %s" +msgstr "MSG_ID is %s" + +#: cinder/openstack/common/rpc/amqp.py:623 +#, python-format +msgid "Making asynchronous cast on %s..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:632 +msgid "Making asynchronous fanout cast..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:660 +#, python-format +msgid "Sending %(event_type)s on %(topic)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:74 +msgid "An unknown RPC related exception occurred." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:104 +#, python-format +msgid "" +"Remote error: %(exc_type)s %(value)s\n" +"%(traceback)s." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:121 +#, python-format +msgid "" +"Timeout while waiting on RPC response - topic: \"%(topic)s\", RPC method:" +" \"%(method)s\" info: \"%(info)s\"" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:137 +#: cinder/openstack/common/rpc/common.py:138 +#: cinder/openstack/common/rpc/common.py:139 +msgid "" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:143 +#, python-format +msgid "Found duplicate message(%(msg_id)s). Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:147 +msgid "Invalid reuse of an RPC connection." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:151 +#, python-format +msgid "Specified RPC version, %(version)s, not supported by this endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:156 +#, python-format +msgid "" +"Specified RPC envelope version, %(version)s, not supported by this " +"endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:280 +#, python-format +msgid "Failed to sanitize %(item)s. Key error %(err)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:302 +#, python-format +msgid "Returning exception %s to caller" +msgstr "Returning exception %s to caller" + +#: cinder/openstack/common/rpc/impl_kombu.py:169 +#: cinder/openstack/common/rpc/impl_qpid.py:133 +msgid "Failed to process message... skipping it." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:480 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:502 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:539 +#, python-format +msgid "" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:555 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:609 +#: cinder/openstack/common/rpc/impl_qpid.py:403 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:627 +#: cinder/openstack/common/rpc/impl_qpid.py:418 +#, python-format +msgid "Timed out waiting for RPC response: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:631 +#: cinder/openstack/common/rpc/impl_qpid.py:422 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:670 +#: cinder/openstack/common/rpc/impl_qpid.py:457 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:351 +#, python-format +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:357 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:370 +msgid "Re-established AMQP queues" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:430 +msgid "Error processing message. Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:97 +msgid "JSON serialization failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:104 +#, python-format +msgid "Deserializing: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:140 +#, python-format +msgid "Connecting to %(addr)s with %(type)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:141 +#, python-format +msgid "-> Subscribed to %(subscribe)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:142 +#, python-format +msgid "-> bind: %(bind)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:150 +msgid "Could not open socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:162 +#, python-format +msgid "Subscribing to %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:204 +msgid "You cannot recv on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:209 +msgid "You cannot send on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:273 +#, fuzzy, python-format +msgid "Running func with context: %s" +msgstr "unpacked context: %s" + +#: cinder/openstack/common/rpc/impl_zmq.py:311 +msgid "Sending reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:345 +msgid "RPC message did not include method." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:380 +#, fuzzy +msgid "Registering reactor" +msgstr "De-registering image %s" + +#: cinder/openstack/common/rpc/impl_zmq.py:392 +msgid "In reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:407 +msgid "Out reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:411 +msgid "Consuming socket" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:453 +#, python-format +msgid "CONSUMER GOT %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:465 +#, python-format +msgid "Creating proxy for topic: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:471 +msgid "Topic contained dangerous characters." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:496 +#, python-format +msgid "ROUTER RELAY-OUT SUCCEEDED %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:505 +msgid "Topic socket file creation failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:510 +#, python-format +msgid "ROUTER RELAY-OUT QUEUED %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:513 +#, python-format +msgid "Local per-topic backlog buffer full for topic %(topic)s. Dropping message." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:532 +#, python-format +msgid "Could not create IPC directory %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:542 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:576 +#, python-format +msgid "CONSUMER RECEIVED DATA: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:578 +#, python-format +msgid "ROUTER RELAY-OUT %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:600 +msgid "ZMQ Envelope version unsupported or unknown." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:628 +msgid "Skipping topic registration. Already registered." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:635 +#, python-format +msgid "Consumer is a zmq.%s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:687 +msgid "Creating payload" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:700 +msgid "Creating queue socket for reply waiter" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:713 +msgid "Sending cast" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:716 +msgid "Cast sent; Waiting reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:719 +#, fuzzy, python-format +msgid "Received message: %s" +msgstr "received %s" + +#: cinder/openstack/common/rpc/impl_zmq.py:720 +msgid "Unpacking response" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:729 +msgid "Unsupported or unknown ZMQ envelope returned." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:736 +msgid "RPC Message Invalid." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:760 +#, python-format +msgid "%(msg)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:763 +#, python-format +msgid "Sending message(s) to: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:767 +msgid "No matchmaker results. Not casting." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:770 +msgid "No match from matchmaker." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:53 +msgid "Match not found by MatchMaker." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:89 +#, fuzzy +msgid "Matchmaker does not implement registration or heartbeat." 
+msgstr "xenapi.fake does not have an implementation for %s" + +#: cinder/openstack/common/rpc/matchmaker.py:239 +#, python-format +msgid "Matchmaker unregistered: %s, %s" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:250 +msgid "Register before starting heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:343 +#: cinder/openstack/common/rpc/matchmaker.py:361 +#, python-format +msgid "No key defining hosts for topic '%s', see ringfile" +msgstr "" + +#: cinder/scheduler/chance.py:51 cinder/scheduler/simple.py:90 +msgid "Is the appropriate service running?" +msgstr "" + +#: cinder/scheduler/chance.py:56 +msgid "Could not find another host" +msgstr "" + +#: cinder/scheduler/driver.py:89 +msgid "Must implement a fallback schedule" +msgstr "Must implement a fallback schedule" + +#: cinder/scheduler/driver.py:93 +#, fuzzy +msgid "Must implement schedule_create_volume" +msgstr "Must implement a fallback schedule" + +#: cinder/scheduler/filter_scheduler.py:113 +msgid "Invalid value for 'scheduler_max_attempts', must be >=1" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:131 +#, python-format +msgid "" +"Error scheduling %(volume_id)s from last vol-service: %(last_host)s : " +"%(exc)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:160 +#, python-format +msgid "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:208 +#, python-format +msgid "Filtered %(hosts)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:214 +#, python-format +msgid "Choosing %(best_host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:240 +#, python-format +msgid "Ignoring %(service_name)s service update from %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:244 +#, python-format +msgid "Received %(service_name)s service update from %(host)s." +msgstr "" + +#: cinder/scheduler/host_manager.py:266 +msgid "service is down or disabled." +msgstr "" + +#: cinder/scheduler/manager.py:119 +#, python-format +msgid "Failed to schedule_%(method)s: %(ex)s" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:69 +#, python-format +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:78 +#, python-format +msgid "Could not decode scheduler options: '%(e)s'" +msgstr "" + +#: cinder/scheduler/simple.py:79 +msgid "Not enough allocatable volume gigabytes remaining" +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:37 +msgid "Free capacity not set: volume node info collection broken." +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:51 +#, python-format +msgid "" +"Insufficient free space for volume creation (requested / avail): " +"%(requested)s/%(available)s" +msgstr "" + +#: cinder/scheduler/filters/retry_filter.py:41 +#, python-format +msgid "Host %(host)s %(pass_msg)s. 
Previously tried hosts: %(hosts)s"
+msgstr ""
+
+#: cinder/tests/fake_driver.py:45 cinder/volume/driver.py:506
+#, python-format
+msgid "FAKE ISCSI: %s"
+msgstr "FAKE ISCSI: %s"
+
+#: cinder/tests/fake_driver.py:93 cinder/tests/fake_driver.py:98
+#, python-format
+msgid "LoggingVolumeDriver: %s"
+msgstr ""
+
+#: cinder/tests/fake_utils.py:72
+#, python-format
+msgid "Faking execution of cmd (subprocess): %s"
+msgstr ""
+
+#: cinder/tests/fake_utils.py:80
+#, python-format
+msgid "Faked command matched %s"
+msgstr ""
+
+#: cinder/tests/fake_utils.py:96
+#, python-format
+msgid "Faked command raised an exception %s"
+msgstr ""
+
+#: cinder/tests/fake_utils.py:101
+#, python-format
+msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'"
+msgstr ""
+
+#: cinder/tests/test_misc.py:59
+#, python-format
+msgid ""
+"The following migrations are missing a downgrade:\n"
+"\t%s"
+msgstr ""
+
+#: cinder/tests/test_storwize_svc.py:243
+#, python-format
+msgid "unrecognized argument %s"
+msgstr ""
+
+#: cinder/tests/test_storwize_svc.py:1217
+#, python-format
+msgid "Run CLI command: %s"
+msgstr ""
+
+#: cinder/tests/test_storwize_svc.py:1220
+#, python-format
+msgid ""
+"CLI output:\n"
+" stdout: %(stdout)s\n"
+" stderr: %(stderr)s"
+msgstr ""
+
+#: cinder/tests/test_storwize_svc.py:1225
+#, python-format
+msgid ""
+"CLI Exception output:\n"
+" stdout: %(out)s\n"
+" stderr: %(err)s"
+msgstr ""
+
+#: cinder/tests/test_volume_types.py:58
+#, python-format
+msgid "Given data: %s"
+msgstr ""
+
+#: cinder/tests/test_volume_types.py:59
+#, python-format
+msgid "Result data: %s"
+msgstr ""
+
+#: cinder/tests/api/contrib/test_backups.py:638
+msgid "Invalid input"
+msgstr ""
+
+#: cinder/tests/integrated/test_login.py:31
+#, fuzzy, python-format
+msgid "volume: %s"
+msgstr "Detach volume %s"
+
+#: cinder/tests/integrated/api/client.py:37
+#, python-format
+msgid ""
+"%(message)s\n"
+"Status Code: %(_status)s\n"
+"Body: %(_body)s"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:46
+msgid "Authentication error"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:54
+msgid "Authorization error"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:62
+msgid "Item not found"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:104
+#, python-format
+msgid "Doing %(method)s on %(relative_url)s"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:106
+#, python-format
+msgid "Body: %s"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:124
+#, python-format
+msgid "%(auth_uri)s => code %(http_status)s"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:150
+#, python-format
+msgid "%(relative_uri)s => code %(http_status)s"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:160
+msgid "Unexpected status code"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:167
+#, python-format
+msgid "Decoding JSON: %s"
+msgstr ""
+
+#: cinder/volume/api.py:96
+msgid "May specify only one of snapshot, imageRef or source volume"
+msgstr ""
+
+#: cinder/volume/api.py:103 cinder/volume/api.py:449
+#: cinder/volume/manager.py:554
+msgid "status must be available"
+msgstr ""
+
+#: cinder/volume/api.py:108
+msgid "Volume size cannot be less than the Snapshot size"
+msgstr ""
+
+#: cinder/volume/api.py:117
+msgid "Unable to clone volumes that are in an error state"
+msgstr ""
+
+#: cinder/volume/api.py:123
+msgid "Clones currently must be >= original volume size."
+msgstr "" + +#: cinder/volume/api.py:140 +#, python-format +msgid "Volume size '%s' must be an integer and greater than 0" +msgstr "" + +#: cinder/volume/api.py:150 +msgid "Size of specified image is larger than volume size." +msgstr "" + +#: cinder/volume/api.py:154 +msgid "Image minDisk size is larger than the volume size." +msgstr "" + +#: cinder/volume/api.py:168 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/api.py:177 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create volume (%(d_consumed)d " +"volumes already consumed)" +msgstr "" + +#: cinder/volume/api.py:308 +msgid "Failed to update quota for deleting volume" +msgstr "" + +#: cinder/volume/api.py:316 +#, fuzzy +msgid "Volume status must be available or error" +msgstr "Volume status must be available" + +#: cinder/volume/api.py:321 +#, python-format +msgid "Volume still has %d dependent snapshots" +msgstr "" + +#: cinder/volume/api.py:374 cinder/volume/api.py:432 +#: cinder/volume/volume_types.py:65 +#, python-format +msgid "Searching by: %s" +msgstr "" + +#: cinder/volume/api.py:452 +msgid "already attached" +msgstr "" + +#: cinder/volume/api.py:459 +msgid "already detached" +msgstr "" + +#: cinder/volume/api.py:470 +#, fuzzy +msgid "Volume status must be available to reserve" +msgstr "Volume status must be available" + +#: cinder/volume/api.py:519 +msgid "must be available" +msgstr "" + +#: cinder/volume/api.py:537 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG snapshot " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/api.py:546 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create snapshot (%(d_consumed)d " +"snapshots already consumed)" +msgstr "" + +#: cinder/volume/api.py:595 +#, fuzzy +msgid "Volume Snapshot status must be available or error" +msgstr "Volume status must be available" + +#: cinder/volume/api.py:623 +msgid "Metadata property key blank" +msgstr "" + +#: cinder/volume/api.py:627 +msgid "Metadata property key greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:631 +msgid "Metadata property value greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:716 +#, fuzzy +msgid "Volume status must be available/in-use." +msgstr "Volume status must be available" + +#: cinder/volume/api.py:719 +msgid "Volume status is in-use." +msgstr "" + +#: cinder/volume/driver.py:94 cinder/volume/drivers/netapp/nfs.py:253 +#, python-format +msgid "Recovering from a failed execute. Try number %s" +msgstr "Recovering from a failed execute. Try number %s" + +#: cinder/volume/driver.py:221 cinder/volume/drivers/emc/emc_smis_iscsi.py:119 +msgid "ISCSI provider_location not stored, using discovery" +msgstr "" + +#: cinder/volume/driver.py:269 +#, fuzzy, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "Could not find iSCSI export for volume %s" + +#: cinder/volume/driver.py:273 cinder/volume/drivers/emc/emc_smis_iscsi.py:162 +#, python-format +msgid "ISCSI Discovery: Found %s" +msgstr "" + +#: cinder/volume/driver.py:361 +#, python-format +msgid "copy_image_to_volume %s." +msgstr "" + +#: cinder/volume/driver.py:378 +#, python-format +msgid "copy_volume_to_image %s." 
+msgstr "" + +#: cinder/volume/driver.py:437 +#, python-format +msgid "iSCSI device not found at %s" +msgstr "" + +#: cinder/volume/driver.py:439 +#, python-format +msgid "" +"ISCSI volume not yet found at: %(host_device)s. Will rescan & retry. Try" +" number: %(tries)s" +msgstr "" + +#: cinder/volume/driver.py:451 +#, python-format +msgid "Found iSCSI node %(host_device)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/volume/driver.py:469 cinder/volume/manager.py:689 +#: cinder/volume/drivers/lvm.py:560 cinder/volume/drivers/lvm.py:676 +#: cinder/volume/drivers/storwize_svc.py:1344 +#: cinder/volume/drivers/emc/emc_smis_common.py:855 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:241 +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1503 +#: cinder/volume/drivers/netapp/iscsi.py:1114 +#: cinder/volume/drivers/netapp/iscsi.py:1484 +#: cinder/volume/drivers/netapp/iscsi.py:2236 +#: cinder/volume/drivers/netapp/iscsi.py:2515 +#: cinder/volume/drivers/nexenta/volume.py:317 +msgid "Updating volume status" +msgstr "" + +#: cinder/volume/driver.py:544 +msgid "Driver must implement initialize_connection" +msgstr "" + +#: cinder/volume/manager.py:122 +#, python-format +msgid "Driver path %s is deprecated, update your configuration to the new path." +msgstr "" + +#: cinder/volume/manager.py:144 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "Re-exporting %s volumes" + +#: cinder/volume/manager.py:149 +#, python-format +msgid "volume %s stuck in a downloading state" +msgstr "" + +#: cinder/volume/manager.py:154 +#, python-format +msgid "volume %s: skipping export" +msgstr "volume %s: skipping export" + +#: cinder/volume/manager.py:156 +msgid "Resuming any in progress delete operations" +msgstr "" + +#: cinder/volume/manager.py:159 +#, python-format +msgid "Resuming delete on volume: %s" +msgstr "" + +#: cinder/volume/manager.py:219 +#, python-format +msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +msgstr "volume %(vol_name)s: creating logical volume of size %(vol_size)sG" + +#: cinder/volume/manager.py:228 +#, fuzzy, python-format +msgid "volume %s: creating from snapshot" +msgstr "volume %s: creating export" + +#: cinder/volume/manager.py:232 +#, fuzzy, python-format +msgid "volume %s: creating from existing volume" +msgstr "volume %s: creating export" + +#: cinder/volume/manager.py:236 +#, fuzzy, python-format +msgid "volume %s: creating from image" +msgstr "volume %s: creating" + +#: cinder/volume/manager.py:245 +#, python-format +msgid "volume %s: creating" +msgstr "volume %s: creating" + +#: cinder/volume/manager.py:277 +#, python-format +msgid "volume %s: creating export" +msgstr "volume %s: creating export" + +#: cinder/volume/manager.py:286 +#, fuzzy, python-format +msgid "volume %s: create failed" +msgstr "volume %s: creating" + +#: cinder/volume/manager.py:316 +#, python-format +msgid "volume %s: created successfully" +msgstr "volume %s: created successfully" + +#: cinder/volume/manager.py:324 +#, fuzzy, python-format +msgid "Error: %s" +msgstr "Caught error: %s" + +#: cinder/volume/manager.py:350 +#, python-format +msgid "volume %s: Error trying to reschedule create" +msgstr "" + +#: cinder/volume/manager.py:368 +msgid "Retry info not present, will not reschedule" +msgstr "" + +#: cinder/volume/manager.py:372 +msgid "No request spec, will not reschedule" +msgstr "" + +#: cinder/volume/manager.py:377 +#, python-format +msgid "volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d" +msgstr "" + +#: cinder/volume/manager.py:406 
cinder/volume/manager.py:419
+#, python-format
+msgid "volume %s: deleting"
+msgstr "volume %s: deleting"
+
+#: cinder/volume/manager.py:412
+#, fuzzy
+msgid "volume is not local to this node"
+msgstr "Volume is not local to this node"
+
+#: cinder/volume/manager.py:417
+#, python-format
+msgid "volume %s: removing export"
+msgstr "volume %s: removing export"
+
+#: cinder/volume/manager.py:422
+#, python-format
+msgid "volume %s: volume is busy"
+msgstr ""
+
+#: cinder/volume/manager.py:441
+msgid "Failed to update usages deleting volume"
+msgstr ""
+
+#: cinder/volume/manager.py:445
+#, python-format
+msgid "volume %s: deleted successfully"
+msgstr "volume %s: deleted successfully"
+
+#: cinder/volume/manager.py:460
+#, python-format
+msgid "snapshot %s: creating"
+msgstr ""
+
+#: cinder/volume/manager.py:466
+#, python-format
+msgid "snapshot %(snap_name)s: creating"
+msgstr ""
+
+#: cinder/volume/manager.py:484
+#, python-format
+msgid "snapshot %s: created successfully"
+msgstr ""
+
+#: cinder/volume/manager.py:492 cinder/volume/manager.py:502
+#, python-format
+msgid "snapshot %s: deleting"
+msgstr ""
+
+#: cinder/volume/manager.py:505
+#, fuzzy, python-format
+msgid "snapshot %s: snapshot is busy"
+msgstr "instance %s: snapshotting"
+
+#: cinder/volume/manager.py:530
+msgid "Failed to update usages deleting snapshot"
+msgstr ""
+
+#: cinder/volume/manager.py:533
+#, python-format
+msgid "snapshot %s: deleted successfully"
+msgstr ""
+
+#: cinder/volume/manager.py:551
+msgid "being attached by another instance"
+msgstr ""
+
+#: cinder/volume/manager.py:608
+#, python-format
+msgid "Downloaded image %(image_id)s to %(volume_id)s successfully"
+msgstr ""
+
+#: cinder/volume/manager.py:626
+#, fuzzy, python-format
+msgid "Uploaded volume %(volume_id)s to image (%(image_id)s) successfully"
+msgstr "Attach volume %(volume_id)s to instance %(instance_id)s at %(device)s"
+
+#: cinder/volume/manager.py:702
+msgid "Clear capabilities"
+msgstr ""
+
+#: cinder/volume/manager.py:706
+#, python-format
+msgid "Notification {%s} received"
+msgstr ""
+
+#: cinder/volume/volume_types.py:41
+#, python-format
+msgid "DB error: %s"
+msgstr ""
+
+#: cinder/volume/volume_types.py:50 cinder/volume/volume_types.py:97
+msgid "id cannot be None"
+msgstr ""
+
+#: cinder/volume/volume_types.py:109
+msgid "name cannot be None"
+msgstr ""
+
+#: cinder/volume/volume_types.py:128
+#, python-format
+msgid ""
+"Default volume type is not found, please check default_volume_type "
+"config: %s"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:91
+msgid "Running with CoraidDriver for ESM EtherCloud"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:102
+#, python-format
+msgid "Update session cookie %(session)s"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:108 cinder/volume/drivers/coraid.py:183
+#, python-format
+msgid "Message : %(message)s"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:125
+#, python-format
+msgid "Error while trying to set group: %(message)s"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:128
+#, fuzzy, python-format
+msgid "Unable to find group: %(group)s"
+msgstr "Unable to find SR from VBD %s"
+
+#: cinder/volume/drivers/coraid.py:161
+msgid "ESM urlOpen error"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:166
+msgid "JSON Error"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:170
+msgid "Request without URL"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:175
+#, python-format
+msgid "Configure data : %s"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:177
+#, fuzzy, python-format
+msgid "Configure response : %s" +msgstr "response %s" + +#: cinder/volume/drivers/coraid.py:199 +#, python-format +msgid "Unable to retrive volume infos for volume %(volname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:289 +msgid "Cannot login on Coraid ESM" +msgstr "" + +#: cinder/volume/drivers/coraid.py:308 +#, fuzzy, python-format +msgid "Fail to create volume %(volname)s" +msgstr "Unable to locate volume %s" + +#: cinder/volume/drivers/coraid.py:321 +#, fuzzy, python-format +msgid "Failed to delete volume %(volname)s" +msgstr "Unable to locate volume %s" + +#: cinder/volume/drivers/coraid.py:335 +#, fuzzy, python-format +msgid "Failed to Create Snapshot %(snapname)s" +msgstr "Going to start terminating instances" + +#: cinder/volume/drivers/coraid.py:347 +#, python-format +msgid "Failed to Delete Snapshot %(snapname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:362 +#, python-format +msgid "Failed to Create Volume from Snapshot %(snapname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:383 +#, python-format +msgid "" +"Failed to Initialize Connection. Volume Name: %(volname)s Shelf: " +"%(shelf)s, Lun: %(lun)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:65 +#, python-format +msgid "There's no Gluster config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:70 +#, python-format +msgid "Gluster config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:80 +msgid "mount.glusterfs is not installed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:98 cinder/volume/drivers/nfs.py:168 +#, fuzzy, python-format +msgid "casted to %s" +msgstr "Nested return %s" + +#: cinder/volume/drivers/glusterfs.py:108 cinder/volume/drivers/nfs.py:178 +#, python-format +msgid "Volume %s does not have provider_location specified, skipping" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:168 cinder/volume/drivers/nfs.py:237 +#, python-format +msgid "Exception during mounting %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:247 cinder/volume/drivers/nfs.py:321 +#, fuzzy, python-format +msgid "%s is already mounted" +msgstr "group %s already exists" + +#: cinder/volume/drivers/lvm.py:82 +#, python-format +msgid "volume group %s doesn't exist" +msgstr "volume group %s doesn't exist" + +#: cinder/volume/drivers/lvm.py:197 +#, python-format +msgid "Size for volume: %s not found, skipping secure delete." +msgstr "" + +#: cinder/volume/drivers/lvm.py:204 +#, python-format +msgid "Performing secure delete on volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:218 +#, python-format +msgid "Error unrecognized volume_clear option: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:238 +#, python-format +msgid "snapshot: %s not found, skipping delete operations" +msgstr "" + +#: cinder/volume/drivers/lvm.py:268 cinder/volume/drivers/lvm.py:656 +#, fuzzy, python-format +msgid "Creating clone of volume: %s" +msgstr "Create volume of %s GB" + +#: cinder/volume/drivers/lvm.py:345 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provision for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:364 +#, python-format +msgid "Skipping ensure_export. 
No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:378 +msgid "Detected inconsistency in provider_location id" +msgstr "" + +#: cinder/volume/drivers/lvm.py:430 +#, python-format +msgid "Symbolic link %s not found" +msgstr "" + +#: cinder/volume/drivers/lvm.py:510 cinder/volume/drivers/lvm.py:524 +#, python-format +msgid "Skipping remove_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:542 +#, python-format +msgid "" +"Skipping remove_export. No iscsi_target is presently exported for volume:" +" %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:583 +msgid "Error retrieving volume status: " +msgstr "" + +#: cinder/volume/drivers/nfs.py:141 +#, python-format +msgid "There's no NFS config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/nfs.py:146 +#, python-format +msgid "NFS config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/rbd.py:64 +#, python-format +msgid "rbd has no pool %s" +msgstr "rbd has no pool %s" + +#: cinder/volume/drivers/rbd.py:87 cinder/volume/drivers/sheepdog.py:134 +msgid "error refreshing volume stats" +msgstr "" + +#: cinder/volume/drivers/rbd.py:220 +msgid "Not stored in rbd" +msgstr "" + +#: cinder/volume/drivers/rbd.py:224 +msgid "Blank components" +msgstr "" + +#: cinder/volume/drivers/rbd.py:227 +#, fuzzy +msgid "Not an rbd snapshot" +msgstr "instance %s: snapshotting" + +#: cinder/volume/drivers/rbd.py:242 +#, python-format +msgid "%s is in a different ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:253 +#, fuzzy, python-format +msgid "Unable to read image %s" +msgstr "Unable to locate volume %s" + +#: cinder/volume/drivers/scality.py:63 +msgid "Value required for 'scality_sofs_config'" +msgstr "" + +#: cinder/volume/drivers/scality.py:74 +#, python-format +msgid "Cannot access 'scality_sofs_config': %s" +msgstr "" + +#: cinder/volume/drivers/scality.py:80 +msgid "Cannot execute /sbin/mount.sofs" +msgstr "" + +#: cinder/volume/drivers/scality.py:101 +msgid "Cannot mount Scality SOFS, check syslog for errors" +msgstr "" + +#: cinder/volume/drivers/scality.py:135 +#, python-format +msgid "Cannot find volume dir for Scality SOFS at '%s'" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:47 +#, python-format +msgid "Sheepdog is not working: %s" +msgstr "Sheepdog is not working: %s" + +#: cinder/volume/drivers/sheepdog.py:52 +msgid "Sheepdog is not working" +msgstr "Sheepdog is not working" + +#: cinder/volume/drivers/solidfire.py:129 +#, python-format +msgid "Payload for SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:146 +#, python-format +msgid "Call to json.loads() raised an exception: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:152 +#, python-format +msgid "Results of SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:156 +#, python-format +msgid "Clone operation encountered: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:158 +#, python-format +msgid "Waiting for outstanding operation before retrying snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:164 +#, python-format +msgid "Detected xDBVersionMismatch, retry %s of 5" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:171 +#, fuzzy, python-format +msgid "API response: %s" +msgstr "response %s" + +#: cinder/volume/drivers/solidfire.py:191 +#, python-format +msgid "Found solidfire account: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:220 +#, python-format +msgid "solidfire 
account: %s does not exist, create it..."
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:281
+#, python-format
+msgid "Failed to retrieve volume SolidFire-ID: %s in get_by_account!"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:338
+#, fuzzy
+msgid "Failed to get model update from clone"
+msgstr "Failed to get metadata for ip: %s"
+
+#: cinder/volume/drivers/solidfire.py:364
+#, python-format
+msgid "More than one valid preset was detected, using %s"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:397
+#, python-format
+msgid "Mapped SolidFire volumeID %(sfid)s to cinder ID %(uuid)s."
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:406
+#, python-format
+msgid "Volume %s, not found on SF Cluster."
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:409
+#, python-format
+msgid "Found %(count)s volumes mapped to id: %(uuid)s."
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:476
+msgid "Enter SolidFire delete_volume..."
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:480
+#, python-format
+msgid "Account for Volume ID %s was not found on the SolidFire Cluster!"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:482
+msgid "This usually means the volume was never successfully created."
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:497
+#, python-format
+msgid "Volume ID %s was not found on the SolidFire Cluster!"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:500
+msgid "Leaving SolidFire delete_volume"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:504
+msgid "Executing SolidFire ensure_export..."
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:509
+msgid "Executing SolidFire create_export..."
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:558
+msgid "Updating cluster status info"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:566
+#, fuzzy
+msgid "Failed to get updated stats"
+msgstr "Going to start terminating instances"
+
+#: cinder/volume/drivers/storwize_svc.py:173
+#, python-format
+msgid "WWPN on node %(node)s: %(wwpn)s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:179
+msgid "enter: do_setup"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:190
+#, fuzzy, python-format
+msgid "pool %s doesn't exist"
+msgstr "volume group %s doesn't exist"
+
+#: cinder/volume/drivers/storwize_svc.py:206
+msgid "Failed to get license information."
+msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:265 +msgid "do_setup: No configured nodes" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:267 +msgid "leave: do_setup" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:289 +msgid "enter: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:295 +#: cinder/volume/drivers/netapp/iscsi.py:169 +#: cinder/volume/drivers/netapp/iscsi.py:1182 +#: cinder/volume/drivers/netapp/iscsi.py:1535 +#: cinder/volume/drivers/netapp/nfs.py:109 +#: cinder/volume/drivers/netapp/nfs.py:328 +#: cinder/volume/drivers/netapp/nfs.py:379 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:118 +#, python-format +msgid "%s is not set" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:301 +msgid "" +"Password or SSH private key is required for authentication: set either " +"san_password or san_private_key option" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:309 +#, python-format +msgid "" +"Illegal value %d specified for storwize_svc_flashcopy_timeout: valid " +"values are between 0 and 600" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:317 +msgid "leave: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:331 +#, python-format +msgid "ensure_export: Volume %s not found on storage" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:356 +#, python-format +msgid "enter: _get_chap_secret_for_host: host name %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:392 +#, python-format +msgid "" +"leave: _get_chap_secret_for_host: host name %(host_name)s with secret " +"%(chap_secret)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:415 +msgid "_create_host: Cannot clean host name. Host name is not unicode or string" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:483 +#, python-format +msgid "enter: _get_host_from_connector: prefix %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:511 +#, python-format +msgid "leave: _get_host_from_connector: host %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:523 +#, python-format +msgid "enter: _create_host: host %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:538 +msgid "_create_host: No connector ports" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:551 +#, python-format +msgid "leave: _create_host: host %(host)s - %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:574 +#, python-format +msgid "enter: _map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:609 +msgid "" +"storwize_svc_multihostmap_enabled is set to Flase, Not allow multi host " +"mapping" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:619 +#, fuzzy, python-format +msgid "volume %s mapping to multi host" +msgstr "volume %s: skipping export" + +#: cinder/volume/drivers/storwize_svc.py:625 +#, python-format +msgid "" +"leave: _map_vol_to_host: LUN %(result_lun)s, volume %(volume_name)s, host" +" %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:635 +#, python-format +msgid "enter: _delete_host: host %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:643 +#, python-format +msgid "leave: _delete_host: host %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:675 +#, python-format +msgid "enter: initialize_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:691 +msgid "_create_host failed to return the host name." 
+msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:702 +#, python-format +msgid "initialize_connection: Failed to get attributes for volume %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:709 +#, python-format +msgid "Did not find expected column name in lsvdisk: %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:711 +#, python-format +msgid "initialize_connection: Missing volume attribute for volume %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:728 +#, python-format +msgid "" +"initialize_connection: No node found in I/O group %(gid)s for volume " +"%(vol)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:736 +#, python-format +msgid "initialize_connection: Did not find a preferred node for volume %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:769 +#, python-format +msgid "" +"initialize_connection: Failed to collect return properties for volume " +"%(vol)s and connector %(conn)s.\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:774 +#, python-format +msgid "" +"leave: initialize_connection:\n" +" volume: %(vol)s\n" +" connector %(conn)s\n" +" properties: %(prop)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:792 +#, python-format +msgid "enter: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:802 +msgid "_get_host_from_connector failed to return the host name for connector" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:817 +#, python-format +msgid "" +"terminate_connection: No mapping of volume %(vol_name)s to host " +"%(host_name)s found" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:825 +#, python-format +msgid "leave: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:883 +msgid "protocol must be specified as ' iSCSI' or ' FC'" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:907 +#, python-format +msgid "enter: _create_vdisk: vdisk %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:945 +#, python-format +msgid "" +"_create_vdisk %(name)s - did not find success message in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:950 +#, python-format +msgid "leave: _create_vdisk: volume %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:962 +#: cinder/volume/drivers/storwize_svc.py:976 +#, python-format +msgid "" +"create FC mapping from %(source)s to %(target)s - did not find success " +"message in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:988 +#: cinder/volume/drivers/storwize_svc.py:998 +#, python-format +msgid "" +"create FC mapping from %(source)s to %(target)s - did not find mapping id" +" in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1012 +#, python-format +msgid "" +"_prepare_fc_map: Failed to prepare FlashCopy from %(source)s to " +"%(target)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1039 +#, python-format +msgid "" +"Unexecpted mapping status %(status)s for mapping %(id)s. Attributes: " +"%(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1050 +#, python-format +msgid "" +"Mapping %(id)s prepare failed to complete within the allotted %(to)d " +"seconds timeout. Terminating." 
+msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1055 +#, python-format +msgid "" +"_prepare_fc_map: Failed to start FlashCopy from %(source)s to %(target)s " +"with exception %(ex)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1062 +#, python-format +msgid "_prepare_fc_map: %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1069 +#, python-format +msgid "" +"_start_fc_map: Failed to start FlashCopy from %(source)s to %(target)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1080 +#, python-format +msgid "" +"enter: _run_flashcopy: execute FlashCopy from source %(source)s to target" +" %(target)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1092 +#, python-format +msgid "leave: _run_flashcopy: FlashCopy started from %(source)s to %(target)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1100 +#, python-format +msgid "enter: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1107 +#, python-format +msgid "_create_copy: Source vdisk %s does not exist" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1119 +#, python-format +msgid "" +"_create_copy: cannot get source vdisk %(src)s capacity from vdisk " +"attributes %(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1129 +#, python-format +msgid "leave: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1134 +#, python-format +msgid "enter: _get_flashcopy_mapping_attributes: mapping %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1156 +#, python-format +msgid "" +"leave: _get_flashcopy_mapping_attributes: mapping %(fc_map_id)s, " +"attributes %(attributes)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1164 +#, python-format +msgid "enter: _is_vdisk_defined: vdisk %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1166 +#, python-format +msgid "leave: _is_vdisk_defined: vdisk %(vol)s with %(str)s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1194 +#, python-format +msgid "enter: _delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1199 +#, python-format +msgid "warning: Tried to delete vdisk %s but it does not exist." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1225 +#, python-format +msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1258 +#, python-format +msgid "leave: _delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1283 +msgid "create_volume_from_snapshot: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1297 +msgid "create_cloned_volume: Source and destination size differ." 
+msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1361 +msgid "_update_volume_status: Could not get system name" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1373 +msgid "Could not get pool data from the storage" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1374 +msgid "_update_volume_status: Could not get storage pool data" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1412 +msgid "" +"Illegal value specified for storwize_svc_vol_rsize: set to either a " +"percentage (0-100) or -1" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1418 +msgid "" +"Illegal value specified for storwize_svc_vol_warning: set to a percentage" +" (0-100)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1425 +msgid "" +"Illegal value specified for storwize_svc_vol_grainsize: set to either 32," +" 64, 128, or 256" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1432 +msgid "System does not support compression" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1437 +msgid "If compression is set to True, rsize must also be set (not equal to -1)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1443 +#, python-format +msgid "" +"Illegal value %(prot)s specified for storwize_svc_connection_protocol: " +"valid values are %(enabled)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1452 +msgid "" +"Multipath is currently only supported for FC connections and not iSCSI. " +"(This is a Nova limitation.)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1464 +#, python-format +msgid "enter: _execute_command_and_parse_attributes: command %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1471 +#, fuzzy, python-format +msgid "" +"CLI Exception output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" +"%(description)s\n" +"Command: %(cmd)s\n" +"Exit code: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" + +#: cinder/volume/drivers/storwize_svc.py:1488 +#, python-format +msgid "" +"leave: _execute_command_and_parse_attributes:\n" +"command: %(cmd)s\n" +"attributes: %(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1507 +#, python-format +msgid "" +"_get_hdr_dic: attribute headers and values do not match.\n" +" Headers: %(header)s\n" +" Values: %(row)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1517 +#, python-format +msgid "" +"%(fun)s: Failed with unexpected CLI output.\n" +" Command: %(cmd)s\n" +"stdout: %(out)s\n" +"stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1531 +#, python-format +msgid "" +"%(fun)s: Failed with unexpected CLI output.\n" +" Command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1539 +#, python-format +msgid "Did not find expected column in %(fun)s: %(hdr)s" +msgstr "" + +#: cinder/volume/drivers/windows.py:142 +#, fuzzy, python-format +msgid "Creating folder %s " +msgstr "Deleting user %s" + +#: cinder/volume/drivers/windows.py:207 +#: cinder/volume/drivers/nexenta/volume.py:203 +#, python-format +msgid "Ignored target creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/windows.py:213 +#, fuzzy, python-format +msgid "Disk not found: %s" +msgstr "Sheepdog is not working: %s" + +#: cinder/volume/drivers/zadara.py:218 +#, python-format +msgid "Sending %(method)s to %(url)s. Body \"%(body)s\"" +msgstr "" + +#: cinder/volume/drivers/zadara.py:240 +#, python-format +msgid "Operation completed. 
%(data)s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:350 +#, python-format +msgid "Volume %(name)s could not be found. It might be already deleted" +msgstr "" + +#: cinder/volume/drivers/zadara.py:447 +#, python-format +msgid "Attach properties: %(properties)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:42 +msgid "" +"Module PyWBEM not installed. Install PyWBEM using the python-pywbem " +"package." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:78 +msgid "Entering create_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:82 +#, fuzzy, python-format +msgid "Create Volume: %(volume)s Size: %(size)lu" +msgstr "volume %(vol_name)s: creating logical volume of size %(vol_size)sG" + +#: cinder/volume/drivers/emc/emc_smis_common.py:90 +#, python-format +msgid "Create Volume: %(volume)s Storage type: %(storage_type)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:97 +#, python-format +msgid "" +"Create Volume: %(volume)s Pool: %(pool)s Storage System: " +"%(storage_system)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:106 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Storage Configuration Service not " +"found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:114 +#, python-format +msgid "" +"Create Volume: %(name)s Method: CreateOrModifyElementFromStoragePool " +"ConfigServicie: %(service)s ElementName: %(name)s InPool: %(pool)s " +"ElementType: 5 Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:129 +#, python-format +msgid "Create Volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:136 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:143 +#, python-format +msgid "Leaving create_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:151 +msgid "Entering create_volume_from_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:156 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:166 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Snapshot Instance: %(snapshotinstance)s Storage " +"System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:176 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Create Volume from Snapshot is NOT supported on VMAX." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:187 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Cannot find Replication Service to create volume from " +"snapshot." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:196 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Method: CreateElementReplica ReplicationService: " +"%(service)s ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:217 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s " +"Snapshot:%(snapshotname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:229 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Successfully clone volume from snapshot. Finding the " +"clone relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:240 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Remove the clone relationship. Method: " +"ModifyReplicaSynchronization ReplicationService: %(service)s Operation: " +"8 Synchronization: %(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:256 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:265 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:277 +#, python-format +msgid "" +"Leaving create_volume_from_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:286 +msgid "Entering create_cloned_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:291 +#, python-format +msgid "" +"Create a Clone from Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:301 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Source Instance: %(src_instance)s Storage System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:311 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Cannot find Replication Service to create cloned volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:320 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Method: CreateElementReplica ReplicationService: %(service)s " +"ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:341 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source " +"Volume:%(srcname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:353 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Successfully cloned volume from source volume. Finding the clone " +"relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:364 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Remove the clone relationship. 
Method: ModifyReplicaSynchronization "
+"ReplicationService: %(service)s Operation: 8 Synchronization: "
+"%(sync_name)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:380
+#, python-format
+msgid ""
+"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s"
+" Return code: %(rc)lu"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:389
+#, python-format
+msgid ""
+"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: "
+"%(srcname)s. Return code: %(rc)lu. Error: %(error)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:401
+#, python-format
+msgid ""
+"Leaving create_cloned_volume: Volume: %(volumename)s Source Volume: "
+"%(srcname)s Return code: %(rc)lu."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:410
+msgid "Entering delete_volume."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:412
+#, python-format
+msgid "Delete Volume: %(volume)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:419
+#, python-format
+msgid "Volume %(name)s not found on the array. No volume to delete."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:429
+#, python-format
+msgid ""
+"Error Delete Volume: %(volumename)s. Storage Configuration Service not "
+"found."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:437
+#, python-format
+msgid "Delete Volume: %(name)s DeviceID: %(deviceid)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:441
+#, python-format
+msgid ""
+"Delete Volume: %(name)s Method: EMCReturnToStoragePool ConfigService: "
+"%(service)s TheElement: %(vol_instance)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:454
+#, python-format
+msgid ""
+"Error Delete Volume: %(volumename)s. Return code: %(rc)lu. Error: "
+"%(error)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:463
+#, python-format
+msgid "Leaving delete_volume: %(volumename)s Return code: %(rc)lu"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:470
+msgid "Entering create_snapshot."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:474
+#, python-format
+msgid "Create snapshot: %(snapshot)s: volume: %(volume)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:486
+#, python-format
+msgid "Device ID: %(deviceid)s: Storage System: %(storagesystem)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:493
+#: cinder/volume/drivers/emc/emc_smis_common.py:495
+#: cinder/volume/drivers/emc/emc_smis_common.py:565
+#, python-format
+msgid "Cannot find Replication Service to create snapshot for volume %s."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:500
+#, python-format
+msgid ""
+"Create Snapshot: Method: CreateElementReplica: Target: %(snapshot)s "
+"Source: %(volume)s Replication Service: %(service)s ElementName: "
+"%(elementname)s Sync Type: 7 SourceElement: %(sourceelement)s."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:516
+#, python-format
+msgid ""
+"Create Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s "
+"Return code: %(rc)lu"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:525
+#, python-format
+msgid "Error Create Snapshot: %(snapshot)s Volume: %(volume)s Error: %(errordesc)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:533
+#, python-format
+msgid ""
+"Leaving create_snapshot: Snapshot: %(snapshot)s Volume: %(volume)s "
+"Return code: %(rc)lu."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:539
+msgid "Entering delete_snapshot."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:543
+#, python-format
+msgid "Delete Snapshot: %(snapshot)s: volume: %(volume)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:549
+#, python-format
+msgid ""
+"Delete Snapshot: %(snapshot)s: volume: %(volume)s. Finding "
+"StorageSynchronization_SV_SV."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:557
+#, python-format
+msgid ""
+"Snapshot: %(snapshot)s: volume: %(volume)s not found on the array. No "
+"snapshot to delete."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:572
+#, python-format
+msgid ""
+"Delete Snapshot: Target: %(snapshot)s Source: %(volume)s. Method: "
+"ModifyReplicaSynchronization: Replication Service: %(service)s "
+"Operation: 19 Synchronization: %(sync_name)s."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:588
+#, python-format
+msgid ""
+"Delete Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s "
+"Return code: %(rc)lu"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:597
+#, python-format
+msgid ""
+"Error Delete Snapshot: Volume: %(volumename)s Snapshot: "
+"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:609
+#, python-format
+msgid ""
+"Leaving delete_snapshot: Volume: %(volumename)s Snapshot: "
+"%(snapshotname)s Return code: %(rc)lu."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:619
+#, fuzzy, python-format
+msgid "Create export: %(volume)s"
+msgstr "Re-exporting %s volumes"
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:624
+#, fuzzy, python-format
+msgid "create_export: Volume: %(volume)s Device ID: %(device_id)s"
+msgstr "Attach volume %(volume_id)s to instance %(instance_id)s at %(device)s"
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:646
+#, python-format
+msgid ""
+"ExposePaths: %(vol)s ConfigService: %(service)s LUNames: %(lun_name)s"
+" InitiatorPortIDs: %(initiator)s DeviceAccesses: 2"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:661
+#, python-format
+msgid "ExposePaths parameter LunMaskingSCSIProtocolController: %(lunmasking)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:672
+#, fuzzy, python-format
+msgid "Error mapping volume %s."
+msgstr "Error starting xvp: %s"
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:676
+#, fuzzy, python-format
+msgid "ExposePaths for volume %s completed successfully."
+msgstr "volume %s: deleted successfully"
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:692
+#, python-format
+msgid ""
+"HidePaths: %(vol)s ConfigService: %(service)s LUNames: %(device_id)s "
+"LunMaskingSCSIProtocolController: %(lunmasking)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:705
+#, python-format
+msgid "Error unmapping volume %s."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:709
+#, fuzzy, python-format
+msgid "HidePaths for volume %s completed successfully."
+msgstr "volume %s: deleted successfully"
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:722
+#, python-format
+msgid ""
+"AddMembers: ConfigService: %(service)s MaskingGroup: %(masking_group)s"
+" Members: %(vol)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:735
+#, python-format
+msgid "Error mapping volume %(vol)s. 
%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:740 +#, fuzzy, python-format +msgid "AddMembers for volume %s completed successfully." +msgstr "volume %s: deleted successfully" + +#: cinder/volume/drivers/emc/emc_smis_common.py:753 +#, python-format +msgid "" +"RemoveMembers: ConfigServicie: %(service)s MaskingGroup: " +"%(masking_group)s Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:766 +#, python-format +msgid "Error unmapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:771 +#, fuzzy, python-format +msgid "RemoveMembers for volume %s completed successfully." +msgstr "volume %s: deleted successfully" + +#: cinder/volume/drivers/emc/emc_smis_common.py:777 +#, python-format +msgid "Map volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:786 +#: cinder/volume/drivers/emc/emc_smis_common.py:816 +#, python-format +msgid "Cannot find Controller Configuration Service for storage system %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:800 +#, python-format +msgid "Unmap volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:806 +#, python-format +msgid "Volume %s is not mapped. No volume to unmap." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:830 +#, python-format +msgid "Initialize connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:836 +#, python-format +msgid "Volume %s is already mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:848 +#, python-format +msgid "Terminate connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:880 +#, python-format +msgid "Found Storage Type: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:883 +msgid "Storage type not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:899 +#, python-format +msgid "Found Masking View: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:902 +msgid "Masking View not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:924 +msgid "Ecom user not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:944 +#, python-format +msgid "Ecom IP: %(ecomIp)s Port: %(ecomPort)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:947 +msgid "Ecom server not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:954 +#, fuzzy +msgid "Cannot connect to ECOM server" +msgstr "Reconnected to queue" + +#: cinder/volume/drivers/emc/emc_smis_common.py:966 +#, python-format +msgid "Found Replication Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:979 +#, python-format +msgid "Found Storage Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:992 +#, python-format +msgid "Found Controller Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1005 +#, python-format +msgid "Found Storage Hardware ID Management Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1049 +#, python-format +msgid "Pool %(storage_type)s is not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1055 +#, python-format +msgid "Storage system not found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1061 +#, python-format +msgid "Pool: %(pool)s SystemName: %(systemname)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1077 +#, python-format +msgid "Pool name: %(poolname)s System name: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1109 +#, python-format +msgid "Volume %(volumename)s not found on the array." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1112 +#, fuzzy, python-format +msgid "Volume name: %(volumename)s Volume instance: %(vol_instance)s." +msgstr "Attach volume %(volume_id)s to instance %(instance_id)s at %(device)s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1125 +#, python-format +msgid "Source: %(volumename)s Target: %(snapshotname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1148 +#, python-format +msgid "" +"Source: %(volumename)s Target: %(snapshotname)s. Storage Synchronized " +"not found. " +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1153 +#, python-format +msgid "" +"Storage system: %(storage_system)s Storage Synchronized instance: " +"%(sync)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1179 +#, python-format +msgid "Error finding %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1183 +#, python-format +msgid "Found %(name)s: %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1243 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage_system)s " +"and initiator %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1282 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage volume %(vol)s and initiator" +" %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1295 +#, python-format +msgid "" +"Volume %(name)s not found on the array. Cannot determine if there are " +"volumes mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1307 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage)s and " +"%(connector)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1319 +#, python-format +msgid "" +"Found %(numVolumesMapped)d volumes on storage system %(storage)s mapped " +"to %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1354 +#, python-format +msgid "Available device number on %(storage)s: %(device)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1397 +#, python-format +msgid "Device number not found for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1402 +#, python-format +msgid "Found device number %(device)d for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1412 +#, python-format +msgid "Device info: %(data)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1434 +#, python-format +msgid "Masking view: %(view)s DeviceMaskingGroup: %(masking)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1456 +#, python-format +msgid "Found Storage Processor System: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1484 +#, python-format +msgid "" +"iSCSIProtocolEndpoint for storage system %(storage_system)s and SP %(sp)s" +" is %(endpoint)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1513 +msgid "Error finding Storage Hardware ID Service." 
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:1519
+#, python-format
+msgid ""
+"EMCGetTargetEndpoints: Service: %(service)s Storage HardwareIDs: "
+"%(hardwareids)s."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:1531
+msgid "Error finding Target WWNs."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:1541
+#, python-format
+msgid "Add target WWN: %s."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:1543
+#, python-format
+msgid "Target WWNs: %s."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:1559
+#, python-format
+msgid "Storage Hardware IDs for %(wwpns)s is %(foundInstances)s."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_iscsi.py:158
+#, python-format
+msgid "Could not find iSCSI export for volume %s"
+msgstr "Could not find iSCSI export for volume %s"
+
+#: cinder/volume/drivers/emc/emc_smis_iscsi.py:167
+#, fuzzy, python-format
+msgid "Cannot find device number for volume %s"
+msgstr "Could not find iSCSI export for volume %s"
+
+#: cinder/volume/drivers/emc/emc_smis_iscsi.py:197
+#, fuzzy, python-format
+msgid "Found iSCSI endpoint: %s"
+msgstr "NotFound raised: %s"
+
+#: cinder/volume/drivers/emc/emc_smis_iscsi.py:204
+#, python-format
+msgid "ISCSI endpoint not found for SP %(sp)s on storage system %(storage)s."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_iscsi.py:221
+#, python-format
+msgid "ISCSI properties: %s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:113
+msgid "read timed out"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:138
+msgid "do_setup."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:144
+msgid "check_for_setup_error."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:149
+msgid "check_for_setup_error: Cannot get device type."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:153
+#, python-format
+msgid "check_for_setup_error: Device type is:%(type)s, version is:%(version)s."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:160
+msgid ""
+"check_for_setup_error: Product version is not correct. Please make sure "
+"the product version is V1."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:171
+msgid "_get_device_type: Storage Pool must be configured."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:180
+#, python-format
+msgid "create_volume:volume name: %s."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:194
+#, python-format
+msgid "delete_volume: volume name: %s."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:201
+#, python-format
+msgid "delete_volume:No need to delete volume. Volume %(name)s does not exist."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:210
+#, python-format
+msgid "create_export: volume name:%s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:214
+#, python-format
+msgid "create_export:Volume %(name)s does not exist."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:234
+#, python-format
+msgid "initialize_connection: volume name: %(volume)s. initiator name: %(ini)s."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:249
+#, python-format
+msgid ""
+"initialize_connection:Failed to find target ip for "
+"initiator:%(initiatorname)s, please check config file."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:259
+#, python-format
+msgid "initialize_connection:Failed to find target iSCSI iqn. 
Target IP:%(ip)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:297
+#, python-format
+msgid ""
+"initialize_connection:host name: %(host)s, initiator name: %(ini)s, "
+"hostport name: %(port)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:307
+#, python-format
+msgid ""
+"initialize_connection:Failed to find the given volume. volume "
+"name:%(volume)s."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:359
+#, python-format
+msgid "terminate_connection:volume name: %(volume)s, initiator name: %(ini)s."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:368
+#, python-format
+msgid "terminate_connection:Host does not exist. Host name:%(host)s."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:377
+#, python-format
+msgid "terminate_connection:volume does not exist. volume name:%(volume)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:396
+#, python-format
+msgid ""
+"terminate_connection:No map between host and volume. Host "
+"name:%(hostname)s, volume name:%(volumename)s."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:414
+#, python-format
+msgid ""
+"terminate_connection:No initiator is added to the host. Host "
+"name:%(hostname)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:427
+#, python-format
+msgid "create_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:434
+msgid "create_snapshot:Device does not support snapshot."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:440
+msgid "create_snapshot:Resource pool needs at least 1GB of valid size."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:447
+#, python-format
+msgid "create_snapshot:Volume does not exist. Volume name:%(name)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:456
+#, python-format
+msgid "create_snapshot:Snapshot does not exist. Snapshot name:%(name)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:468
+#, python-format
+msgid "delete_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:475
+msgid "delete_snapshot:Device does not support snapshot."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:484
+#, python-format
+msgid "delete_snapshot:Snapshot does not exist. snapshot name:%(snap)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:497
+#, python-format
+msgid ""
+"create_volume_from_snapshot:snapshot name:%(snapshot)s, volume "
+"name:%(volume)s."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:505
+#, python-format
+msgid ""
+"create_volume_from_snapshot:Device does not support creating volume "
+"from snapshot. Volume name:%(volume)s, snapshot name:%(snapshot)s."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:516
+#, python-format
+msgid ""
+"create_volume_from_snapshot:Snapshot does not exist. Snapshot "
+"name:%(name)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:567
+msgid "Config file is wrong. Controller IP, UserName and UserPassword must be set."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:573
+#, python-format
+msgid "_check_conf_file: %s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:584
+#, python-format
+msgid "_read_xml:%s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:612
+#, python-format
+msgid "Error writing login information to xml. 
%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:616 +#, python-format +msgid "_get_login_info error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:638 +#, python-format +msgid "" +"Config file is wrong. LUNType must be \"Thin\" or \"Thick\". " +"LUNType:%(type)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:665 +msgid "_get_lun_set_info:Use default prefetch type. Prefetch type:Intelligent." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:676 +msgid "" +"_get_lun_set_info:No available pools! Please check whether storage pool " +"is created." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:686 +#, python-format +msgid "_get_lun_set_info:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:736 +msgid "" +"_get_maximum_pool:maxpoolid is None. Please check config file and make " +"sure the \"Name\" in \"StoragePool\" is right." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:754 +#, python-format +msgid "_get_iscsi_info:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:764 +#, python-format +msgid "CLI command:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:831 +#, python-format +msgid "_execute_cli:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:838 +#, python-format +msgid "" +"_name_translate:Name in cinder: %(old)s, new name in storage system: " +"%(new)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:872 +#, python-format +msgid "" +"_create_hostgroup:Failed to Create hostgroup. Hostgroup name: %(name)s. " +"out:%(out)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:901 +#, python-format +msgid "" +"_add_host:Failed to add host to hostgroup. host name:%(host)s hostgroup " +"id:%(hostgroup)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:927 +#, python-format +msgid "" +"_add_initiator:Failed to add initiator. initiator name:%(name)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:941 +#, python-format +msgid "" +"_delete_initiator:ERROE:Failed to delete initiator. initiator " +"name:%(name)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:991 +#, python-format +msgid "" +"_add_hostport:Failed to add hostport. port name:%(port)s port " +"information:%(info)s host id:%(host)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1009 +#, python-format +msgid "_delete_hostport:Failed to delete host port. port id:%(portid)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1016 +#, python-format +msgid "_get_tgt_iqn:iSCSI IP is %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1049 +#, python-format +msgid "_get_tgt_iqn:iSCSI target iqn is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1086 +#, python-format +msgid "" +"_map_lun:Failed to add hostmap. hostid:%(host)s lunid:%(lun)s " +"hostlunid:%(hostlunid)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1122 +#, python-format +msgid "" +"_delete_map:There are IOs accessing the system. Retry to delete host map." +" map id:%(mapid)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1128 +#, python-format +msgid "_delete_map:Failed to delete host map. mapid:%(mapid)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1142 +#, python-format +msgid "_delete_host: Failed delete host. 
host id:%(hostid)s out:%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1223
+#, python-format
+msgid ""
+"_active_snapshot:Failed to activate snapshot. snapshot id:%(name)s. "
+"out:%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1237
+#, python-format
+msgid ""
+"_disable_snapshot:Failed to disable snapshot. snapshot id:%(id)s. "
+"out:%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1251
+#, python-format
+msgid ""
+"_delete_snapshot:Failed to delete snapshot. snapshot id:%(id)s. "
+"out:%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1308
+#, python-format
+msgid "_create_volume:Failed to Create volume. volume name:%(name)s. out:%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1321
+#, python-format
+msgid "_delete_volume:Failed to delete volume. Volume name:%(name)s out:%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1338
+#, python-format
+msgid ""
+"_create_luncopy:Failed to Create LUNcopy. LUNcopy name:%(name)s "
+"out:%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1352
+#, python-format
+msgid ""
+"_start_luncopy:Failed to start LUNcopy. LUNcopy id:%(luncopyid)s "
+"out:%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1382
+#, python-format
+msgid ""
+"_wait_for_luncopy:LUNcopy status is not normal. LUNcopy "
+"name:%(luncopyname)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1415
+#, python-format
+msgid ""
+"_delete_luncopy:Failed to delete LUNcopy. LUNcopy id:%(luncopyid)s "
+"out:%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1430
+#, python-format
+msgid ""
+"_create_snapshot:Failed to Create snapshot. Snapshot name:%(name)s "
+"out:%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1468
+#, python-format
+msgid ""
+"_change_lun_controller:Failed to change lun owning controller. lun "
+"id:%(lunid)s. new controller:%(controller)s. out:%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1484
+msgid "_is_resource_pool_enough:Resource pool for snapshot has not been added."
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:123 +#, python-format +msgid "API %(name)s failed: %(reason)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:134 +#: cinder/volume/drivers/netapp/iscsi.py:1167 +#, python-format +msgid "Using WSDL: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:144 +#, python-format +msgid "Using DFM server: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:149 +#, fuzzy, python-format +msgid "Using storage service: %s" +msgstr "Running instances: %s" + +#: cinder/volume/drivers/netapp/iscsi.py:154 +#, python-format +msgid "Using storage service prefix: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:160 +#, fuzzy, python-format +msgid "Using vfiler: %s" +msgstr "NotFound raised: %s" + +#: cinder/volume/drivers/netapp/iscsi.py:173 +msgid "Either netapp_storage_service or netapp_storage_service_prefix must be set" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:204 +#, fuzzy +msgid "Connected to DFM server" +msgstr "Reconnected to queue" + +#: cinder/volume/drivers/netapp/iscsi.py:282 +#, python-format +msgid "Discovered %(dataset_count)s datasets and %(lun_count)s LUNs" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:317 +#, fuzzy, python-format +msgid "Job failed: %s" +msgstr "NotFound raised: %s" + +#: cinder/volume/drivers/netapp/iscsi.py:346 +msgid "" +"Attempt to use volume_type without specifying " +"netapp_storage_service_prefix flag." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:350 +msgid "" +"You must set the netapp_storage_service flag in order to create volumes " +"with no volume_type." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:420 +msgid "Failed to provision dataset member" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:435 +msgid "No LUN was created by the provision job" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:463 +#: cinder/volume/drivers/netapp/iscsi.py:1250 +#, python-format +msgid "No entry in LUN table for volume %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:481 +msgid "Failed to remove and delete dataset LUN member" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:497 +msgid "Failed to remove and delete dataset Qtree member" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:538 +#, python-format +msgid "No entry in LUN table for volume %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:558 +#, fuzzy, python-format +msgid "Failed to get LUN details for LUN ID %s" +msgstr "Failed to get metadata for ip: %s" + +#: cinder/volume/drivers/netapp/iscsi.py:575 +#, fuzzy, python-format +msgid "Failed to get host details for host ID %s" +msgstr "Failed to get metadata for ip: %s" + +#: cinder/volume/drivers/netapp/iscsi.py:812 +#: cinder/volume/drivers/netapp/iscsi.py:859 +#, fuzzy, python-format +msgid "No LUN ID for volume %s" +msgstr "Could not find iSCSI export for volume %s" + +#: cinder/volume/drivers/netapp/iscsi.py:821 +#, fuzzy, python-format +msgid "Failed to get target portal for filer: %s" +msgstr "Failed to get metadata for ip: %s" + +#: cinder/volume/drivers/netapp/iscsi.py:826 +#, fuzzy, python-format +msgid "Failed to get target IQN for filer: %s" +msgstr "Failed to get metadata for ip: %s" + +#: cinder/volume/drivers/netapp/iscsi.py:930 +msgid "" +"Failed to get LUN list. Is the DFM host time-synchronized with Cinder " +"host?" 
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:1042
+#: cinder/volume/drivers/netapp/iscsi.py:1381
+#: cinder/volume/drivers/netapp/iscsi.py:1699
+#: cinder/volume/drivers/netapp/nfs.py:74
+#, python-format
+msgid ""
+"Cannot create volume of size %(vol_size)s from snapshot of size "
+"%(snap_size)s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:1054
+#, python-format
+msgid ""
+"Cannot create volume of type %(new_type)s from snapshot of type "
+"%(old_type)s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:1075
+#: cinder/volume/drivers/netapp/nfs.py:273
+#, python-format
+msgid ""
+"Cannot create clone of size %(vol_size)s from volume of size "
+"%(src_vol_size)s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:1086
+#, python-format
+msgid "Cannot create clone of type %(new_type)s from volume of type %(old_type)s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:1143
+#, python-format
+msgid "No metadata property %(prop)s defined for the LUN %(name)s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:1216
+#: cinder/volume/drivers/netapp/iscsi.py:1561
+msgid "Success getting LUN list from server"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:1238
+#: cinder/volume/drivers/netapp/iscsi.py:1576
+#, python-format
+msgid "Created LUN with name %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:1254
+#: cinder/volume/drivers/netapp/iscsi.py:1369
+#: cinder/volume/drivers/netapp/iscsi.py:1593
+#, fuzzy, python-format
+msgid "Destroyed LUN %s"
+msgstr "Nested return %s"
+
+#: cinder/volume/drivers/netapp/iscsi.py:1291
+#, python-format
+msgid "Mapped LUN %(handle)s to the initiator %(initiator_name)s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:1298
+#, python-format
+msgid ""
+"Successfully fetched target details for LUN %(handle)s and initiator "
+"%(initiator_name)s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:1303
+#: cinder/volume/drivers/netapp/iscsi.py:1637
+#, fuzzy, python-format
+msgid "Failed to get LUN target details for the LUN %s"
+msgstr "Failed to get metadata for ip: %s"
+
+#: cinder/volume/drivers/netapp/iscsi.py:1307
+#: cinder/volume/drivers/netapp/iscsi.py:1648
+#, fuzzy, python-format
+msgid "Failed to get target portal for the LUN %s"
+msgstr "Failed to get metadata for ip: %s"
+
+#: cinder/volume/drivers/netapp/iscsi.py:1311
+#: cinder/volume/drivers/netapp/iscsi.py:1651
+#, fuzzy, python-format
+msgid "Failed to get target IQN for the LUN %s"
+msgstr "Failed to get metadata for ip: %s"
+
+#: cinder/volume/drivers/netapp/iscsi.py:1344
+#, python-format
+msgid "Unmapped LUN %(handle)s from the initiator %(initiator_name)s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:1365
+#, python-format
+msgid "No entry in LUN table for snapshot %(name)s."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:1409
+#: cinder/volume/drivers/netapp/iscsi.py:1907
+msgid "Object is not a NetApp LUN."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:1419
+#: cinder/volume/drivers/netapp/iscsi.py:2188
+#, python-format
+msgid "Cloned LUN with new name %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:1439
+#, fuzzy, python-format
+msgid "Could not find handle for LUN named %s"
+msgstr "Could not find iSCSI export for volume %s"
+
+#: cinder/volume/drivers/netapp/iscsi.py:1458
+#: cinder/volume/drivers/netapp/iscsi.py:1936
+#, python-format
+msgid ""
+"Cannot clone volume of size %(vol_size)s from src volume of size "
+"%(src_vol_size)s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:1518
+#, fuzzy, python-format
+msgid "Using NetApp filer: %s"
+msgstr "Running instances: %s"
+
+#: cinder/volume/drivers/netapp/iscsi.py:1585
+#, python-format
+msgid "No entry in LUN table for volume/snapshot %(name)s."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:1628
+#, python-format
+msgid "Mapped LUN %(name)s to the initiator %(initiator_name)s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:1632
+#, python-format
+msgid ""
+"Successfully fetched target details for LUN %(name)s and initiator "
+"%(initiator_name)s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:1688
+#, fuzzy, python-format
+msgid "Snapshot %s deletion successful"
+msgstr "volume %s: deleted successfully"
+
+#: cinder/volume/drivers/netapp/iscsi.py:1717
+#, python-format
+msgid "Unmapped LUN %(name)s from the initiator %(initiator_name)s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:1735
+#, fuzzy, python-format
+msgid "Failed to get vol with required size for volume: %s"
+msgstr "Failed to get metadata for ip: %s"
+
+#: cinder/volume/drivers/netapp/iscsi.py:1811
+#, python-format
+msgid "Error mapping lun. Code: %(code)s, Message: %(message)s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:1829
+#, python-format
+msgid "Error unmapping lun. Code: %(code)s, Message: %(message)s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:1923
+#, fuzzy, python-format
+msgid "Could not find attribute for LUN named %s"
+msgstr "Could not find iSCSI export for volume %s"
+
+#: cinder/volume/drivers/netapp/iscsi.py:2057
+#, python-format
+msgid "No iscsi service found for vserver %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:2192
+#, python-format
+msgid "No cloned lun named %s found on the filer"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:2377
+#, python-format
+msgid "Error finding luns for volume %(vol)s. Verify volume exists."
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2481 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s completed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2484 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s failed" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:312 +#, python-format +msgid "" +"Cloning with params ip %(host_ip)s, exp_path\n" +" %(export_path)s, vol %(volume_name)s,\n" +" clone_name %(clone_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:461 +#, python-format +msgid "No interface found on cluster for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:487 +#, python-format +msgid "" +"No volume on cluster with vserver\n" +" %(vserver)s and junction path " +"%(junction)s\n" +" " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:493 +#, python-format +msgid "" +"Cloning with params volume %(volume)s,src %(src_path)s,\n" +" dest %(dest_path)s, vserver %(vserver)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:547 +#, python-format +msgid "No storage path found for export path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:555 +#, python-format +msgid "Cloning with src %(src_path)s, dest %(dest_path)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/__init__.py:27 +msgid "Nexenta SA returned the error" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:64 +#, python-format +msgid "Sending JSON data: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:69 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:75 +msgid "No headers in server response" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:76 +msgid "Bad response from server" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:79 +#, fuzzy, python-format +msgid "Got response: %s" +msgstr "response %s" + +#: cinder/volume/drivers/nexenta/volume.py:99 +#, fuzzy, python-format +msgid "Volume %s does not exist in Nexenta SA" +msgstr "volume group %s doesn't exist" + +#: cinder/volume/drivers/nexenta/volume.py:211 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:220 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:228 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:238 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:274 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:282 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:74 +#, python-format +msgid "CLIQ command returned %s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:80 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:88 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. 
Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:118 +#, python-format +msgid "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. " +"Result=%(_xml)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:171 +#, python-format +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:235 +msgid "local_path not supported" +msgstr "" + +#: cinder/volume/drivers/san/san.py:148 +#, fuzzy, python-format +msgid "Error running SSH command: %s" +msgstr "Unexpected error while running command." + +#: cinder/volume/drivers/san/san.py:169 +msgid "Specify san_password or san_private_key" +msgstr "" + +#: cinder/volume/drivers/san/san.py:173 +msgid "san_ip must be set" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:80 +#, python-format +msgid "Cannot parse list-view output: %s" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:174 +#, python-format +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:257 +#, python-format +msgid "SSH Command failed after '%(total_attempts)r' attempts: '%(command)s'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:261 +#, fuzzy, python-format +msgid "Error running ssh command: %s" +msgstr "Unexpected error while running command." + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:446 +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:104 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:103 +#, python-format +msgid "CPG (%s) doesn't exist on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:493 +#, python-format +msgid "Must specify a valid persona %(valid)s, value '%(persona)s' is invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:532 +#, python-format +msgid "" +"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " +"invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:569 +#, fuzzy, python-format +msgid "Volume (%s) already exists on array" +msgstr "group %s already exists" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:622 +#, fuzzy, python-format +msgid "Unexpected state while cloning %s" +msgstr "Unexpected error while running command." + +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:97 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:96 +msgid "Login to 3PAR array invalid" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:218 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:237 +#, python-format +msgid "The hostname must be called '%s'" +msgstr "" + diff --git a/cinder/locale/en_GB/LC_MESSAGES/cinder.po b/cinder/locale/en_GB/LC_MESSAGES/cinder.po new file mode 100644 index 0000000000..0e31102dab --- /dev/null +++ b/cinder/locale/en_GB/LC_MESSAGES/cinder.po @@ -0,0 +1,5605 @@ +# English (United Kingdom) translation for cinder +# Copyright (c) 2011 Rosetta Contributors and Canonical Ltd 2011 +# This file is distributed under the same license as the cinder package. +# FIRST AUTHOR , 2011. 
+# +msgid "" +msgstr "" +"Project-Id-Version: cinder\n" +"Report-Msgid-Bugs-To: FULL NAME \n" +"POT-Creation-Date: 2013-05-21 18:10+0000\n" +"PO-Revision-Date: 2012-03-30 11:10+0000\n" +"Last-Translator: Anthony Harrington \n" +"Language-Team: English (United Kingdom) \n" +"Plural-Forms: nplurals=2; plural=(n != 1)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 0.9.6\n" + +#: cinder/context.py:61 +#, python-format +msgid "Arguments dropped when creating context: %s" +msgstr "" + +#: cinder/context.py:92 +#, python-format +msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" +msgstr "" + +#: cinder/exception.py:63 +msgid "Unexpected error while running command." +msgstr "Unexpected error while running command." + +#: cinder/exception.py:66 +#, python-format +msgid "" +"%(description)s\n" +"Command: %(cmd)s\n" +"Exit code: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" +msgstr "" +"%(description)s\n" +"Command: %(cmd)s\n" +"Exit code: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" + +#: cinder/exception.py:90 +msgid "DB exception wrapped." +msgstr "DB exception wrapped." + +#: cinder/exception.py:104 +msgid "An unknown exception occurred." +msgstr "An unknown exception occurred." + +#: cinder/exception.py:125 cinder/openstack/common/rpc/common.py:86 +msgid "Exception in string format operation" +msgstr "" + +#: cinder/exception.py:138 +msgid "Connection to glance failed" +msgstr "Connection to glance failed" + +#: cinder/exception.py:142 +msgid "Not authorized." +msgstr "Not authorised." + +#: cinder/exception.py:147 +msgid "User does not have admin privileges" +msgstr "User does not have admin privileges" + +#: cinder/exception.py:151 +#, python-format +msgid "Policy doesn't allow %(action)s to be performed." +msgstr "Policy doesn't allow %(action)s to be performed." + +#: cinder/exception.py:155 +#, python-format +msgid "Not authorized for image %(image_id)s." +msgstr "" + +#: cinder/exception.py:159 +msgid "Unacceptable parameters." +msgstr "Unacceptable parameters." + +#: cinder/exception.py:164 +msgid "Invalid snapshot" +msgstr "Invalid snapshot" + +#: cinder/exception.py:168 +#, fuzzy, python-format +msgid "Volume %(volume_id)s is still attached, detach volume first." +msgstr "Volume %(volume_id)s is not attached to anything" + +#: cinder/exception.py:172 +msgid "Failed to load data into json format" +msgstr "Failed to load data into json format" + +#: cinder/exception.py:176 +msgid "The request is invalid." +msgstr "The request is invalid." + +#: cinder/exception.py:180 +#, fuzzy +msgid "The results are invalid." +msgstr "The request is invalid." + +#: cinder/exception.py:184 +msgid "Invalid input received" +msgstr "Invalid input received" + +#: cinder/exception.py:188 +msgid "Invalid volume type" +msgstr "Invalid volume type" + +#: cinder/exception.py:192 +msgid "Invalid volume" +msgstr "Invalid volume" + +#: cinder/exception.py:196 +#, python-format +msgid "Invalid content type %(content_type)s." +msgstr "Invalid content type %(content_type)s." + +#: cinder/exception.py:200 +msgid "Invalid Parameter: Unicode is not supported by the current database." +msgstr "" + +#: cinder/exception.py:207 +#, python-format +msgid "%(err)s" +msgstr "%(err)s" + +#: cinder/exception.py:211 +msgid "Service is unavailable at this time." 
+msgstr ""
+
+#: cinder/exception.py:215
+#, python-format
+msgid "Image %(image_id)s is unacceptable: %(reason)s"
+msgstr ""
+
+#: cinder/exception.py:219
+msgid "Expected a uuid but received %(uuid)s."
+msgstr ""
+
+#: cinder/exception.py:223
+msgid "Resource could not be found."
+msgstr ""
+
+#: cinder/exception.py:229
+#, python-format
+msgid "Volume %(volume_id)s persistence file could not be found."
+msgstr ""
+
+#: cinder/exception.py:233
+#, python-format
+msgid "Volume %(volume_id)s could not be found."
+msgstr ""
+
+#: cinder/exception.py:237
+#, python-format
+msgid "Unable to locate account %(account_name)s on Solidfire device"
+msgstr ""
+
+#: cinder/exception.py:242
+#, python-format
+msgid "Volume not found for instance %(instance_id)s."
+msgstr ""
+
+#: cinder/exception.py:246
+#, python-format
+msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s."
+msgstr ""
+
+#: cinder/exception.py:251 cinder/exception.py:264
+msgid "Invalid metadata"
+msgstr ""
+
+#: cinder/exception.py:255 cinder/exception.py:268
+msgid "Invalid metadata size"
+msgstr ""
+
+#: cinder/exception.py:259
+#, python-format
+msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s."
+msgstr ""
+
+#: cinder/exception.py:272
+#, python-format
+msgid "Volume type %(volume_type_id)s could not be found."
+msgstr ""
+
+#: cinder/exception.py:276
+#, python-format
+msgid "Volume type with name %(volume_type_name)s could not be found."
+msgstr ""
+
+#: cinder/exception.py:281
+#, python-format
+msgid ""
+"Volume Type %(volume_type_id)s has no extra specs with key "
+"%(extra_specs_key)s."
+msgstr ""
+
+#: cinder/exception.py:286
+#, python-format
+msgid "Snapshot %(snapshot_id)s could not be found."
+msgstr ""
+
+#: cinder/exception.py:290
+#, python-format
+msgid "deleting volume %(volume_name)s that has snapshot"
+msgstr ""
+
+#: cinder/exception.py:294
+#, python-format
+msgid "deleting snapshot %(snapshot_name)s that has dependent volumes"
+msgstr ""
+
+#: cinder/exception.py:299
+#, python-format
+msgid "No target id found for volume %(volume_id)s."
+msgstr ""
+
+#: cinder/exception.py:303
+#, python-format
+msgid "Failed to create iscsi target for volume %(volume_id)s."
+msgstr ""
+
+#: cinder/exception.py:307
+#, python-format
+msgid "Failed to attach iSCSI target for volume %(volume_id)s."
+msgstr ""
+
+#: cinder/exception.py:311
+#, python-format
+msgid "Failed to remove iscsi target for volume %(volume_id)s."
+msgstr ""
+
+#: cinder/exception.py:315
+#, python-format
+msgid "No disk at %(location)s"
+msgstr ""
+
+#: cinder/exception.py:319
+#, python-format
+msgid "Invalid image href %(image_href)s."
+msgstr ""
+
+#: cinder/exception.py:323
+#, python-format
+msgid "Image %(image_id)s could not be found."
+msgstr ""
+
+#: cinder/exception.py:327
+#, python-format
+msgid "Service %(service_id)s could not be found."
+msgstr ""
+
+#: cinder/exception.py:331
+#, python-format
+msgid "Host %(host)s could not be found."
+msgstr ""
+
+#: cinder/exception.py:335
+#, python-format
+msgid "Scheduler Host Filter %(filter_name)s could not be found."
+msgstr ""
+
+#: cinder/exception.py:339
+#, python-format
+msgid "Scheduler Host Weigher %(weigher_name)s could not be found."
+msgstr ""
+
+#: cinder/exception.py:343
+#, python-format
+msgid "Could not find binary %(binary)s on host %(host)s."
+msgstr ""
+
+#: cinder/exception.py:347
+#, python-format
+msgid "Invalid reservation expiration %(expire)s."
+msgstr "" + +#: cinder/exception.py:351 +#, python-format +msgid "" +"Change would make usage less than 0 for the following resources: " +"%(unders)s" +msgstr "" + +#: cinder/exception.py:356 +msgid "Quota could not be found" +msgstr "" + +#: cinder/exception.py:360 +#, python-format +msgid "Unknown quota resources %(unknown)s." +msgstr "" + +#: cinder/exception.py:364 +#, python-format +msgid "Quota for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:368 +#, python-format +msgid "Quota class %(class_name)s could not be found." +msgstr "" + +#: cinder/exception.py:372 +#, python-format +msgid "Quota usage for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:376 +#, python-format +msgid "Quota reservation %(uuid)s could not be found." +msgstr "" + +#: cinder/exception.py:380 +#, python-format +msgid "Quota exceeded for resources: %(overs)s" +msgstr "" + +#: cinder/exception.py:384 +#, python-format +msgid "Migration %(migration_id)s could not be found." +msgstr "" + +#: cinder/exception.py:388 +#, python-format +msgid "Migration not found for instance %(instance_id)s with status %(status)s." +msgstr "" + +#: cinder/exception.py:393 +#, python-format +msgid "File %(file_path)s could not be found." +msgstr "" + +#: cinder/exception.py:397 +#, python-format +msgid "Class %(class_name)s could not be found: %(exception)s" +msgstr "" + +#: cinder/exception.py:401 +msgid "Action not allowed." +msgstr "" + +#: cinder/exception.py:410 +#, python-format +msgid "Key pair %(key_name)s already exists." +msgstr "" + +#: cinder/exception.py:414 +#, python-format +msgid "Volume Type %(id)s already exists." +msgstr "" + +#: cinder/exception.py:418 +msgid "Migration error" +msgstr "" + +#: cinder/exception.py:422 +#, python-format +msgid "Malformed message body: %(reason)s" +msgstr "" + +#: cinder/exception.py:426 +#, python-format +msgid "Could not find config at %(path)s" +msgstr "" + +#: cinder/exception.py:430 +#, python-format +msgid "Could not load paste app '%(name)s' from %(path)s" +msgstr "" + +#: cinder/exception.py:434 +#, python-format +msgid "No valid host was found. %(reason)s" +msgstr "" + +#: cinder/exception.py:438 +#, python-format +msgid "Host %(host)s is not up or doesn't exist." +msgstr "" + +#: cinder/exception.py:442 +msgid "Quota exceeded" +msgstr "" + +#: cinder/exception.py:449 +msgid "Requested volume or snapshot exceeds allowed Gigabytes quota" +msgstr "" + +#: cinder/exception.py:454 +msgid "Maximum volume/snapshot size exceeded" +msgstr "" + +#: cinder/exception.py:458 +#, python-format +msgid "Maximum number of volumes allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:462 +#, python-format +msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:466 +#, python-format +msgid "Detected more than one volume with name %(vol_name)s" +msgstr "" + +#: cinder/exception.py:470 +#, python-format +msgid "3PAR Host already exists: %(err)s. 
%(info)s" +msgstr "" + +#: cinder/exception.py:474 +#, python-format +msgid "Invalid 3PAR Domain: %(err)s" +msgstr "" + +#: cinder/exception.py:478 +#, python-format +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +msgstr "" + +#: cinder/exception.py:483 +msgid "Bad response from SolidFire API" +msgstr "" + +#: cinder/exception.py:487 +#, python-format +msgid "Error in SolidFire API response: data=%(data)s" +msgstr "" + +#: cinder/exception.py:491 +#, python-format +msgid "Unknown or unsupported command %(cmd)s" +msgstr "" + +#: cinder/exception.py:495 +#, python-format +msgid "Malformed response to command %(cmd)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:499 +#, python-format +msgid "Bad HTTP response status %(status)s" +msgstr "" + +#: cinder/exception.py:503 +#, python-format +msgid "Operation failed with status=%(status)s. Full dump: %(data)s" +msgstr "" + +#: cinder/exception.py:507 +#, fuzzy, python-format +msgid "Unable to create server object for initiator %(name)s" +msgstr "Unable to create VDI on SR %(sr_ref)s for instance %(instance_name)s" + +#: cinder/exception.py:511 +#, python-format +msgid "Unable to find server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:515 +msgid "Unable to find any active VPSA controller" +msgstr "" + +#: cinder/exception.py:519 +#, python-format +msgid "Failed to retrieve attachments for volume %(name)s" +msgstr "" + +#: cinder/exception.py:523 +#, python-format +msgid "Invalid attachment info for volume %(name)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:527 +#, python-format +msgid "Instance %(instance_id)s could not be found." +msgstr "" + +#: cinder/exception.py:531 +#, python-format +msgid "Bad or unexpected response from the storage volume backend API: %(data)s" +msgstr "" + +#: cinder/exception.py:536 +#, fuzzy +msgid "Unknown NFS exception" +msgstr "An unknown exception occurred." + +#: cinder/exception.py:540 +msgid "No mounted NFS shares found" +msgstr "" + +#: cinder/exception.py:544 cinder/exception.py:556 +#, python-format +msgid "There is no share which can host %(volume_size)sG" +msgstr "" + +#: cinder/exception.py:548 +#, fuzzy +msgid "Unknown Gluster exception" +msgstr "An unknown exception occurred." + +#: cinder/exception.py:552 +msgid "No mounted Gluster shares found" +msgstr "" + +#: cinder/exception.py:560 +#, python-format +msgid "" +"Glance metadata cannot be updated, key %(key)s exists for volume id " +"%(volume_id)s" +msgstr "" + +#: cinder/exception.py:565 +msgid "Failed to copy image to volume" +msgstr "" + +#: cinder/exception.py:569 +#, python-format +msgid "Backup %(backup_id)s could not be found." +msgstr "" + +#: cinder/exception.py:573 +#, fuzzy, python-format +msgid "Invalid backup: %(reason)s" +msgstr "Invalid backend: %s" + +#: cinder/exception.py:577 +#, fuzzy +msgid "Connection to swift failed" +msgstr "Connection to glance failed" + +#: cinder/flags.py:145 +msgid "Deploy v1 of the Cinder API. " +msgstr "" + +#: cinder/flags.py:148 +msgid "Deploy v2 of the Cinder API. " +msgstr "" + +#: cinder/manager.py:158 +#, python-format +msgid "Skipping %(full_task_name)s, %(ticks_to_skip)s ticks left until next run" +msgstr "" + +#: cinder/manager.py:164 +#, python-format +msgid "Running periodic task %(full_task_name)s" +msgstr "" + +#: cinder/manager.py:171 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "" + +#: cinder/manager.py:216 +msgid "Notifying Schedulers of capabilities ..." 
+msgstr "" + +#: cinder/policy.py:30 +msgid "JSON file representing policy" +msgstr "" + +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" +msgstr "" + +#: cinder/quota.py:704 +#, python-format +msgid "Created reservations %(reservations)s" +msgstr "" + +#: cinder/quota.py:726 +#, python-format +msgid "Failed to commit reservations %(reservations)s" +msgstr "" + +#: cinder/quota.py:747 +#, python-format +msgid "Failed to roll back reservations %(reservations)s" +msgstr "" + +#: cinder/service.py:124 +msgid "SIGTERM received" +msgstr "" + +#: cinder/service.py:174 cinder/openstack/common/service.py:164 +msgid "Parent process has died unexpectedly, exiting" +msgstr "" + +#: cinder/service.py:213 cinder/openstack/common/service.py:200 +msgid "Forking too fast, sleeping" +msgstr "" + +#: cinder/service.py:231 cinder/openstack/common/service.py:121 +#: cinder/openstack/common/service.py:218 +#, python-format +msgid "Caught %s, exiting" +msgstr "" + +#: cinder/service.py:236 cinder/openstack/common/service.py:223 +#, fuzzy +msgid "Unhandled exception" +msgstr "Inner Exception: %s" + +#: cinder/service.py:243 cinder/openstack/common/service.py:230 +#, python-format +msgid "Started child %d" +msgstr "" + +#: cinder/service.py:253 cinder/openstack/common/service.py:240 +#, python-format +msgid "Starting %d workers" +msgstr "" + +#: cinder/service.py:272 cinder/openstack/common/service.py:257 +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "" + +#: cinder/service.py:275 +#, python-format +msgid "Child %(pid)d exited with status %(code)d" +msgstr "" + +#: cinder/service.py:278 cinder/openstack/common/service.py:265 +#, python-format +msgid "pid %d not in child list" +msgstr "" + +#: cinder/service.py:286 +#, python-format +msgid "_wait_child %d" +msgstr "" + +#: cinder/service.py:302 +#, python-format +msgid "wait wrap.failed %s" +msgstr "" + +#: cinder/service.py:310 cinder/openstack/common/service.py:293 +#, python-format +msgid "Caught %s, stopping children" +msgstr "" + +#: cinder/service.py:321 cinder/openstack/common/service.py:304 +#, python-format +msgid "Waiting on %d children to exit" +msgstr "" + +#: cinder/service.py:353 +#, python-format +msgid "Starting %(topic)s node (version %(version_string)s)" +msgstr "" + +#: cinder/service.py:367 cinder/openstack/common/rpc/service.py:47 +#, python-format +msgid "Creating Consumer connection for Service %s" +msgstr "" + +#: cinder/service.py:458 +msgid "Service killed that has no database entry" +msgstr "" + +#: cinder/service.py:495 +msgid "The service database object disappeared, Recreating it." +msgstr "" + +#: cinder/service.py:510 +msgid "Recovered model server connection!" +msgstr "" + +#: cinder/service.py:516 +msgid "model server went away" +msgstr "" + +#: cinder/service.py:608 +msgid "Full set of FLAGS:" +msgstr "Full set of FLAGS:" + +#: cinder/service.py:615 +#, python-format +msgid "%(flag)s : FLAG SET " +msgstr "" + +#: cinder/utils.py:96 +#, python-format +msgid "Fetching %s" +msgstr "Fetching %s" + +#: cinder/utils.py:147 cinder/openstack/common/processutils.py:122 +#, python-format +msgid "Got unknown keyword args to utils.execute: %r" +msgstr "" + +#: cinder/utils.py:153 +msgid "" +"The root_helper option (which lets you specify a root wrapper different " +"from cinder-rootwrap, and defaults to using sudo) is now deprecated. You " +"should use the rootwrap_config option instead." 
+msgstr "" + +#: cinder/utils.py:169 cinder/openstack/common/processutils.py:137 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "Running cmd (subprocess): %s" + +#: cinder/utils.py:186 cinder/utils.py:267 +#: cinder/openstack/common/processutils.py:162 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:224 +#, python-format +msgid "Result was %s" +msgstr "Result was %s" + +#: cinder/utils.py:199 cinder/openstack/common/processutils.py:174 +#, python-format +msgid "%r failed. Retrying." +msgstr "" + +#: cinder/utils.py:241 cinder/volume/drivers/san/hp/hp_3par_common.py:197 +#, python-format +msgid "Running cmd (SSH): %s" +msgstr "Running cmd (SSH): %s" + +#: cinder/utils.py:243 +msgid "Environment not supported over SSH" +msgstr "" + +#: cinder/utils.py:247 +msgid "process_input not supported over SSH" +msgstr "" + +#: cinder/utils.py:316 cinder/volume/drivers/huawei/huawei_iscsi.py:91 +msgid "Specify a password or private_key" +msgstr "" + +#: cinder/utils.py:332 cinder/volume/drivers/huawei/huawei_iscsi.py:99 +#, python-format +msgid "Error connecting via ssh: %s" +msgstr "" + +#: cinder/utils.py:373 +#, python-format +msgid "debug in callback: %s" +msgstr "debug in callback: %s" + +#: cinder/utils.py:532 +#, python-format +msgid "Link Local address is not found.:%s" +msgstr "Link Local address is not found.:%s" + +#: cinder/utils.py:535 +#, python-format +msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +msgstr "Couldn't get Link Local IP of %(interface)s :%(ex)s" + +#: cinder/utils.py:569 +#, python-format +msgid "Invalid backend: %s" +msgstr "Invalid backend: %s" + +#: cinder/utils.py:580 +#, python-format +msgid "backend %s" +msgstr "backend %s" + +#: cinder/utils.py:630 +msgid "in looping call" +msgstr "" + +#: cinder/utils.py:813 +#, python-format +msgid "Expected object of type: %s" +msgstr "" + +#: cinder/utils.py:903 +#, python-format +msgid "timefunc: '%(name)s' took %(total_time).2f secs" +msgstr "" + +#: cinder/utils.py:1105 +#, python-format +msgid "Could not remove tmpdir: %s" +msgstr "" + +#: cinder/utils.py:1213 +#, python-format +msgid "Unknown byte multiplier: %s" +msgstr "" + +#: cinder/wsgi.py:121 +#, fuzzy, python-format +msgid "Unable to find cert_file : %s" +msgstr "Unable to destroy VBD %s" + +#: cinder/wsgi.py:124 +#, fuzzy, python-format +msgid "Unable to find ca_file : %s" +msgstr "Unable to locate volume %s" + +#: cinder/wsgi.py:127 +#, fuzzy, python-format +msgid "Unable to find key_file : %s" +msgstr "Unable to destroy VBD %s" + +#: cinder/wsgi.py:130 +msgid "" +"When running server in SSL mode, you must specify both a cert_file and " +"key_file option value in your configuration file" +msgstr "" + +#: cinder/wsgi.py:163 +#, python-format +msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" +msgstr "" + +#: cinder/wsgi.py:207 +#, python-format +msgid "Started %(name)s on %(_host)s:%(_port)s" +msgstr "" + +#: cinder/wsgi.py:226 +msgid "Stopping WSGI server." +msgstr "" + +#: cinder/wsgi.py:240 +msgid "WSGI server has stopped." +msgstr "" + +#: cinder/wsgi.py:309 +msgid "You must implement __call__" +msgstr "" + +#: cinder/api/auth.py:26 +msgid "" +"cinder.api.auth:CinderKeystoneContext is deprecated. Please use " +"cinder.api.middleware.auth:CinderKeystoneContext instead." +msgstr "" + +#: cinder/api/auth.py:34 +msgid "" +"cinder.api.auth:pipeline_factory is deprecated. Please use " +"cinder.api.middleware.auth:pipeline_factory instead." 
+msgstr "" + +#: cinder/api/common.py:63 cinder/api/common.py:97 cinder/volume/api.py:359 +msgid "limit param must be an integer" +msgstr "" + +#: cinder/api/common.py:66 cinder/api/common.py:101 cinder/volume/api.py:356 +msgid "limit param must be positive" +msgstr "" + +#: cinder/api/common.py:91 +msgid "offset param must be an integer" +msgstr "" + +#: cinder/api/common.py:105 +msgid "offset param must be positive" +msgstr "" + +#: cinder/api/common.py:133 +#, python-format +msgid "marker [%s] not found" +msgstr "" + +#: cinder/api/common.py:160 +#, python-format +msgid "href %s does not contain version" +msgstr "" + +#: cinder/api/extensions.py:184 +msgid "Initializing extension manager." +msgstr "" + +#: cinder/api/extensions.py:199 +#, python-format +msgid "Loaded extension: %s" +msgstr "" + +#: cinder/api/extensions.py:237 +#, python-format +msgid "Ext name: %s" +msgstr "" + +#: cinder/api/extensions.py:238 +#, python-format +msgid "Ext alias: %s" +msgstr "" + +#: cinder/api/extensions.py:239 +#, python-format +msgid "Ext description: %s" +msgstr "" + +#: cinder/api/extensions.py:241 +#, python-format +msgid "Ext namespace: %s" +msgstr "" + +#: cinder/api/extensions.py:242 +#, python-format +msgid "Ext updated: %s" +msgstr "" + +#: cinder/api/extensions.py:244 +#, python-format +msgid "Exception loading extension: %s" +msgstr "" + +#: cinder/api/extensions.py:258 +#, python-format +msgid "Loading extension %s" +msgstr "" + +#: cinder/api/extensions.py:264 +#, python-format +msgid "Calling extension factory %s" +msgstr "" + +#: cinder/api/extensions.py:278 +#, python-format +msgid "osapi_volume_extension is set to deprecated path: %s" +msgstr "" + +#: cinder/api/extensions.py:280 +#, python-format +msgid "" +"Please set your flag or cinder.conf settings for osapi_volume_extension " +"to: %s" +msgstr "" + +#: cinder/api/extensions.py:289 +#, python-format +msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:357 +#, python-format +msgid "Failed to load extension %(classpath)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:381 +#, python-format +msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgstr "" + +#: cinder/api/sizelimit.py:25 +msgid "" +"cinder.api.sizelimit:RequestBodySizeLimiter is deprecated. Please use " +"cinder.api.middleware.sizelimit:RequestBodySizeLimiter instead" +msgstr "" + +#: cinder/api/xmlutil.py:268 +msgid "element is not a child" +msgstr "" + +#: cinder/api/xmlutil.py:417 +msgid "root element selecting a list" +msgstr "" + +#: cinder/api/xmlutil.py:742 +#, python-format +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgstr "" + +#: cinder/api/xmlutil.py:861 +msgid "subclasses must implement construct()!" 
+msgstr "" + +#: cinder/api/contrib/admin_actions.py:80 +#, python-format +msgid "Updating %(resource)s '%(id)s' with '%(update)r'" +msgstr "" + +#: cinder/api/contrib/backups.py:128 +#, python-format +msgid "show called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:140 +#, python-format +msgid "delete called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:143 +#, python-format +msgid "Delete backup with id: %s" +msgstr "" + +#: cinder/api/contrib/backups.py:185 +#, python-format +msgid "Creating new backup %s" +msgstr "" + +#: cinder/api/contrib/backups.py:195 cinder/api/contrib/backups.py:230 +msgid "Incorrect request body format" +msgstr "" + +#: cinder/api/contrib/backups.py:201 +#, python-format +msgid "Creating backup of volume %(volume_id)s in container %(container)s" +msgstr "" + +#: cinder/api/contrib/backups.py:221 +#, python-format +msgid "Restoring backup %(backup_id)s (%(body)s)" +msgstr "" + +#: cinder/api/contrib/backups.py:234 +#, python-format +msgid "Restoring backup %(backup_id)s to volume %(volume_id)s" +msgstr "" + +#: cinder/api/contrib/extended_snapshot_attributes.py:61 +msgid "Snapshot not found." +msgstr "" + +#: cinder/api/contrib/hosts.py:83 cinder/api/openstack/wsgi.py:158 +msgid "cannot understand XML" +msgstr "" + +#: cinder/api/contrib/hosts.py:133 +#, python-format +msgid "Host '%s' could not be found." +msgstr "" + +#: cinder/api/contrib/hosts.py:162 +#, python-format +msgid "Invalid status: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:165 +#, python-format +msgid "Invalid update setting: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:177 +#, python-format +msgid "Setting host %(host)s to %(state)s." +msgstr "" + +#: cinder/api/contrib/hosts.py:202 +msgid "Describe-resource is admin only functionality" +msgstr "" + +#: cinder/api/contrib/hosts.py:210 +msgid "Host not found" +msgstr "" + +#: cinder/api/contrib/quotas.py:63 +msgid "Quota limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:102 +msgid "Request body empty" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:106 +#: cinder/api/v1/snapshot_metadata.py:77 cinder/api/v1/volume_metadata.py:77 +#: cinder/api/v2/snapshot_metadata.py:77 +msgid "Request body and URI mismatch" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:109 +#: cinder/api/v1/snapshot_metadata.py:81 cinder/api/v1/volume_metadata.py:81 +#: cinder/api/v2/snapshot_metadata.py:81 +msgid "Request body contains too many items" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:159 +msgid "Invalid request body" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:163 +msgid "No image_name was specified in request." +msgstr "" + +#: cinder/api/middleware/fault.py:45 +#, python-format +msgid "Caught error: %s" +msgstr "" + +#: cinder/api/middleware/fault.py:54 cinder/api/openstack/wsgi.py:886 +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "" + +#: cinder/api/middleware/sizelimit.py:54 cinder/api/middleware/sizelimit.py:63 +#: cinder/api/middleware/sizelimit.py:77 +msgid "Request is too large." 
+msgstr "" + +#: cinder/api/openstack/__init__.py:74 +msgid "Must specify an ExtensionManager class" +msgstr "" + +#: cinder/api/openstack/__init__.py:85 +#, python-format +msgid "Extended resource: %s" +msgstr "" + +#: cinder/api/openstack/__init__.py:110 +#, python-format +msgid "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" +msgstr "" + +#: cinder/api/openstack/__init__.py:115 +#, python-format +msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgstr "" + +#: cinder/api/openstack/__init__.py:128 +msgid "" +"cinder.api.openstack:FaultWrapper is deprecated. Please use " +"cinder.api.middleware.fault:FaultWrapper instead." +msgstr "" + +#: cinder/api/openstack/urlmap.py:25 +msgid "" +"cinder.api.openstack.urlmap:urlmap_factory is deprecated. Please use " +"cinder.api.urlmap:urlmap_factory instead." +msgstr "" + +#: cinder/api/openstack/wsgi.py:134 cinder/api/openstack/wsgi.py:537 +msgid "cannot understand JSON" +msgstr "" + +#: cinder/api/openstack/wsgi.py:542 +msgid "too many body keys" +msgstr "" + +#: cinder/api/openstack/wsgi.py:581 +#, python-format +msgid "Exception handling resource: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:586 +#, python-format +msgid "Fault thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:589 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:697 +msgid "Unrecognized Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:701 +msgid "No Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:705 +msgid "Empty body provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:816 +#, python-format +msgid "There is no such action: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:819 cinder/api/openstack/wsgi.py:832 +#: cinder/api/v1/snapshot_metadata.py:55 cinder/api/v1/snapshot_metadata.py:73 +#: cinder/api/v1/snapshot_metadata.py:98 cinder/api/v1/snapshot_metadata.py:123 +#: cinder/api/v1/volume_metadata.py:55 cinder/api/v1/volume_metadata.py:73 +#: cinder/api/v1/volume_metadata.py:98 cinder/api/v1/volume_metadata.py:123 +#: cinder/api/v2/snapshot_metadata.py:55 cinder/api/v2/snapshot_metadata.py:73 +#: cinder/api/v2/snapshot_metadata.py:98 cinder/api/v2/snapshot_metadata.py:123 +msgid "Malformed request body" +msgstr "" + +#: cinder/api/openstack/wsgi.py:829 +msgid "Unsupported Content-Type" +msgstr "" + +#: cinder/api/openstack/wsgi.py:841 +msgid "Malformed request url" +msgstr "" + +#: cinder/api/openstack/wsgi.py:889 +#, python-format +msgid "%(url)s returned a fault: %(e)s" +msgstr "" + +#: cinder/api/openstack/volume/__init__.py:25 +msgid "" +"cinder.api.openstack.volume:APIRouter is deprecated. Please use " +"cinder.api.v1.router:APIRouter instead." +msgstr "" + +#: cinder/api/openstack/volume/versions.py:26 +msgid "" +"cinder.api.openstack.volume.versions.Versions is deprecated. Please use " +"cinder.api.versions.Versions instead." +msgstr "" + +#: cinder/api/v1/limits.py:144 cinder/api/v2/limits.py:144 +#, python-format +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." +msgstr "" + +#: cinder/api/v1/limits.py:270 cinder/api/v2/limits.py:270 +msgid "This request was rate-limited." 
+msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:39 cinder/api/v1/snapshot_metadata.py:119 +#: cinder/api/v1/snapshot_metadata.py:158 cinder/api/v2/snapshot_metadata.py:39 +#: cinder/api/v2/snapshot_metadata.py:119 +#: cinder/api/v2/snapshot_metadata.py:158 +#, fuzzy +msgid "snapshot does not exist" +msgstr "instance %s: snapshotting" + +#: cinder/api/v1/snapshot_metadata.py:141 +#: cinder/api/v1/snapshot_metadata.py:151 cinder/api/v1/volume_metadata.py:141 +#: cinder/api/v1/volume_metadata.py:151 cinder/api/v2/snapshot_metadata.py:141 +#: cinder/api/v2/snapshot_metadata.py:151 +msgid "Metadata item was not found" +msgstr "" + +#: cinder/api/v1/snapshots.py:123 cinder/api/v2/snapshots.py:123 +#, python-format +msgid "Delete snapshot with id: %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:173 cinder/api/v2/snapshots.py:179 +#, python-format +msgid "Create snapshot from volume %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:177 cinder/api/v2/snapshots.py:188 +#, python-format +msgid "Invalid value '%s' for force. " +msgstr "" + +#: cinder/api/v1/volume_metadata.py:39 cinder/api/v1/volume_metadata.py:119 +#: cinder/api/v1/volume_metadata.py:158 +msgid "volume does not exist" +msgstr "" + +#: cinder/api/v1/volumes.py:106 +#, python-format +msgid "vol=%s" +msgstr "" + +#: cinder/api/v1/volumes.py:236 cinder/api/v2/volumes.py:153 +#, python-format +msgid "Delete volume with id: %s" +msgstr "" + +#: cinder/api/v1/volumes.py:278 cinder/api/v1/volumes.py:282 +#: cinder/api/v2/volumes.py:209 cinder/api/v2/volumes.py:213 +msgid "Invalid imageRef provided." +msgstr "" + +#: cinder/api/v1/volumes.py:339 cinder/api/v2/volumes.py:272 +#, python-format +msgid "Create volume of %s GB" +msgstr "" + +#: cinder/api/v1/volumes.py:418 +#, python-format +msgid "Removing options '%(bad_options)s' from query" +msgstr "" + +#: cinder/api/v2/volumes.py:359 +#, python-format +msgid "Removing options '%s' from query" +msgstr "" + +#: cinder/backup/api.py:53 +#, fuzzy +msgid "Backup status must be available or error" +msgstr "Volume status must be available" + +#: cinder/backup/api.py:78 +#, fuzzy +msgid "Volume to be backed up must be available" +msgstr "Volume status must be available" + +#: cinder/backup/api.py:112 +#, fuzzy +msgid "Backup status must be available" +msgstr "Volume status must be available" + +#: cinder/backup/api.py:117 +msgid "Backup to be restored has invalid size" +msgstr "" + +#: cinder/backup/api.py:126 +#, python-format +msgid "Creating volume of %(size)s GB for restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/api.py:140 +#, python-format +msgid "" +"volume size %(volume_size)d is too small to restore backup of size " +"%(size)d." 
+msgstr "" + +#: cinder/backup/api.py:145 +#, fuzzy +msgid "Volume to be restored to must be available" +msgstr "Volume status must be available" + +#: cinder/backup/api.py:151 +msgid "Volume to be restored to is smaller than the backup to be restored" +msgstr "" + +#: cinder/backup/api.py:155 +#, python-format +msgid "Overwriting volume %(volume_id)s with restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/manager.py:79 +msgid "Cleaning up incomplete backup operations" +msgstr "" + +#: cinder/backup/manager.py:83 +#, python-format +msgid "Resetting volume %s to available (was backing-up)" +msgstr "" + +#: cinder/backup/manager.py:87 +#, python-format +msgid "Resetting volume %s to error_restoring (was restoring-backup)" +msgstr "" + +#: cinder/backup/manager.py:98 +#, python-format +msgid "Resetting backup %s to error (was creating)" +msgstr "" + +#: cinder/backup/manager.py:104 +#, python-format +msgid "Resetting backup %s to available (was restoring)" +msgstr "" + +#: cinder/backup/manager.py:109 +#, python-format +msgid "Resuming delete on backup: %s" +msgstr "" + +#: cinder/backup/manager.py:119 +#, python-format +msgid "create_backup started, backup: %(backup_id)s for volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:128 +#, python-format +msgid "" +"create_backup aborted, expected volume status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:137 +#, python-format +msgid "" +"create_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:160 +#, python-format +msgid "create_backup finished. backup: %s" +msgstr "" + +#: cinder/backup/manager.py:166 +#, python-format +msgid "" +"restore_backup started, restoring backup: %(backup_id)s to volume: " +"%(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:175 +#, python-format +msgid "" +"restore_backup aborted, expected volume status %(expected_status)s but " +"got %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:183 +#, python-format +msgid "" +"restore_backup aborted, expected backup status %(expected_status)s but " +"got %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:199 +#, python-format +msgid "" +"restore_backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]" +msgstr "" + +#: cinder/backup/manager.py:220 +#, python-format +msgid "" +"restore_backup finished, backup: %(backup_id)s restored to volume: " +"%(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:228 +#, python-format +msgid "delete_backup started, backup: %s" +msgstr "" + +#: cinder/backup/manager.py:234 +#, python-format +msgid "" +"delete_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:244 +#, python-format +msgid "" +"delete_backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]" +msgstr "" + +#: cinder/backup/manager.py:264 +#, python-format +msgid "delete_backup finished, backup %s deleted" +msgstr "" + +#: cinder/backup/services/swift.py:96 +#, python-format +msgid "unsupported compression algorithm: %s" +msgstr "" + +#: cinder/backup/services/swift.py:117 +#, python-format +msgid "_check_container_exists: container: %s" +msgstr "" + +#: cinder/backup/services/swift.py:122 +#, 
python-format +msgid "container %s does not exist" +msgstr "" + +#: cinder/backup/services/swift.py:127 +#, python-format +msgid "container %s exists" +msgstr "" + +#: cinder/backup/services/swift.py:133 +#, python-format +msgid "_create_container started, container: %(container)s,backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:148 +#, python-format +msgid "_generate_swift_object_name_prefix: %s" +msgstr "" + +#: cinder/backup/services/swift.py:159 +#, python-format +msgid "generated object list: %s" +msgstr "" + +#: cinder/backup/services/swift.py:169 +#, python-format +msgid "" +"_write_metadata started, container name: %(container)s, metadata " +"filename: %(filename)s" +msgstr "" + +#: cinder/backup/services/swift.py:184 +#, python-format +msgid "" +"error writing metadata file to swift, MD5 of metadata file in swift " +"[%(etag)s] is not the same as MD5 of metadata file sent to swift " +"[%(md5)s]" +msgstr "" + +#: cinder/backup/services/swift.py:188 +msgid "_write_metadata finished" +msgstr "" + +#: cinder/backup/services/swift.py:193 +#, python-format +msgid "" +"_read_metadata started, container name: %(container)s, metadata filename:" +" %(filename)s" +msgstr "" + +#: cinder/backup/services/swift.py:197 +#, python-format +msgid "_read_metadata finished (%s)" +msgstr "" + +#: cinder/backup/services/swift.py:207 +#, fuzzy, python-format +msgid "volume size %d is invalid." +msgstr "The request is invalid." + +#: cinder/backup/services/swift.py:221 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to swift, volume size: " +"%(volume_size_bytes)d, swift object names prefix %(object_prefix)s, " +"availability zone: %(availability_zone)s" +msgstr "" + +#: cinder/backup/services/swift.py:237 +msgid "reading chunk of data from volume" +msgstr "" + +#: cinder/backup/services/swift.py:244 +#, python-format +msgid "" +"compressed %(data_size_bytes)d bytes of data to %(comp_size_bytes)d bytes" +" using %(algorithm)s" +msgstr "" + +#: cinder/backup/services/swift.py:248 +msgid "not compressing data" +msgstr "" + +#: cinder/backup/services/swift.py:252 +msgid "About to put_object" +msgstr "" + +#: cinder/backup/services/swift.py:257 +#, python-format +msgid "swift MD5 for %(object_name)s: %(etag)s" +msgstr "" + +#: cinder/backup/services/swift.py:260 +#, python-format +msgid "backup MD5 for %(object_name)s: %(md5)s" +msgstr "" + +#: cinder/backup/services/swift.py:262 +#, python-format +msgid "" +"error writing object to swift, MD5 of object in swift %(etag)s is not the" +" same as MD5 of object sent to swift %(md5)s" +msgstr "" + +#: cinder/backup/services/swift.py:268 +msgid "Calling eventlet.sleep(0)" +msgstr "" + +#: cinder/backup/services/swift.py:276 +#, python-format +msgid "backup %s finished." +msgstr "" + +#: cinder/backup/services/swift.py:281 +#, python-format +msgid "v1 swift volume backup restore of %s started" +msgstr "" + +#: cinder/backup/services/swift.py:287 +#, python-format +msgid "metadata_object_names = %s" +msgstr "" + +#: cinder/backup/services/swift.py:293 +msgid "" +"restore_backup aborted, actual swift object list in swift does not match " +"object list stored in metadata" +msgstr "" + +#: cinder/backup/services/swift.py:299 +#, python-format +msgid "" +"restoring object from swift. 
backup: %(backup_id)s, container: " +"%(container)s, swift object name: %(object_name)s, volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:309 +#, python-format +msgid "decompressing data using %s algorithm" +msgstr "" + +#: cinder/backup/services/swift.py:323 +#, python-format +msgid "v1 swift volume backup restore of %s finished" +msgstr "" + +#: cinder/backup/services/swift.py:331 +#, python-format +msgid "" +"starting restore of backup %(object_prefix)s from swift container: " +"%(container)s, to volume %(volume_id)s, backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:339 +#, python-format +msgid "Restoring swift backup version %s" +msgstr "" + +#: cinder/backup/services/swift.py:344 +#, python-format +msgid "No support to restore swift backup version %s" +msgstr "" + +#: cinder/backup/services/swift.py:348 +#, python-format +msgid "restore %(backup_id)s to %(volume_id)s finished." +msgstr "" + +#: cinder/backup/services/swift.py:362 +msgid "swift error while listing objects, continuing with delete" +msgstr "" + +#: cinder/backup/services/swift.py:371 +#, python-format +msgid "swift error while deleting object %s, continuing with delete" +msgstr "" + +#: cinder/backup/services/swift.py:374 +#, python-format +msgid "deleted swift object: %(swift_object_name)s in container: %(container)s" +msgstr "" + +#: cinder/backup/services/swift.py:380 +#, python-format +msgid "delete %s finished" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:152 +#, python-format +msgid "Creating iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:171 cinder/brick/iscsi/iscsi.py:264 +#: cinder/brick/iscsi/iscsi.py:400 cinder/brick/iscsi/iscsi.py:409 +#, python-format +msgid "Failed to create iscsi target for volume id:%(vol_id)s." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:181 +#, python-format +msgid "" +"Failed to create iscsi target for volume id:%(vol_id)s. Please ensure " +"your tgtd config file contains 'include %(volumes_dir)s/*'" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:192 +#, python-format +msgid "Removing iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:209 cinder/brick/iscsi/iscsi.py:426 +#, python-format +msgid "Failed to remove iscsi target for volume id:%(vol_id)s." 
+msgstr "" + +#: cinder/brick/iscsi/iscsi.py:218 cinder/brick/iscsi/iscsi.py:434 +msgid "valid iqn needed for show_target" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:270 +#, python-format +msgid "Removing iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:356 +msgid "rtstool is not installed correctly" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:376 +#, python-format +msgid "Creating iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:416 +#, fuzzy, python-format +msgid "Removing iscsi_target: %s" +msgstr "Rebooting instance %s" + +#: cinder/brick/iscsi/iscsi.py:455 +#, python-format +msgid "Failed to add initiator iqn %s to target" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:35 +#, fuzzy, python-format +msgid "Unable to find Volume Group: %s" +msgstr "Unable to detach volume %s" + +#: cinder/brick/local_dev/lvm.py:41 +#, fuzzy, python-format +msgid "Failed to create Volume Group: %s" +msgstr "Unable to locate volume %s" + +#: cinder/brick/local_dev/lvm.py:75 +msgid "Error creating Volume Group" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:76 +#, python-format +msgid "Cmd :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:77 +#, python-format +msgid "StdOut :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:78 +#, python-format +msgid "StdErr :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:82 +#, fuzzy, python-format +msgid "Unable to locate Volume Group %s" +msgstr "Unable to locate volume %s" + +#: cinder/brick/local_dev/lvm.py:265 +#, fuzzy, python-format +msgid "Unable to find VG: %s" +msgstr "Unable to detach volume %s" + +#: cinder/brick/local_dev/lvm.py:284 +msgid "" +"Requested to setup thin provisioning, however current LVM version does " +"not support it." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:337 +#, fuzzy, python-format +msgid "Unable to find LV: %s" +msgstr "Unable to detach volume %s" + +#: cinder/common/sqlalchemyutils.py:68 +msgid "Id not in sort_keys; is sort_keys unique?" +msgstr "" + +#: cinder/common/sqlalchemyutils.py:116 +msgid "Unknown sort direction, must be 'desc' or 'asc'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:51 +msgid "Use of empty request context is deprecated" +msgstr "Use of empty request context is deprecated" + +#: cinder/db/sqlalchemy/api.py:178 +#, python-format +msgid "Unrecognized read_deleted value '%s'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:786 +#, python-format +msgid "Change will make usage less than 0 for the following resources: %(unders)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1797 cinder/db/sqlalchemy/api.py:1823 +#, python-format +msgid "No backend config with id %(sm_backend_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1880 +#, python-format +msgid "No sm_flavor called %(sm_flavor)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1924 +#, python-format +msgid "No sm_volume with id %(volume_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1987 +#, python-format +msgid "No backup with id %(backup_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:74 +msgid "version should be an integer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:101 +msgid "Upgrade DB using Essex release first." +msgstr "" + +#: cinder/db/sqlalchemy/session.py:71 +#, python-format +msgid "Got mysql server has gone away: %s" +msgstr "" + +#: cinder/db/sqlalchemy/session.py:132 +#, python-format +msgid "SQL connection failed. %s attempts left." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:243 +msgid "Exception while creating table." 
+msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:272 +msgid "Downgrade from initial Cinder install is unsupported." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:85 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:123 +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:82 +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:47 +#, python-format +msgid "Table |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:135 +msgid "quota_classes table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:142 +msgid "quota_usages table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:149 +msgid "reservations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:62 +msgid "Exception while creating table 'volume_glance_metedata'" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:77 +msgid "volume_glance_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:94 +msgid "backups table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:60 +msgid "snapshot_metadata table not dropped" +msgstr "" + +#: cinder/image/glance.py:151 +#, python-format +msgid "" +"Error contacting glance server '%(host)s:%(port)s' for '%(method)s', " +"%(extra)s." +msgstr "" + +#: cinder/image/image_utils.py:116 +msgid "Snapshot list encountered but no header found!" +msgstr "" + +#: cinder/image/image_utils.py:224 +msgid "'qemu-img info' parsing failed." +msgstr "" + +#: cinder/image/image_utils.py:231 +#, python-format +msgid "fmt=%(fmt)s backed by:%(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:248 +#, python-format +msgid "Converted to raw, but format is now %s" +msgstr "" + +#: cinder/image/image_utils.py:278 +#, python-format +msgid "Converted to %(f1)s, but format is now %(f2)s" +msgstr "" + +#: cinder/openstack/common/exception.py:104 +msgid "Uncaught exception" +msgstr "" + +#: cinder/openstack/common/excutils.py:48 +#, python-format +msgid "Original exception being dropped: %s" +msgstr "" + +#: cinder/openstack/common/lockutils.py:102 +#, python-format +msgid "Could not release the acquired lock `%s`" +msgstr "" + +#: cinder/openstack/common/lockutils.py:188 +#, python-format +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:199 +#, python-format +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:226 +#, python-format +msgid "Got file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:234 +#, python-format +msgid "Released file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." 
+msgstr "" + +#: cinder/openstack/common/log.py:226 +#, python-format +msgid "Deprecated: %s" +msgstr "" + +#: cinder/openstack/common/log.py:329 +#, python-format +msgid "Error loading logging config %(log_config)s: %(err_msg)s" +msgstr "" + +#: cinder/openstack/common/log.py:379 +#, python-format +msgid "syslog facility must be one of: %s" +msgstr "" + +#: cinder/openstack/common/log.py:537 +#, python-format +msgid "Fatal call to deprecated config: %(msg)s" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:84 +#, python-format +msgid "task run outlasted interval by %s sec" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:91 +msgid "in fixed duration looping call" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:131 +#, python-format +msgid "Dynamic looping call sleeping for %.02f seconds" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:138 +msgid "in dynamic looping call" +msgstr "" + +#: cinder/openstack/common/policy.py:149 +#, python-format +msgid "" +"Inheritance-based rules are deprecated; use the default brain instead of " +"%s." +msgstr "" + +#: cinder/openstack/common/policy.py:163 +#, python-format +msgid "Failed to understand rule %(match)r" +msgstr "" + +#: cinder/openstack/common/policy.py:173 +#, python-format +msgid "Inheritance-based rules are deprecated; update _check_%s" +msgstr "" + +#: cinder/openstack/common/policy.py:180 +#, python-format +msgid "No handler for matches of kind %s" +msgstr "" + +#: cinder/openstack/common/service.py:112 +#: cinder/openstack/common/service.py:275 +#, fuzzy +msgid "Full set of CONF:" +msgstr "Full set of FLAGS:" + +#: cinder/openstack/common/service.py:261 +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "" + +#: cinder/openstack/common/strutils.py:72 +#, python-format +msgid "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:125 +#, python-format +msgid "%s not in valid priorities" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:141 +#, python-format +msgid "" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:171 +#, python-format +msgid "Failed to load notifier %s. These notifications will not be sent." +msgstr "" + +#: cinder/openstack/common/notifier/rabbit_notifier.py:27 +msgid "The rabbit_notifier is now deprecated. Please use rpc_notifier instead." +msgstr "" + +#: cinder/openstack/common/notifier/rpc_notifier.py:45 +#: cinder/openstack/common/notifier/rpc_notifier2.py:51 +#, python-format +msgid "Could not send notification to %(topic)s. Payload=%(message)s" +msgstr "" + +#: cinder/openstack/common/rpc/__init__.py:106 +#, python-format +msgid "" +"A RPC is being made while holding a lock. The locks currently held are " +"%(locks)s. This is probably a bug. Please report it. Include the " +"following: [%(stack)s]." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:75 +msgid "Pool creating new connection" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:200 +#, python-format +msgid "no calling threads waiting for msg_id : %s, message : %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:208 +#, python-format +msgid "" +"Number of call waiters is greater than warning threshhold: %d. There " +"could be a MulticallProxyWaiter leak." 
+msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:291 +#, python-format +msgid "unpacked context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:337 +#, python-format +msgid "UNIQUE_ID is %s." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:406 +#, python-format +msgid "received %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:414 +#, python-format +msgid "no method for message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:415 +#, python-format +msgid "No method for message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:443 +#: cinder/openstack/common/rpc/impl_zmq.py:286 +#, python-format +msgid "Expected exception during message handling (%s)" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:451 +#: cinder/openstack/common/rpc/impl_zmq.py:292 +msgid "Exception during message handling" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:586 +#, python-format +msgid "Making synchronous call on %s ..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:589 +#, python-format +msgid "MSG_ID is %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:623 +#, python-format +msgid "Making asynchronous cast on %s..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:632 +msgid "Making asynchronous fanout cast..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:660 +#, python-format +msgid "Sending %(event_type)s on %(topic)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:74 +#, fuzzy +msgid "An unknown RPC related exception occurred." +msgstr "An unknown exception occurred." + +#: cinder/openstack/common/rpc/common.py:104 +#, python-format +msgid "" +"Remote error: %(exc_type)s %(value)s\n" +"%(traceback)s." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:121 +#, python-format +msgid "" +"Timeout while waiting on RPC response - topic: \"%(topic)s\", RPC method:" +" \"%(method)s\" info: \"%(info)s\"" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:137 +#: cinder/openstack/common/rpc/common.py:138 +#: cinder/openstack/common/rpc/common.py:139 +msgid "" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:143 +#, python-format +msgid "Found duplicate message(%(msg_id)s). Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:147 +msgid "Invalid reuse of an RPC connection." +msgstr "Invalid reuse of an RPC connection." + +#: cinder/openstack/common/rpc/common.py:151 +#, python-format +msgid "Specified RPC version, %(version)s, not supported by this endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:156 +#, python-format +msgid "" +"Specified RPC envelope version, %(version)s, not supported by this " +"endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:280 +#, python-format +msgid "Failed to sanitize %(item)s. Key error %(err)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:302 +#, python-format +msgid "Returning exception %s to caller" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:169 +#: cinder/openstack/common/rpc/impl_qpid.py:133 +msgid "Failed to process message... skipping it." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:480 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:502 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:539 +#, python-format +msgid "" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:555 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:609 +#: cinder/openstack/common/rpc/impl_qpid.py:403 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:627 +#: cinder/openstack/common/rpc/impl_qpid.py:418 +#, python-format +msgid "Timed out waiting for RPC response: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:631 +#: cinder/openstack/common/rpc/impl_qpid.py:422 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:670 +#: cinder/openstack/common/rpc/impl_qpid.py:457 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:351 +#, python-format +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:357 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:370 +msgid "Re-established AMQP queues" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:430 +msgid "Error processing message. Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:97 +msgid "JSON serialization failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:104 +#, python-format +msgid "Deserializing: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:140 +#, python-format +msgid "Connecting to %(addr)s with %(type)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:141 +#, python-format +msgid "-> Subscribed to %(subscribe)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:142 +#, python-format +msgid "-> bind: %(bind)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:150 +msgid "Could not open socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:162 +#, python-format +msgid "Subscribing to %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:204 +msgid "You cannot recv on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:209 +msgid "You cannot send on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:273 +#, python-format +msgid "Running func with context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:311 +msgid "Sending reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:345 +msgid "RPC message did not include method." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:380 +msgid "Registering reactor" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:392 +msgid "In reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:407 +msgid "Out reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:411 +msgid "Consuming socket" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:453 +#, python-format +msgid "CONSUMER GOT %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:465 +#, python-format +msgid "Creating proxy for topic: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:471 +msgid "Topic contained dangerous characters." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:496 +#, python-format +msgid "ROUTER RELAY-OUT SUCCEEDED %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:505 +msgid "Topic socket file creation failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:510 +#, python-format +msgid "ROUTER RELAY-OUT QUEUED %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:513 +#, python-format +msgid "Local per-topic backlog buffer full for topic %(topic)s. Dropping message." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:532 +#, python-format +msgid "Could not create IPC directory %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:542 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:576 +#, python-format +msgid "CONSUMER RECEIVED DATA: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:578 +#, python-format +msgid "ROUTER RELAY-OUT %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:600 +msgid "ZMQ Envelope version unsupported or unknown." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:628 +msgid "Skipping topic registration. Already registered." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:635 +#, python-format +msgid "Consumer is a zmq.%s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:687 +msgid "Creating payload" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:700 +msgid "Creating queue socket for reply waiter" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:713 +msgid "Sending cast" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:716 +msgid "Cast sent; Waiting reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:719 +#, fuzzy, python-format +msgid "Received message: %s" +msgstr "Received %s" + +#: cinder/openstack/common/rpc/impl_zmq.py:720 +msgid "Unpacking response" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:729 +msgid "Unsupported or unknown ZMQ envelope returned." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:736 +#, fuzzy +msgid "RPC Message Invalid." +msgstr "The request is invalid." + +#: cinder/openstack/common/rpc/impl_zmq.py:760 +#, python-format +msgid "%(msg)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:763 +#, python-format +msgid "Sending message(s) to: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:767 +msgid "No matchmaker results. Not casting." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:770 +msgid "No match from matchmaker." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:53 +msgid "Match not found by MatchMaker." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:89 +#, fuzzy +msgid "Matchmaker does not implement registration or heartbeat." 
+msgstr "xenapi.fake does not have an implementation for %s" + +#: cinder/openstack/common/rpc/matchmaker.py:239 +#, python-format +msgid "Matchmaker unregistered: %s, %s" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:250 +msgid "Register before starting heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:343 +#: cinder/openstack/common/rpc/matchmaker.py:361 +#, python-format +msgid "No key defining hosts for topic '%s', see ringfile" +msgstr "" + +#: cinder/scheduler/chance.py:51 cinder/scheduler/simple.py:90 +msgid "Is the appropriate service running?" +msgstr "" + +#: cinder/scheduler/chance.py:56 +msgid "Could not find another host" +msgstr "" + +#: cinder/scheduler/driver.py:89 +msgid "Must implement a fallback schedule" +msgstr "" + +#: cinder/scheduler/driver.py:93 +msgid "Must implement schedule_create_volume" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:113 +msgid "Invalid value for 'scheduler_max_attempts', must be >=1" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:131 +#, python-format +msgid "" +"Error scheduling %(volume_id)s from last vol-service: %(last_host)s : " +"%(exc)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:160 +#, python-format +msgid "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:208 +#, python-format +msgid "Filtered %(hosts)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:214 +#, python-format +msgid "Choosing %(best_host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:240 +#, python-format +msgid "Ignoring %(service_name)s service update from %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:244 +#, python-format +msgid "Received %(service_name)s service update from %(host)s." +msgstr "" + +#: cinder/scheduler/host_manager.py:266 +msgid "service is down or disabled." +msgstr "" + +#: cinder/scheduler/manager.py:119 +#, python-format +msgid "Failed to schedule_%(method)s: %(ex)s" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:69 +#, python-format +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:78 +#, python-format +msgid "Could not decode scheduler options: '%(e)s'" +msgstr "" + +#: cinder/scheduler/simple.py:79 +msgid "Not enough allocatable volume gigabytes remaining" +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:37 +msgid "Free capacity not set: volume node info collection broken." +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:51 +#, python-format +msgid "" +"Insufficient free space for volume creation (requested / avail): " +"%(requested)s/%(available)s" +msgstr "" + +#: cinder/scheduler/filters/retry_filter.py:41 +#, python-format +msgid "Host %(host)s %(pass_msg)s. 
Previously tried hosts: %(hosts)s" +msgstr "" + +#: cinder/tests/fake_driver.py:45 cinder/volume/driver.py:506 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "" + +#: cinder/tests/fake_driver.py:93 cinder/tests/fake_driver.py:98 +#, python-format +msgid "LoggingVolumeDriver: %s" +msgstr "" + +#: cinder/tests/fake_utils.py:72 +#, python-format +msgid "Faking execution of cmd (subprocess): %s" +msgstr "" + +#: cinder/tests/fake_utils.py:80 +#, python-format +msgid "Faked command matched %s" +msgstr "" + +#: cinder/tests/fake_utils.py:96 +#, python-format +msgid "Faked command raised an exception %s" +msgstr "" + +#: cinder/tests/fake_utils.py:101 +#, python-format +msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" +msgstr "" + +#: cinder/tests/test_misc.py:59 +#, python-format +msgid "" +"The following migrations are missing a downgrade:\n" +"\t%s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:243 +#, python-format +msgid "unrecognized argument %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1217 +#, python-format +msgid "Run CLI command: %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1220 +#, python-format +msgid "" +"CLI output:\n" +" stdout: %(stdout)s\n" +" stderr: %(stderr)s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1225 +#, python-format +msgid "" +"CLI Exception output:\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/tests/test_volume_types.py:58 +#, python-format +msgid "Given data: %s" +msgstr "" + +#: cinder/tests/test_volume_types.py:59 +#, python-format +msgid "Result data: %s" +msgstr "" + +#: cinder/tests/api/contrib/test_backups.py:638 +#, fuzzy +msgid "Invalid input" +msgstr "Invalid snapshot" + +#: cinder/tests/integrated/test_login.py:31 +#, fuzzy, python-format +msgid "volume: %s" +msgstr "volume %s: deleting" + +#: cinder/tests/integrated/api/client.py:37 +#, python-format +msgid "" +"%(message)s\n" +"Status Code: %(_status)s\n" +"Body: %(_body)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:46 +msgid "Authentication error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:54 +msgid "Authorization error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:62 +msgid "Item not found" +msgstr "" + +#: cinder/tests/integrated/api/client.py:104 +#, python-format +msgid "Doing %(method)s on %(relative_url)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:106 +#, python-format +msgid "Body: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:124 +#, python-format +msgid "%(auth_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:150 +#, python-format +msgid "%(relative_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:160 +msgid "Unexpected status code" +msgstr "" + +#: cinder/tests/integrated/api/client.py:167 +#, python-format +msgid "Decoding JSON: %s" +msgstr "" + +#: cinder/volume/api.py:96 +msgid "May specify only one of snapshot, imageRef or source volume" +msgstr "" + +#: cinder/volume/api.py:103 cinder/volume/api.py:449 +#: cinder/volume/manager.py:554 +msgid "status must be available" +msgstr "" + +#: cinder/volume/api.py:108 +msgid "Volume size cannot be lesser than the Snapshot size" +msgstr "" + +#: cinder/volume/api.py:117 +msgid "Unable to clone volumes that are in an error state" +msgstr "" + +#: cinder/volume/api.py:123 +msgid "Clones currently must be >= original volume size." 
+msgstr "" + +#: cinder/volume/api.py:140 +#, python-format +msgid "Volume size '%s' must be an integer and greater than 0" +msgstr "" + +#: cinder/volume/api.py:150 +msgid "Size of specified image is larger than volume size." +msgstr "" + +#: cinder/volume/api.py:154 +msgid "Image minDisk size is larger than the volume size." +msgstr "" + +#: cinder/volume/api.py:168 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/api.py:177 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create volume (%(d_consumed)d " +"volumes already consumed)" +msgstr "" + +#: cinder/volume/api.py:308 +msgid "Failed to update quota for deleting volume" +msgstr "" + +#: cinder/volume/api.py:316 +#, fuzzy +msgid "Volume status must be available or error" +msgstr "Volume status must be available" + +#: cinder/volume/api.py:321 +#, python-format +msgid "Volume still has %d dependent snapshots" +msgstr "" + +#: cinder/volume/api.py:374 cinder/volume/api.py:432 +#: cinder/volume/volume_types.py:65 +#, python-format +msgid "Searching by: %s" +msgstr "" + +#: cinder/volume/api.py:452 +msgid "already attached" +msgstr "" + +#: cinder/volume/api.py:459 +msgid "already detached" +msgstr "" + +#: cinder/volume/api.py:470 +#, fuzzy +msgid "Volume status must be available to reserve" +msgstr "Volume status must be available" + +#: cinder/volume/api.py:519 +msgid "must be available" +msgstr "" + +#: cinder/volume/api.py:537 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG snapshot " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/api.py:546 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create snapshot (%(d_consumed)d " +"snapshots already consumed)" +msgstr "" + +#: cinder/volume/api.py:595 +#, fuzzy +msgid "Volume Snapshot status must be available or error" +msgstr "Volume status must be available" + +#: cinder/volume/api.py:623 +msgid "Metadata property key blank" +msgstr "" + +#: cinder/volume/api.py:627 +msgid "Metadata property key greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:631 +msgid "Metadata property value greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:716 +#, fuzzy +msgid "Volume status must be available/in-use." +msgstr "Volume status must be available" + +#: cinder/volume/api.py:719 +msgid "Volume status is in-use." +msgstr "" + +#: cinder/volume/driver.py:94 cinder/volume/drivers/netapp/nfs.py:253 +#, python-format +msgid "Recovering from a failed execute. Try number %s" +msgstr "" + +#: cinder/volume/driver.py:221 cinder/volume/drivers/emc/emc_smis_iscsi.py:119 +msgid "ISCSI provider_location not stored, using discovery" +msgstr "" + +#: cinder/volume/driver.py:269 +#, fuzzy, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "Unable to locate volume %s" + +#: cinder/volume/driver.py:273 cinder/volume/drivers/emc/emc_smis_iscsi.py:162 +#, python-format +msgid "ISCSI Discovery: Found %s" +msgstr "" + +#: cinder/volume/driver.py:361 +#, python-format +msgid "copy_image_to_volume %s." +msgstr "" + +#: cinder/volume/driver.py:378 +#, python-format +msgid "copy_volume_to_image %s." +msgstr "" + +#: cinder/volume/driver.py:437 +#, python-format +msgid "iSCSI device not found at %s" +msgstr "" + +#: cinder/volume/driver.py:439 +#, python-format +msgid "" +"ISCSI volume not yet found at: %(host_device)s. 
Will rescan & retry. Try" +" number: %(tries)s" +msgstr "" + +#: cinder/volume/driver.py:451 +#, python-format +msgid "Found iSCSI node %(host_device)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/volume/driver.py:469 cinder/volume/manager.py:689 +#: cinder/volume/drivers/lvm.py:560 cinder/volume/drivers/lvm.py:676 +#: cinder/volume/drivers/storwize_svc.py:1344 +#: cinder/volume/drivers/emc/emc_smis_common.py:855 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:241 +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1503 +#: cinder/volume/drivers/netapp/iscsi.py:1114 +#: cinder/volume/drivers/netapp/iscsi.py:1484 +#: cinder/volume/drivers/netapp/iscsi.py:2236 +#: cinder/volume/drivers/netapp/iscsi.py:2515 +#: cinder/volume/drivers/nexenta/volume.py:317 +msgid "Updating volume status" +msgstr "" + +#: cinder/volume/driver.py:544 +msgid "Driver must implement initialize_connection" +msgstr "" + +#: cinder/volume/manager.py:122 +#, python-format +msgid "Driver path %s is deprecated, update your configuration to the new path." +msgstr "" + +#: cinder/volume/manager.py:144 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "Re-exporting %s volumes" + +#: cinder/volume/manager.py:149 +#, python-format +msgid "volume %s stuck in a downloading state" +msgstr "" + +#: cinder/volume/manager.py:154 +#, python-format +msgid "volume %s: skipping export" +msgstr "volume %s: skipping export" + +#: cinder/volume/manager.py:156 +msgid "Resuming any in progress delete operations" +msgstr "" + +#: cinder/volume/manager.py:159 +#, python-format +msgid "Resuming delete on volume: %s" +msgstr "" + +#: cinder/volume/manager.py:219 +#, python-format +msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +msgstr "volume %(vol_name)s: creating lv of size %(vol_size)sG" + +#: cinder/volume/manager.py:228 +#, fuzzy, python-format +msgid "volume %s: creating from snapshot" +msgstr "volume %s: creating export" + +#: cinder/volume/manager.py:232 +#, fuzzy, python-format +msgid "volume %s: creating from existing volume" +msgstr "volume %s: creating export" + +#: cinder/volume/manager.py:236 +#, fuzzy, python-format +msgid "volume %s: creating from image" +msgstr "volume %s: creating" + +#: cinder/volume/manager.py:245 +#, python-format +msgid "volume %s: creating" +msgstr "volume %s: creating" + +#: cinder/volume/manager.py:277 +#, python-format +msgid "volume %s: creating export" +msgstr "volume %s: creating export" + +#: cinder/volume/manager.py:286 +#, fuzzy, python-format +msgid "volume %s: create failed" +msgstr "volume %s: creating" + +#: cinder/volume/manager.py:316 +#, python-format +msgid "volume %s: created successfully" +msgstr "volume %s: created successfully" + +#: cinder/volume/manager.py:324 +#, python-format +msgid "Error: %s" +msgstr "" + +#: cinder/volume/manager.py:350 +#, python-format +msgid "volume %s: Error trying to reschedule create" +msgstr "" + +#: cinder/volume/manager.py:368 +msgid "Retry info not present, will not reschedule" +msgstr "" + +#: cinder/volume/manager.py:372 +msgid "No request spec, will not reschedule" +msgstr "" + +#: cinder/volume/manager.py:377 +#, python-format +msgid "volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d" +msgstr "" + +#: cinder/volume/manager.py:406 cinder/volume/manager.py:419 +#, python-format +msgid "volume %s: deleting" +msgstr "volume %s: deleting" + +#: cinder/volume/manager.py:412 +#, fuzzy +msgid "volume is not local to this node" +msgstr "Volume is not local to this node" + +#: cinder/volume/manager.py:417 +#, 
python-format +msgid "volume %s: removing export" +msgstr "volume %s: removing export" + +#: cinder/volume/manager.py:422 +#, python-format +msgid "volume %s: volume is busy" +msgstr "" + +#: cinder/volume/manager.py:441 +msgid "Failed to update usages while deleting volume" +msgstr "" + +#: cinder/volume/manager.py:445 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "volume %s: deleted successfully" + +#: cinder/volume/manager.py:460 +#, python-format +msgid "snapshot %s: creating" +msgstr "" + +#: cinder/volume/manager.py:466 +#, python-format +msgid "snapshot %(snap_name)s: creating" +msgstr "" + +#: cinder/volume/manager.py:484 +#, python-format +msgid "snapshot %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:492 cinder/volume/manager.py:502 +#, python-format +msgid "snapshot %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:505 +#, fuzzy, python-format +msgid "snapshot %s: snapshot is busy" +msgstr "instance %s: snapshotting" + +#: cinder/volume/manager.py:530 +msgid "Failed to update usages while deleting snapshot" +msgstr "" + +#: cinder/volume/manager.py:533 +#, python-format +msgid "snapshot %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:551 +msgid "being attached by another instance" +msgstr "" + +#: cinder/volume/manager.py:608 +#, python-format +msgid "Downloaded image %(image_id)s to %(volume_id)s successfully" +msgstr "" + +#: cinder/volume/manager.py:626 +#, python-format +msgid "Uploaded volume %(volume_id)s to image (%(image_id)s) successfully" +msgstr "" + +#: cinder/volume/manager.py:702 +msgid "Clear capabilities" +msgstr "" + +#: cinder/volume/manager.py:706 +#, python-format +msgid "Notification {%s} received" +msgstr "" + +#: cinder/volume/volume_types.py:41 +#, python-format +msgid "DB error: %s" +msgstr "" + +#: cinder/volume/volume_types.py:50 cinder/volume/volume_types.py:97 +msgid "id cannot be None" +msgstr "" + +#: cinder/volume/volume_types.py:109 +msgid "name cannot be None" +msgstr "" + +#: cinder/volume/volume_types.py:128 +#, python-format +msgid "" +"Default volume type is not found, please check default_volume_type " +"config: %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:91 +msgid "Running with CoraidDriver for ESM EtherCloud" +msgstr "" + +#: cinder/volume/drivers/coraid.py:102 +#, python-format +msgid "Update session cookie %(session)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:108 cinder/volume/drivers/coraid.py:183 +#, python-format +msgid "Message : %(message)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:125 +#, python-format +msgid "Error while trying to set group: %(message)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:128 +#, fuzzy, python-format +msgid "Unable to find group: %(group)s" +msgstr "Unable to detach volume %s" + +#: cinder/volume/drivers/coraid.py:161 +msgid "ESM urlOpen error" +msgstr "" + +#: cinder/volume/drivers/coraid.py:166 +msgid "JSON Error" +msgstr "" + +#: cinder/volume/drivers/coraid.py:170 +msgid "Request without URL" +msgstr "" + +#: cinder/volume/drivers/coraid.py:175 +#, python-format +msgid "Configure data : %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:177 +#, python-format +msgid "Configure response : %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:199 +#, python-format +msgid "Unable to retrieve volume info for volume %(volname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:289 +msgid "Cannot log in to Coraid ESM" +msgstr "" + +#: cinder/volume/drivers/coraid.py:308 +#, fuzzy, python-format +msgid "Failed to create 
volume %(volname)s" +msgstr "Unable to locate volume %s" + +#: cinder/volume/drivers/coraid.py:321 +#, fuzzy, python-format +msgid "Failed to delete volume %(volname)s" +msgstr "Unable to locate volume %s" + +#: cinder/volume/drivers/coraid.py:335 +#, python-format +msgid "Failed to Create Snapshot %(snapname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:347 +#, python-format +msgid "Failed to Delete Snapshot %(snapname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:362 +#, python-format +msgid "Failed to Create Volume from Snapshot %(snapname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:383 +#, python-format +msgid "" +"Failed to Initialize Connection. Volume Name: %(volname)s Shelf: " +"%(shelf)s, Lun: %(lun)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:65 +#, python-format +msgid "There's no Gluster config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:70 +#, python-format +msgid "Gluster config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:80 +msgid "mount.glusterfs is not installed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:98 cinder/volume/drivers/nfs.py:168 +#, fuzzy, python-format +msgid "casted to %s" +msgstr "Nested return %s" + +#: cinder/volume/drivers/glusterfs.py:108 cinder/volume/drivers/nfs.py:178 +#, python-format +msgid "Volume %s does not have provider_location specified, skipping" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:168 cinder/volume/drivers/nfs.py:237 +#, python-format +msgid "Exception during mounting %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:247 cinder/volume/drivers/nfs.py:321 +#, python-format +msgid "%s is already mounted" +msgstr "" + +#: cinder/volume/drivers/lvm.py:82 +#, python-format +msgid "volume group %s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/lvm.py:197 +#, python-format +msgid "Size for volume: %s not found, skipping secure delete." +msgstr "" + +#: cinder/volume/drivers/lvm.py:204 +#, python-format +msgid "Performing secure delete on volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:218 +#, python-format +msgid "Error unrecognized volume_clear option: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:238 +#, python-format +msgid "snapshot: %s not found, skipping delete operations" +msgstr "" + +#: cinder/volume/drivers/lvm.py:268 cinder/volume/drivers/lvm.py:656 +#, python-format +msgid "Creating clone of volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:345 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provision for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:364 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:378 +msgid "Detected inconsistency in provider_location id" +msgstr "" + +#: cinder/volume/drivers/lvm.py:430 +#, python-format +msgid "Symbolic link %s not found" +msgstr "" + +#: cinder/volume/drivers/lvm.py:510 cinder/volume/drivers/lvm.py:524 +#, python-format +msgid "Skipping remove_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:542 +#, python-format +msgid "" +"Skipping remove_export. 
No iscsi_target is presently exported for volume:" +" %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:583 +msgid "Error retrieving volume status: " +msgstr "" + +#: cinder/volume/drivers/nfs.py:141 +#, python-format +msgid "There's no NFS config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/nfs.py:146 +#, python-format +msgid "NFS config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/rbd.py:64 +#, python-format +msgid "rbd has no pool %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:87 cinder/volume/drivers/sheepdog.py:134 +msgid "error refreshing volume stats" +msgstr "" + +#: cinder/volume/drivers/rbd.py:220 +msgid "Not stored in rbd" +msgstr "" + +#: cinder/volume/drivers/rbd.py:224 +msgid "Blank components" +msgstr "" + +#: cinder/volume/drivers/rbd.py:227 +#, fuzzy +msgid "Not an rbd snapshot" +msgstr "Invalid snapshot" + +#: cinder/volume/drivers/rbd.py:242 +#, python-format +msgid "%s is in a different ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:253 +#, fuzzy, python-format +msgid "Unable to read image %s" +msgstr "Unable to locate volume %s" + +#: cinder/volume/drivers/scality.py:63 +msgid "Value required for 'scality_sofs_config'" +msgstr "" + +#: cinder/volume/drivers/scality.py:74 +#, python-format +msgid "Cannot access 'scality_sofs_config': %s" +msgstr "" + +#: cinder/volume/drivers/scality.py:80 +msgid "Cannot execute /sbin/mount.sofs" +msgstr "" + +#: cinder/volume/drivers/scality.py:101 +msgid "Cannot mount Scality SOFS, check syslog for errors" +msgstr "" + +#: cinder/volume/drivers/scality.py:135 +#, python-format +msgid "Cannot find volume dir for Scality SOFS at '%s'" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:47 +#, python-format +msgid "Sheepdog is not working: %s" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:52 +msgid "Sheepdog is not working" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:129 +#, python-format +msgid "Payload for SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:146 +#, python-format +msgid "Call to json.loads() raised an exception: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:152 +#, python-format +msgid "Results of SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:156 +#, python-format +msgid "Clone operation encountered: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:158 +#, python-format +msgid "Waiting for outstanding operation before retrying snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:164 +#, python-format +msgid "Detected xDBVersionMismatch, retry %s of 5" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:171 +#, python-format +msgid "API response: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:191 +#, python-format +msgid "Found solidfire account: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:220 +#, python-format +msgid "solidfire account: %s does not exist, creating it..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:281 +#, python-format +msgid "Failed to retrieve volume SolidFire-ID: %s in get_by_account!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:338 +msgid "Failed to get model update from clone" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:364 +#, python-format +msgid "More than one valid preset was detected, using %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:397 +#, python-format +msgid "Mapped SolidFire volumeID %(sfid)s to cinder ID %(uuid)s." 
+msgstr "" + +#: cinder/volume/drivers/solidfire.py:406 +#, python-format +msgid "Volume %s, not found on SF Cluster." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:409 +#, python-format +msgid "Found %(count)s volumes mapped to id: %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:476 +msgid "Enter SolidFire delete_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:480 +#, python-format +msgid "Account for Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:482 +msgid "This usually means the volume was never succesfully created." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:497 +#, python-format +msgid "Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:500 +msgid "Leaving SolidFire delete_volume" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:504 +msgid "Executing SolidFire ensure_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:509 +msgid "Executing SolidFire create_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:558 +msgid "Updating cluster status info" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:566 +#, fuzzy +msgid "Failed to get updated stats" +msgstr "Failed to decrypt text" + +#: cinder/volume/drivers/storwize_svc.py:173 +#, python-format +msgid "WWPN on node %(node)s: %(wwpn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:179 +msgid "enter: do_setup" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:190 +#, python-format +msgid "pool %s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:206 +msgid "Failed to get license information." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:265 +msgid "do_setup: No configured nodes" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:267 +msgid "leave: do_setup" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:289 +msgid "enter: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:295 +#: cinder/volume/drivers/netapp/iscsi.py:169 +#: cinder/volume/drivers/netapp/iscsi.py:1182 +#: cinder/volume/drivers/netapp/iscsi.py:1535 +#: cinder/volume/drivers/netapp/nfs.py:109 +#: cinder/volume/drivers/netapp/nfs.py:328 +#: cinder/volume/drivers/netapp/nfs.py:379 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:118 +#, python-format +msgid "%s is not set" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:301 +msgid "" +"Password or SSH private key is required for authentication: set either " +"san_password or san_private_key option" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:309 +#, python-format +msgid "" +"Illegal value %d specified for storwize_svc_flashcopy_timeout: valid " +"values are between 0 and 600" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:317 +msgid "leave: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:331 +#, python-format +msgid "ensure_export: Volume %s not found on storage" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:356 +#, python-format +msgid "enter: _get_chap_secret_for_host: host name %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:392 +#, python-format +msgid "" +"leave: _get_chap_secret_for_host: host name %(host_name)s with secret " +"%(chap_secret)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:415 +msgid "_create_host: Cannot clean host name. 
Host name is not unicode or string" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:483 +#, python-format +msgid "enter: _get_host_from_connector: prefix %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:511 +#, python-format +msgid "leave: _get_host_from_connector: host %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:523 +#, python-format +msgid "enter: _create_host: host %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:538 +msgid "_create_host: No connector ports" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:551 +#, python-format +msgid "leave: _create_host: host %(host)s - %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:574 +#, python-format +msgid "enter: _map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:609 +msgid "" +"storwize_svc_multihostmap_enabled is set to False, multi-host mapping is " +"not allowed" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:619 +#, fuzzy, python-format +msgid "volume %s is mapped to multiple hosts" +msgstr "volume %s: skipping export" + +#: cinder/volume/drivers/storwize_svc.py:625 +#, python-format +msgid "" +"leave: _map_vol_to_host: LUN %(result_lun)s, volume %(volume_name)s, host" +" %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:635 +#, python-format +msgid "enter: _delete_host: host %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:643 +#, python-format +msgid "leave: _delete_host: host %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:675 +#, python-format +msgid "enter: initialize_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:691 +msgid "_create_host failed to return the host name." 
+msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:702 +#, python-format +msgid "initialize_connection: Failed to get attributes for volume %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:709 +#, python-format +msgid "Did not find expected column name in lsvdisk: %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:711 +#, python-format +msgid "initialize_connection: Missing volume attribute for volume %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:728 +#, python-format +msgid "" +"initialize_connection: No node found in I/O group %(gid)s for volume " +"%(vol)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:736 +#, python-format +msgid "initialize_connection: Did not find a preferred node for volume %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:769 +#, python-format +msgid "" +"initialize_connection: Failed to collect return properties for volume " +"%(vol)s and connector %(conn)s.\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:774 +#, python-format +msgid "" +"leave: initialize_connection:\n" +" volume: %(vol)s\n" +" connector %(conn)s\n" +" properties: %(prop)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:792 +#, python-format +msgid "enter: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:802 +msgid "_get_host_from_connector failed to return the host name for connector" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:817 +#, python-format +msgid "" +"terminate_connection: No mapping of volume %(vol_name)s to host " +"%(host_name)s found" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:825 +#, python-format +msgid "leave: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:883 +msgid "protocol must be specified as ' iSCSI' or ' FC'" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:907 +#, python-format +msgid "enter: _create_vdisk: vdisk %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:945 +#, python-format +msgid "" +"_create_vdisk %(name)s - did not find success message in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:950 +#, python-format +msgid "leave: _create_vdisk: volume %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:962 +#: cinder/volume/drivers/storwize_svc.py:976 +#, python-format +msgid "" +"create FC mapping from %(source)s to %(target)s - did not find success " +"message in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:988 +#: cinder/volume/drivers/storwize_svc.py:998 +#, python-format +msgid "" +"create FC mapping from %(source)s to %(target)s - did not find mapping id" +" in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1012 +#, python-format +msgid "" +"_prepare_fc_map: Failed to prepare FlashCopy from %(source)s to " +"%(target)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1039 +#, python-format +msgid "" +"Unexecpted mapping status %(status)s for mapping %(id)s. Attributes: " +"%(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1050 +#, python-format +msgid "" +"Mapping %(id)s prepare failed to complete within the allotted %(to)d " +"seconds timeout. Terminating." 
+msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1055 +#, python-format +msgid "" +"_prepare_fc_map: Failed to start FlashCopy from %(source)s to %(target)s " +"with exception %(ex)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1062 +#, python-format +msgid "_prepare_fc_map: %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1069 +#, python-format +msgid "" +"_start_fc_map: Failed to start FlashCopy from %(source)s to %(target)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1080 +#, python-format +msgid "" +"enter: _run_flashcopy: execute FlashCopy from source %(source)s to target" +" %(target)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1092 +#, python-format +msgid "leave: _run_flashcopy: FlashCopy started from %(source)s to %(target)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1100 +#, python-format +msgid "enter: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1107 +#, python-format +msgid "_create_copy: Source vdisk %s does not exist" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1119 +#, python-format +msgid "" +"_create_copy: cannot get source vdisk %(src)s capacity from vdisk " +"attributes %(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1129 +#, python-format +msgid "leave: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1134 +#, python-format +msgid "enter: _get_flashcopy_mapping_attributes: mapping %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1156 +#, python-format +msgid "" +"leave: _get_flashcopy_mapping_attributes: mapping %(fc_map_id)s, " +"attributes %(attributes)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1164 +#, python-format +msgid "enter: _is_vdisk_defined: vdisk %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1166 +#, python-format +msgid "leave: _is_vdisk_defined: vdisk %(vol)s with %(str)s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1194 +#, python-format +msgid "enter: _delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1199 +#, python-format +msgid "warning: Tried to delete vdisk %s but it does not exist." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1225 +#, python-format +msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1258 +#, python-format +msgid "leave: _delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1283 +msgid "create_volume_from_snapshot: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1297 +msgid "create_cloned_volume: Source and destination size differ." 
+msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1361 +msgid "_update_volume_status: Could not get system name" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1373 +msgid "Could not get pool data from the storage" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1374 +msgid "_update_volume_status: Could not get storage pool data" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1412 +msgid "" +"Illegal value specified for storwize_svc_vol_rsize: set to either a " +"percentage (0-100) or -1" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1418 +msgid "" +"Illegal value specified for storwize_svc_vol_warning: set to a percentage" +" (0-100)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1425 +msgid "" +"Illegal value specified for storwize_svc_vol_grainsize: set to either 32," +" 64, 128, or 256" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1432 +msgid "System does not support compression" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1437 +msgid "If compression is set to True, rsize must also be set (not equal to -1)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1443 +#, python-format +msgid "" +"Illegal value %(prot)s specified for storwize_svc_connection_protocol: " +"valid values are %(enabled)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1452 +msgid "" +"Multipath is currently only supported for FC connections and not iSCSI. " +"(This is a Nova limitation.)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1464 +#, python-format +msgid "enter: _execute_command_and_parse_attributes: command %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1471 +#, fuzzy, python-format +msgid "" +"CLI Exception output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" +"%(description)s\n" +"Command: %(cmd)s\n" +"Exit code: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" + +#: cinder/volume/drivers/storwize_svc.py:1488 +#, python-format +msgid "" +"leave: _execute_command_and_parse_attributes:\n" +"command: %(cmd)s\n" +"attributes: %(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1507 +#, python-format +msgid "" +"_get_hdr_dic: attribute headers and values do not match.\n" +" Headers: %(header)s\n" +" Values: %(row)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1517 +#, python-format +msgid "" +"%(fun)s: Failed with unexpected CLI output.\n" +" Command: %(cmd)s\n" +"stdout: %(out)s\n" +"stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1531 +#, python-format +msgid "" +"%(fun)s: Failed with unexpected CLI output.\n" +" Command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1539 +#, python-format +msgid "Did not find expected column in %(fun)s: %(hdr)s" +msgstr "" + +#: cinder/volume/drivers/windows.py:142 +#, python-format +msgid "Creating folder %s " +msgstr "" + +#: cinder/volume/drivers/windows.py:207 +#: cinder/volume/drivers/nexenta/volume.py:203 +#, python-format +msgid "Ignored target creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/windows.py:213 +#, fuzzy, python-format +msgid "Disk not found: %s" +msgstr "Link Local address is not found.:%s" + +#: cinder/volume/drivers/zadara.py:218 +#, python-format +msgid "Sending %(method)s to %(url)s. Body \"%(body)s\"" +msgstr "" + +#: cinder/volume/drivers/zadara.py:240 +#, python-format +msgid "Operation completed. 
%(data)s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:350 +#, python-format +msgid "Volume %(name)s could not be found. It might be already deleted" +msgstr "" + +#: cinder/volume/drivers/zadara.py:447 +#, python-format +msgid "Attach properties: %(properties)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:42 +msgid "" +"Module PyWBEM not installed. Install PyWBEM using the python-pywbem " +"package." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:78 +msgid "Entering create_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:82 +#, fuzzy, python-format +msgid "Create Volume: %(volume)s Size: %(size)lu" +msgstr "volume %(vol_name)s: creating lv of size %(vol_size)sG" + +#: cinder/volume/drivers/emc/emc_smis_common.py:90 +#, python-format +msgid "Create Volume: %(volume)s Storage type: %(storage_type)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:97 +#, python-format +msgid "" +"Create Volume: %(volume)s Pool: %(pool)s Storage System: " +"%(storage_system)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:106 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Storage Configuration Service not " +"found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:114 +#, python-format +msgid "" +"Create Volume: %(name)s Method: CreateOrModifyElementFromStoragePool " +"ConfigServicie: %(service)s ElementName: %(name)s InPool: %(pool)s " +"ElementType: 5 Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:129 +#, python-format +msgid "Create Volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:136 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:143 +#, python-format +msgid "Leaving create_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:151 +msgid "Entering create_volume_from_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:156 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:166 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Snapshot Instance: %(snapshotinstance)s Storage " +"System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:176 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Create Volume from Snapshot is NOT supported on VMAX." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:187 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Cannot find Replication Service to create volume from " +"snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:196 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Method: CreateElementReplica ReplicationService: " +"%(service)s ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:217 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s " +"Snapshot:%(snapshotname)s. 
Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:229 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Successfully cloned volume from snapshot. Finding the " +"clone relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:240 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Remove the clone relationship. Method: " +"ModifyReplicaSynchronization ReplicationService: %(service)s Operation: " +"8 Synchronization: %(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:256 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:265 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:277 +#, python-format +msgid "" +"Leaving create_volume_from_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:286 +msgid "Entering create_cloned_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:291 +#, python-format +msgid "" +"Create a Clone from Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:301 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Source Instance: %(src_instance)s Storage System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:311 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Cannot find Replication Service to create cloned volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:320 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Method: CreateElementReplica ReplicationService: %(service)s " +"ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:341 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source " +"Volume: %(srcname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:353 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Successfully cloned volume from source volume. Finding the clone " +"relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:364 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Remove the clone relationship. Method: ModifyReplicaSynchronization " +"ReplicationService: %(service)s Operation: 8 Synchronization: " +"%(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:380 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:389 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Return code: %(rc)lu. 
Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:401 +#, python-format +msgid "" +"Leaving create_cloned_volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:410 +msgid "Entering delete_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:412 +#, python-format +msgid "Delete Volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:419 +#, python-format +msgid "Volume %(name)s not found on the array. No volume to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:429 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Storage Configuration Service not " +"found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:437 +#, python-format +msgid "Delete Volume: %(name)s DeviceID: %(deviceid)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:441 +#, python-format +msgid "" +"Delete Volume: %(name)s Method: EMCReturnToStoragePool ConfigServic: " +"%(service)s TheElement: %(vol_instance)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:454 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:463 +#, python-format +msgid "Leaving delete_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:470 +msgid "Entering create_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:474 +#, python-format +msgid "Create snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:486 +#, python-format +msgid "Device ID: %(deviceid)s: Storage System: %(storagesystem)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:493 +#: cinder/volume/drivers/emc/emc_smis_common.py:495 +#: cinder/volume/drivers/emc/emc_smis_common.py:565 +#, python-format +msgid "Cannot find Replication Service to create snapshot for volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:500 +#, python-format +msgid "" +"Create Snapshot: Method: CreateElementReplica: Target: %(snapshot)s " +"Source: %(volume)s Replication Service: %(service)s ElementName: " +"%(elementname)s Sync Type: 7 SourceElement: %(sourceelement)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:516 +#, python-format +msgid "" +"Create Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:525 +#, python-format +msgid "Error Create Snapshot: (snapshot)s Volume: %(volume)s Error: %(errordesc)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:533 +#, python-format +msgid "" +"Leaving create_snapshot: Snapshot: %(snapshot)s Volume: %(volume)s " +"Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:539 +msgid "Entering delete_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:543 +#, python-format +msgid "Delete Snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:549 +#, python-format +msgid "" +"Delete Snapshot: %(snapshot)s: volume: %(volume)s. Finding " +"StorageSychronization_SV_SV." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:557 +#, python-format +msgid "" +"Snapshot: %(snapshot)s: volume: %(volume)s not found on the array. No " +"snapshot to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:572 +#, python-format +msgid "" +"Delete Snapshot: Target: %(snapshot)s Source: %(volume)s. Method: " +"ModifyReplicaSynchronization: Replication Service: %(service)s " +"Operation: 19 Synchronization: %(sync_name)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:588 +#, python-format +msgid "" +"Delete Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:597 +#, python-format +msgid "" +"Error Delete Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:609 +#, python-format +msgid "" +"Leaving delete_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:619 +#, fuzzy, python-format +msgid "Create export: %(volume)s" +msgstr "Re-exporting %s volumes" + +#: cinder/volume/drivers/emc/emc_smis_common.py:624 +#, python-format +msgid "create_export: Volume: %(volume)s Device ID: %(device_id)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:646 +#, python-format +msgid "" +"ExposePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(lun_name)s" +" InitiatorPortIDs: %(initiator)s DeviceAccesses: 2" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:661 +#, python-format +msgid "ExposePaths parameter LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:672 +#, python-format +msgid "Error mapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:676 +#, fuzzy, python-format +msgid "ExposePaths for volume %s completed successfully." +msgstr "volume %s: deleted successfully" + +#: cinder/volume/drivers/emc/emc_smis_common.py:692 +#, python-format +msgid "" +"HidePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(device_id)s " +"LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:705 +#, python-format +msgid "Error unmapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:709 +#, fuzzy, python-format +msgid "HidePaths for volume %s completed successfully." +msgstr "volume %s: deleted successfully" + +#: cinder/volume/drivers/emc/emc_smis_common.py:722 +#, python-format +msgid "" +"AddMembers: ConfigServicie: %(service)s MaskingGroup: %(masking_group)s" +" Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:735 +#, python-format +msgid "Error mapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:740 +#, fuzzy, python-format +msgid "AddMembers for volume %s completed successfully." +msgstr "volume %s: deleted successfully" + +#: cinder/volume/drivers/emc/emc_smis_common.py:753 +#, python-format +msgid "" +"RemoveMembers: ConfigServicie: %(service)s MaskingGroup: " +"%(masking_group)s Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:766 +#, python-format +msgid "Error unmapping volume %(vol)s. 
%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:771 +#, fuzzy, python-format +msgid "RemoveMembers for volume %s completed successfully." +msgstr "volume %s: deleted successfully" + +#: cinder/volume/drivers/emc/emc_smis_common.py:777 +#, python-format +msgid "Map volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:786 +#: cinder/volume/drivers/emc/emc_smis_common.py:816 +#, python-format +msgid "Cannot find Controller Configuration Service for storage system %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:800 +#, python-format +msgid "Unmap volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:806 +#, python-format +msgid "Volume %s is not mapped. No volume to unmap." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:830 +#, python-format +msgid "Initialize connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:836 +#, python-format +msgid "Volume %s is already mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:848 +#, python-format +msgid "Terminate connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:880 +#, python-format +msgid "Found Storage Type: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:883 +msgid "Storage type not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:899 +#, python-format +msgid "Found Masking View: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:902 +msgid "Masking View not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:924 +msgid "Ecom user not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:944 +#, python-format +msgid "Ecom IP: %(ecomIp)s Port: %(ecomPort)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:947 +msgid "Ecom server not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:954 +msgid "Cannot connect to ECOM server" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:966 +#, python-format +msgid "Found Replication Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:979 +#, python-format +msgid "Found Storage Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:992 +#, python-format +msgid "Found Controller Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1005 +#, python-format +msgid "Found Storage Hardware ID Management Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1049 +#, python-format +msgid "Pool %(storage_type)s is not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1055 +#, python-format +msgid "Storage system not found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1061 +#, python-format +msgid "Pool: %(pool)s SystemName: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1077 +#, python-format +msgid "Pool name: %(poolname)s System name: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1109 +#, python-format +msgid "Volume %(volumename)s not found on the array." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1112 +#, python-format +msgid "Volume name: %(volumename)s Volume instance: %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1125 +#, python-format +msgid "Source: %(volumename)s Target: %(snapshotname)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1148 +#, python-format +msgid "" +"Source: %(volumename)s Target: %(snapshotname)s. Storage Synchronized " +"not found. " +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1153 +#, python-format +msgid "" +"Storage system: %(storage_system)s Storage Synchronized instance: " +"%(sync)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1179 +#, python-format +msgid "Error finding %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1183 +#, python-format +msgid "Found %(name)s: %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1243 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage_system)s " +"and initiator %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1282 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage volume %(vol)s and initiator" +" %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1295 +#, python-format +msgid "" +"Volume %(name)s not found on the array. Cannot determine if there are " +"volumes mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1307 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage)s and " +"%(connector)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1319 +#, python-format +msgid "" +"Found %(numVolumesMapped)d volumes on storage system %(storage)s mapped " +"to %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1354 +#, python-format +msgid "Available device number on %(storage)s: %(device)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1397 +#, python-format +msgid "Device number not found for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1402 +#, python-format +msgid "Found device number %(device)d for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1412 +#, python-format +msgid "Device info: %(data)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1434 +#, python-format +msgid "Masking view: %(view)s DeviceMaskingGroup: %(masking)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1456 +#, python-format +msgid "Found Storage Processor System: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1484 +#, python-format +msgid "" +"iSCSIProtocolEndpoint for storage system %(storage_system)s and SP %(sp)s" +" is %(endpoint)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1513 +msgid "Error finding Storage Hardware ID Service." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1519 +#, python-format +msgid "" +"EMCGetTargetEndpoints: Service: %(service)s Storage HardwareIDs: " +"%(hardwareids)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1531 +msgid "Error finding Target WWNs." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1541 +#, python-format +msgid "Add target WWN: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1543 +#, python-format +msgid "Target WWNs: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1559 +#, python-format +msgid "Storage Hardware IDs for %(wwpns)s is %(foundInstances)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:158 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:167 +#, fuzzy, python-format +msgid "Cannot find device number for volume %s" +msgstr "Unable to locate volume %s" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:197 +#, python-format +msgid "Found iSCSI endpoint: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:204 +#, python-format +msgid "ISCSI endpoint not found for SP %(sp)s on storage system %(storage)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:221 +#, python-format +msgid "ISCSI properties: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:113 +msgid "read timed out" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:138 +msgid "do_setup." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:144 +msgid "check_for_setup_error." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:149 +msgid "check_for_setup_error: Can not get device type." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:153 +#, python-format +msgid "check_for_setup_error: Device type is:%(type)s, version is:%(version)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:160 +msgid "" +"check_for_setup_error: Product version not right. Please make sure the " +"product version is V1." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:171 +msgid "_get_device_type: Storage Pool must be configured." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:180 +#, python-format +msgid "create_volume:volume name: %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:194 +#, python-format +msgid "delete_volume: volume name: %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:201 +#, python-format +msgid "delete_volume:No need to delete volume. Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:210 +#, python-format +msgid "create_export: volume name:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:214 +#, python-format +msgid "create_export:Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:234 +#, python-format +msgid "initialize_connection: volume name: %(volume)s. initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:249 +#, python-format +msgid "" +"initialize_connection:Failed to find target ip for " +"initiator:%(initiatorname)s, please check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:259 +#, python-format +msgid "initialize_connection:Failed to find target iSCSI iqn. Target IP:%(ip)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:297 +#, python-format +msgid "" +"initialize_connection:host name: %(host)s, initiator name: %(ini)s, " +"hostport name: %(port)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:307 +#, python-format +msgid "" +"initialize_connection:Failed to find the given volume. volume " +"name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:359 +#, python-format +msgid "terminate_connection:volume name: %(volume)s, initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:368 +#, python-format +msgid "terminate_connection:Host does not exist. Host name:%(host)s." 
+msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:377 +#, python-format +msgid "terminate_connection:volume does not exist. volume name:%(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:396 +#, python-format +msgid "" +"terminate_connection:No map between host and volume. Host " +"name:%(hostname)s, volume name:%(volumename)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:414 +#, python-format +msgid "" +"terminate_connection:No initiator is added to the host. Host " +"name:%(hostname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:427 +#, python-format +msgid "create_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:434 +msgid "create_snapshot:Device does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:440 +msgid "create_snapshot:Resource pool needs 1GB valid size at least." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:447 +#, python-format +msgid "create_snapshot:Volume does not exist. Volume name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:456 +#, python-format +msgid "create_snapshot:Snapshot does not exist. Snapshot name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:468 +#, python-format +msgid "delete_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:475 +msgid "delete_snapshot:Device does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:484 +#, python-format +msgid "delete_snapshot:Snapshot does not exist. snapshot name:%(snap)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:497 +#, python-format +msgid "" +"create_volume_from_snapshot:snapshot name:%(snapshot)s, volume " +"name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:505 +#, python-format +msgid "" +"create_volume_from_snapshot:Device does not support create volume from " +"snapshot. Volume name:%(volume)s, snapshot name:%(snapshot)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:516 +#, python-format +msgid "" +"create_volume_from_snapshot:Snapshot does not exist. Snapshot " +"name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:567 +msgid "Config file is wrong. Controler IP, UserName and UserPassword must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:573 +#, python-format +msgid "_check_conf_file: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:584 +#, python-format +msgid "_read_xml:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:612 +#, python-format +msgid "Write login information to xml error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:616 +#, python-format +msgid "_get_login_info error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:638 +#, python-format +msgid "" +"Config file is wrong. LUNType must be \"Thin\" or \"Thick\". " +"LUNType:%(type)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:665 +msgid "_get_lun_set_info:Use default prefetch type. Prefetch type:Intelligent." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:676 +msgid "" +"_get_lun_set_info:No available pools! Please check whether storage pool " +"is created." 
+msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:686 +#, python-format +msgid "_get_lun_set_info:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:736 +msgid "" +"_get_maximum_pool:maxpoolid is None. Please check config file and make " +"sure the \"Name\" in \"StoragePool\" is right." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:754 +#, python-format +msgid "_get_iscsi_info:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:764 +#, python-format +msgid "CLI command:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:831 +#, python-format +msgid "_execute_cli:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:838 +#, python-format +msgid "" +"_name_translate:Name in cinder: %(old)s, new name in storage system: " +"%(new)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:872 +#, python-format +msgid "" +"_create_hostgroup:Failed to Create hostgroup. Hostgroup name: %(name)s. " +"out:%(out)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:901 +#, python-format +msgid "" +"_add_host:Failed to add host to hostgroup. host name:%(host)s hostgroup " +"id:%(hostgroup)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:927 +#, python-format +msgid "" +"_add_initiator:Failed to add initiator. initiator name:%(name)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:941 +#, python-format +msgid "" +"_delete_initiator:ERROE:Failed to delete initiator. initiator " +"name:%(name)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:991 +#, python-format +msgid "" +"_add_hostport:Failed to add hostport. port name:%(port)s port " +"information:%(info)s host id:%(host)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1009 +#, python-format +msgid "_delete_hostport:Failed to delete host port. port id:%(portid)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1016 +#, python-format +msgid "_get_tgt_iqn:iSCSI IP is %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1049 +#, python-format +msgid "_get_tgt_iqn:iSCSI target iqn is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1086 +#, python-format +msgid "" +"_map_lun:Failed to add hostmap. hostid:%(host)s lunid:%(lun)s " +"hostlunid:%(hostlunid)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1122 +#, python-format +msgid "" +"_delete_map:There are IOs accessing the system. Retry to delete host map." +" map id:%(mapid)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1128 +#, python-format +msgid "_delete_map:Failed to delete host map. mapid:%(mapid)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1142 +#, python-format +msgid "_delete_host: Failed delete host. host id:%(hostid)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1223 +#, python-format +msgid "" +"_active_snapshot:Failed to active snapshot. snapshot id:%(name)s. " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1237 +#, python-format +msgid "" +"_disable_snapshot:Failed to disable snapshot. snapshot id:%(id)s. " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1251 +#, python-format +msgid "" +"_delete_snapshot:Failed to delete snapshot. snapshot id:%(id)s. 
" +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1308 +#, python-format +msgid "_create_volume:Failed to Create volume. volume name:%(name)s. out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1321 +#, python-format +msgid "_delete_volume:Failed to delete volume. Volume name:%(name)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1338 +#, python-format +msgid "" +"_create_luncopy:Failed to Create LUNcopy. LUNcopy name:%(name)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1352 +#, python-format +msgid "" +"_start_luncopy:Failed to start LUNcopy. LUNcopy id:%(luncopyid)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1382 +#, python-format +msgid "" +"_wait_for_luncopy:LUNcopy status is not normal. LUNcopy " +"name:%(luncopyname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1415 +#, python-format +msgid "" +"_delete_luncopy:Failed to delete LUNcopy. LUNcopy id:%(luncopyid)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1430 +#, python-format +msgid "" +"_create_snapshot:Failed to Create snapshot. Snapshot name:%(name)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1468 +#, python-format +msgid "" +"_change_lun_controller:Failed to change lun owning controller. lun " +"id:%(lunid)s. new controller:%(controller)s. out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1484 +msgid "_is_resource_pool_enough:Resource pool for snapshot not be added." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:123 +#, python-format +msgid "API %(name)s failed: %(reason)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:134 +#: cinder/volume/drivers/netapp/iscsi.py:1167 +#, python-format +msgid "Using WSDL: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:144 +#, python-format +msgid "Using DFM server: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:149 +#, python-format +msgid "Using storage service: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:154 +#, python-format +msgid "Using storage service prefix: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:160 +#, python-format +msgid "Using vfiler: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:173 +msgid "Either netapp_storage_service or netapp_storage_service_prefix must be set" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:204 +msgid "Connected to DFM server" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:282 +#, python-format +msgid "Discovered %(dataset_count)s datasets and %(lun_count)s LUNs" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:317 +#, python-format +msgid "Job failed: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:346 +msgid "" +"Attempt to use volume_type without specifying " +"netapp_storage_service_prefix flag." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:350 +msgid "" +"You must set the netapp_storage_service flag in order to create volumes " +"with no volume_type." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:420 +msgid "Failed to provision dataset member" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:435 +msgid "No LUN was created by the provision job" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:463 +#: cinder/volume/drivers/netapp/iscsi.py:1250 +#, python-format +msgid "No entry in LUN table for volume %(name)s." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:481 +msgid "Failed to remove and delete dataset LUN member" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:497 +msgid "Failed to remove and delete dataset Qtree member" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:538 +#, python-format +msgid "No entry in LUN table for volume %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:558 +#, python-format +msgid "Failed to get LUN details for LUN ID %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:575 +#, python-format +msgid "Failed to get host details for host ID %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:812 +#: cinder/volume/drivers/netapp/iscsi.py:859 +#, fuzzy, python-format +msgid "No LUN ID for volume %s" +msgstr "Unable to locate volume %s" + +#: cinder/volume/drivers/netapp/iscsi.py:821 +#, python-format +msgid "Failed to get target portal for filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:826 +#, python-format +msgid "Failed to get target IQN for filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:930 +msgid "" +"Failed to get LUN list. Is the DFM host time-synchronized with Cinder " +"host?" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1042 +#: cinder/volume/drivers/netapp/iscsi.py:1381 +#: cinder/volume/drivers/netapp/iscsi.py:1699 +#: cinder/volume/drivers/netapp/nfs.py:74 +#, python-format +msgid "" +"Cannot create volume of size %(vol_size)s from snapshot of size " +"%(snap_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1054 +#, python-format +msgid "" +"Cannot create volume of type %(new_type)s from snapshot of type " +"%(old_type)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1075 +#: cinder/volume/drivers/netapp/nfs.py:273 +#, python-format +msgid "" +"Cannot create clone of size %(vol_size)s from volume of size " +"%(src_vol_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1086 +#, python-format +msgid "Cannot create clone of type %(new_type)s from volume of type %(old_type)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1143 +#, python-format +msgid "No metadata property %(prop)s defined for the LUN %(name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1216 +#: cinder/volume/drivers/netapp/iscsi.py:1561 +msgid "Success getting LUN list from server" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1238 +#: cinder/volume/drivers/netapp/iscsi.py:1576 +#, python-format +msgid "Created LUN with name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1254 +#: cinder/volume/drivers/netapp/iscsi.py:1369 +#: cinder/volume/drivers/netapp/iscsi.py:1593 +#, fuzzy, python-format +msgid "Destroyed LUN %s" +msgstr "Nested return %s" + +#: cinder/volume/drivers/netapp/iscsi.py:1291 +#, python-format +msgid "Mapped LUN %(handle)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1298 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(handle)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1303 +#: cinder/volume/drivers/netapp/iscsi.py:1637 +#, python-format +msgid "Failed to get LUN target details for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1307 +#: cinder/volume/drivers/netapp/iscsi.py:1648 +#, python-format +msgid "Failed to get target portal for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1311 +#: cinder/volume/drivers/netapp/iscsi.py:1651 +#, python-format 
+msgid "Failed to get target IQN for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1344 +#, python-format +msgid "Unmapped LUN %(handle)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1365 +#, python-format +msgid "No entry in LUN table for snapshot %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1409 +#: cinder/volume/drivers/netapp/iscsi.py:1907 +msgid "Object is not a NetApp LUN." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1419 +#: cinder/volume/drivers/netapp/iscsi.py:2188 +#, python-format +msgid "Cloned LUN with new name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1439 +#, fuzzy, python-format +msgid "Could not find handle for LUN named %s" +msgstr "Unable to locate volume %s" + +#: cinder/volume/drivers/netapp/iscsi.py:1458 +#: cinder/volume/drivers/netapp/iscsi.py:1936 +#, python-format +msgid "" +"Cannot clone volume of size %(vol_size)s from src volume of size " +"%(src_vol_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1518 +#, python-format +msgid "Using NetApp filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1585 +#, python-format +msgid "No entry in LUN table for volume/snapshot %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1628 +#, python-format +msgid "Mapped LUN %(name)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1632 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(name)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1688 +#, fuzzy, python-format +msgid "Snapshot %s deletion successful" +msgstr "volume %s: deleted successfully" + +#: cinder/volume/drivers/netapp/iscsi.py:1717 +#, python-format +msgid "Unmapped LUN %(name)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1735 +#, fuzzy, python-format +msgid "Failed to get vol with required size for volume: %s" +msgstr "Unable to locate volume %s" + +#: cinder/volume/drivers/netapp/iscsi.py:1811 +#, python-format +msgid "Error mapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1829 +#, python-format +msgid "Error unmapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1923 +#, python-format +msgid "Could not find attribute for LUN named %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2057 +#, python-format +msgid "No iscsi service found for vserver %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2192 +#, python-format +msgid "No clonned lun named %s found on the filer" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2377 +#, python-format +msgid "Error finding luns for volume %(vol)s. Verify volume exists." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2481 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s completed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2484 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s failed" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:312 +#, python-format +msgid "" +"Cloning with params ip %(host_ip)s, exp_path\n" +" %(export_path)s, vol %(volume_name)s,\n" +" clone_name %(clone_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:461 +#, python-format +msgid "No interface found on cluster for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:487 +#, python-format +msgid "" +"No volume on cluster with vserver\n" +" %(vserver)s and junction path " +"%(junction)s\n" +" " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:493 +#, python-format +msgid "" +"Cloning with params volume %(volume)s,src %(src_path)s,\n" +" dest %(dest_path)s, vserver %(vserver)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:547 +#, python-format +msgid "No storage path found for export path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:555 +#, python-format +msgid "Cloning with src %(src_path)s, dest %(dest_path)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/__init__.py:27 +msgid "Nexenta SA returned the error" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:64 +#, python-format +msgid "Sending JSON data: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:69 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:75 +msgid "No headers in server response" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:76 +msgid "Bad response from server" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:79 +#, python-format +msgid "Got response: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:99 +#, python-format +msgid "Volume %s does not exist in Nexenta SA" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:211 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:220 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:228 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:238 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:274 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:282 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:74 +#, python-format +msgid "CLIQ command returned %s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:80 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:88 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. 
Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:118 +#, python-format +msgid "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. " +"Result=%(_xml)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:171 +#, python-format +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:235 +msgid "local_path not supported" +msgstr "" + +#: cinder/volume/drivers/san/san.py:148 +#, fuzzy, python-format +msgid "Error running SSH command: %s" +msgstr "Unexpected error while running command." + +#: cinder/volume/drivers/san/san.py:169 +msgid "Specify san_password or san_private_key" +msgstr "" + +#: cinder/volume/drivers/san/san.py:173 +msgid "san_ip must be set" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:80 +#, python-format +msgid "Cannot parse list-view output: %s" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:174 +#, python-format +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:257 +#, python-format +msgid "SSH Command failed after '%(total_attempts)r' attempts: '%(command)s'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:261 +#, fuzzy, python-format +msgid "Error running ssh command: %s" +msgstr "Unexpected error while running command." + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:446 +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:104 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:103 +#, python-format +msgid "CPG (%s) doesn't exist on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:493 +#, python-format +msgid "Must specify a valid persona %(valid)s, value '%(persona)s' is invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:532 +#, python-format +msgid "" +"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " +"invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:569 +#, python-format +msgid "Volume (%s) already exists on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:622 +#, fuzzy, python-format +msgid "Unexpected state while cloning %s" +msgstr "Unexpected error while running command." + +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:97 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:96 +msgid "Login to 3PAR array invalid" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:218 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:237 +#, python-format +msgid "The hostname must be called '%s'" +msgstr "" + diff --git a/cinder/locale/en_US/LC_MESSAGES/cinder.po b/cinder/locale/en_US/LC_MESSAGES/cinder.po new file mode 100644 index 0000000000..52a35261c4 --- /dev/null +++ b/cinder/locale/en_US/LC_MESSAGES/cinder.po @@ -0,0 +1,5977 @@ +# English (United States) translations for cinder. +# Copyright (C) 2013 ORGANIZATION +# This file is distributed under the same license as the cinder project. 
+# +# Translators: +msgid "" +msgstr "" +"Project-Id-Version: Cinder\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2013-05-21 18:10+0000\n" +"PO-Revision-Date: 2013-05-08 11:44+0000\n" +"Last-Translator: markmc \n" +"Language-Team: en_US \n" +"Plural-Forms: nplurals=2; plural=(n != 1)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 0.9.6\n" + +#: cinder/context.py:61 +#, python-format +msgid "Arguments dropped when creating context: %s" +msgstr "Arguments dropped when creating context: %s" + +#: cinder/context.py:92 +#, python-format +msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" +msgstr "read_deleted can only be one of 'no', 'yes' or 'only', not %r" + +#: cinder/exception.py:63 +msgid "Unexpected error while running command." +msgstr "Unexpected error while running command." + +#: cinder/exception.py:66 +#, python-format +msgid "" +"%(description)s\n" +"Command: %(cmd)s\n" +"Exit code: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" +msgstr "" +"%(description)s\n" +"Command: %(cmd)s\n" +"Exit code: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" + +#: cinder/exception.py:90 +msgid "DB exception wrapped." +msgstr "DB exception wrapped." + +#: cinder/exception.py:104 +msgid "An unknown exception occurred." +msgstr "An unknown exception occurred." + +#: cinder/exception.py:125 cinder/openstack/common/rpc/common.py:86 +msgid "Exception in string format operation" +msgstr "Exception in string format operation" + +#: cinder/exception.py:138 +msgid "Connection to glance failed" +msgstr "Connection to glance failed" + +#: cinder/exception.py:142 +msgid "Not authorized." +msgstr "Not authorized." + +#: cinder/exception.py:147 +msgid "User does not have admin privileges" +msgstr "User does not have admin privileges" + +#: cinder/exception.py:151 +#, python-format +msgid "Policy doesn't allow %(action)s to be performed." +msgstr "Policy doesn't allow %(action)s to be performed." + +#: cinder/exception.py:155 +#, python-format +msgid "Not authorized for image %(image_id)s." +msgstr "Not authorized for image %(image_id)s." + +#: cinder/exception.py:159 +msgid "Unacceptable parameters." +msgstr "Unacceptable parameters." + +#: cinder/exception.py:164 +msgid "Invalid snapshot" +msgstr "Invalid snapshot" + +#: cinder/exception.py:168 +#, python-format +msgid "Volume %(volume_id)s is still attached, detach volume first." +msgstr "Volume %(volume_id)s is still attached, detach volume first." + +#: cinder/exception.py:172 +msgid "Failed to load data into json format" +msgstr "Failed to load data into json format" + +#: cinder/exception.py:176 +msgid "The request is invalid." +msgstr "The request is invalid." + +#: cinder/exception.py:180 +msgid "The results are invalid." +msgstr "The results are invalid." + +#: cinder/exception.py:184 +msgid "Invalid input received" +msgstr "Invalid input received" + +#: cinder/exception.py:188 +msgid "Invalid volume type" +msgstr "Invalid volume type" + +#: cinder/exception.py:192 +msgid "Invalid volume" +msgstr "Invalid volume" + +#: cinder/exception.py:196 +#, python-format +msgid "Invalid content type %(content_type)s." +msgstr "Invalid content type %(content_type)s." + +#: cinder/exception.py:200 +msgid "Invalid Parameter: Unicode is not supported by the current database." +msgstr "Invalid Parameter: Unicode is not supported by the current database." 
+
+#: cinder/exception.py:207
+#, python-format
+msgid "%(err)s"
+msgstr "%(err)s"
+
+#: cinder/exception.py:211
+msgid "Service is unavailable at this time."
+msgstr "Service is unavailable at this time."
+
+#: cinder/exception.py:215
+#, python-format
+msgid "Image %(image_id)s is unacceptable: %(reason)s"
+msgstr "Image %(image_id)s is unacceptable: %(reason)s"
+
+#: cinder/exception.py:219
+msgid "Expected a uuid but received %(uuid)s."
+msgstr "Expected a uuid but received %(uuid)s."
+
+#: cinder/exception.py:223
+msgid "Resource could not be found."
+msgstr "Resource could not be found."
+
+#: cinder/exception.py:229
+#, python-format
+msgid "Volume %(volume_id)s persistence file could not be found."
+msgstr "Volume %(volume_id)s persistence file could not be found."
+
+#: cinder/exception.py:233
+#, python-format
+msgid "Volume %(volume_id)s could not be found."
+msgstr "Volume %(volume_id)s could not be found."
+
+#: cinder/exception.py:237
+#, python-format
+msgid "Unable to locate account %(account_name)s on SolidFire device"
+msgstr "Unable to locate account %(account_name)s on SolidFire device"
+
+#: cinder/exception.py:242
+#, python-format
+msgid "Volume not found for instance %(instance_id)s."
+msgstr "Volume not found for instance %(instance_id)s."
+
+#: cinder/exception.py:246
+#, python-format
+msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s."
+msgstr "Volume %(volume_id)s has no metadata with key %(metadata_key)s."
+
+#: cinder/exception.py:251 cinder/exception.py:264
+msgid "Invalid metadata"
+msgstr "Invalid metadata"
+
+#: cinder/exception.py:255 cinder/exception.py:268
+msgid "Invalid metadata size"
+msgstr "Invalid metadata size"
+
+#: cinder/exception.py:259
+#, python-format
+msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s."
+msgstr "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s."
+
+#: cinder/exception.py:272
+#, python-format
+msgid "Volume type %(volume_type_id)s could not be found."
+msgstr "Volume type %(volume_type_id)s could not be found."
+
+#: cinder/exception.py:276
+#, python-format
+msgid "Volume type with name %(volume_type_name)s could not be found."
+msgstr "Volume type with name %(volume_type_name)s could not be found."
+
+#: cinder/exception.py:281
+#, python-format
+msgid ""
+"Volume Type %(volume_type_id)s has no extra specs with key "
+"%(extra_specs_key)s."
+msgstr ""
+"Volume Type %(volume_type_id)s has no extra specs with key "
+"%(extra_specs_key)s."
+
+#: cinder/exception.py:286
+#, python-format
+msgid "Snapshot %(snapshot_id)s could not be found."
+msgstr "Snapshot %(snapshot_id)s could not be found."
+
+#: cinder/exception.py:290
+#, python-format
+msgid "deleting volume %(volume_name)s that has snapshots"
+msgstr "deleting volume %(volume_name)s that has snapshots"
+
+#: cinder/exception.py:294
+#, python-format
+msgid "deleting snapshot %(snapshot_name)s that has dependent volumes"
+msgstr "deleting snapshot %(snapshot_name)s that has dependent volumes"
+
+#: cinder/exception.py:299
+#, python-format
+msgid "No target id found for volume %(volume_id)s."
+msgstr "No target id found for volume %(volume_id)s."
+
+#: cinder/exception.py:303
+#, python-format
+msgid "Failed to create iscsi target for volume %(volume_id)s."
+msgstr "Failed to create iscsi target for volume %(volume_id)s."
+
+#: cinder/exception.py:307
+#, python-format
+msgid "Failed to attach iSCSI target for volume %(volume_id)s."
+msgstr "Failed to attach iSCSI target for volume %(volume_id)s."
+ +#: cinder/exception.py:311 +#, python-format +msgid "Failed to remove iscsi target for volume %(volume_id)s." +msgstr "Failed to remove iscsi target for volume %(volume_id)s." + +#: cinder/exception.py:315 +#, python-format +msgid "No disk at %(location)s" +msgstr "No disk at %(location)s" + +#: cinder/exception.py:319 +#, python-format +msgid "Invalid image href %(image_href)s." +msgstr "Invalid image href %(image_href)s." + +#: cinder/exception.py:323 +#, python-format +msgid "Image %(image_id)s could not be found." +msgstr "Image %(image_id)s could not be found." + +#: cinder/exception.py:327 +#, python-format +msgid "Service %(service_id)s could not be found." +msgstr "Service %(service_id)s could not be found." + +#: cinder/exception.py:331 +#, python-format +msgid "Host %(host)s could not be found." +msgstr "Host %(host)s could not be found." + +#: cinder/exception.py:335 +#, python-format +msgid "Scheduler Host Filter %(filter_name)s could not be found." +msgstr "Scheduler Host Filter %(filter_name)s could not be found." + +#: cinder/exception.py:339 +#, python-format +msgid "Scheduler Host Weigher %(weigher_name)s could not be found." +msgstr "Scheduler Host Weigher %(weigher_name)s could not be found." + +#: cinder/exception.py:343 +#, python-format +msgid "Could not find binary %(binary)s on host %(host)s." +msgstr "Could not find binary %(binary)s on host %(host)s." + +#: cinder/exception.py:347 +#, python-format +msgid "Invalid reservation expiration %(expire)s." +msgstr "Invalid reservation expiration %(expire)s." + +#: cinder/exception.py:351 +#, python-format +msgid "" +"Change would make usage less than 0 for the following resources: " +"%(unders)s" +msgstr "" +"Change would make usage less than 0 for the following resources: " +"%(unders)s" + +#: cinder/exception.py:356 +msgid "Quota could not be found" +msgstr "Quota could not be found" + +#: cinder/exception.py:360 +#, python-format +msgid "Unknown quota resources %(unknown)s." +msgstr "Unknown quota resources %(unknown)s." + +#: cinder/exception.py:364 +#, python-format +msgid "Quota for project %(project_id)s could not be found." +msgstr "Quota for project %(project_id)s could not be found." + +#: cinder/exception.py:368 +#, python-format +msgid "Quota class %(class_name)s could not be found." +msgstr "Quota class %(class_name)s could not be found." + +#: cinder/exception.py:372 +#, python-format +msgid "Quota usage for project %(project_id)s could not be found." +msgstr "Quota usage for project %(project_id)s could not be found." + +#: cinder/exception.py:376 +#, python-format +msgid "Quota reservation %(uuid)s could not be found." +msgstr "Quota reservation %(uuid)s could not be found." + +#: cinder/exception.py:380 +#, python-format +msgid "Quota exceeded for resources: %(overs)s" +msgstr "Quota exceeded for resources: %(overs)s" + +#: cinder/exception.py:384 +#, python-format +msgid "Migration %(migration_id)s could not be found." +msgstr "Migration %(migration_id)s could not be found." + +#: cinder/exception.py:388 +#, python-format +msgid "Migration not found for instance %(instance_id)s with status %(status)s." +msgstr "Migration not found for instance %(instance_id)s with status %(status)s." + +#: cinder/exception.py:393 +#, python-format +msgid "File %(file_path)s could not be found." +msgstr "File %(file_path)s could not be found." 
+ +#: cinder/exception.py:397 +#, python-format +msgid "Class %(class_name)s could not be found: %(exception)s" +msgstr "Class %(class_name)s could not be found: %(exception)s" + +#: cinder/exception.py:401 +msgid "Action not allowed." +msgstr "Action not allowed." + +#: cinder/exception.py:410 +#, python-format +msgid "Key pair %(key_name)s already exists." +msgstr "Key pair %(key_name)s already exists." + +#: cinder/exception.py:414 +#, python-format +msgid "Volume Type %(id)s already exists." +msgstr "Volume Type %(id)s already exists." + +#: cinder/exception.py:418 +msgid "Migration error" +msgstr "Migration error" + +#: cinder/exception.py:422 +#, python-format +msgid "Malformed message body: %(reason)s" +msgstr "Malformed message body: %(reason)s" + +#: cinder/exception.py:426 +#, python-format +msgid "Could not find config at %(path)s" +msgstr "Could not find config at %(path)s" + +#: cinder/exception.py:430 +#, python-format +msgid "Could not load paste app '%(name)s' from %(path)s" +msgstr "Could not load paste app '%(name)s' from %(path)s" + +#: cinder/exception.py:434 +#, python-format +msgid "No valid host was found. %(reason)s" +msgstr "No valid host was found. %(reason)s" + +#: cinder/exception.py:438 +#, python-format +msgid "Host %(host)s is not up or doesn't exist." +msgstr "Host %(host)s is not up or doesn't exist." + +#: cinder/exception.py:442 +msgid "Quota exceeded" +msgstr "Quota exceeded" + +#: cinder/exception.py:449 +msgid "Requested volume or snapshot exceeds allowed Gigabytes quota" +msgstr "Requested volume or snapshot exceeds allowed Gigabytes quota" + +#: cinder/exception.py:454 +msgid "Maximum volume/snapshot size exceeded" +msgstr "Maximum volume/snapshot size exceeded" + +#: cinder/exception.py:458 +#, python-format +msgid "Maximum number of volumes allowed (%(allowed)d) exceeded" +msgstr "Maximum number of volumes allowed (%(allowed)d) exceeded" + +#: cinder/exception.py:462 +#, python-format +msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" +msgstr "Maximum number of snapshots allowed (%(allowed)d) exceeded" + +#: cinder/exception.py:466 +#, python-format +msgid "Detected more than one volume with name %(vol_name)s" +msgstr "Detected more than one volume with name %(vol_name)s" + +#: cinder/exception.py:470 +#, python-format +msgid "3PAR Host already exists: %(err)s. %(info)s" +msgstr "3PAR Host already exists: %(err)s. 
%(info)s" + +#: cinder/exception.py:474 +#, python-format +msgid "Invalid 3PAR Domain: %(err)s" +msgstr "Invalid 3PAR Domain: %(err)s" + +#: cinder/exception.py:478 +#, python-format +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +msgstr "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" + +#: cinder/exception.py:483 +msgid "Bad response from SolidFire API" +msgstr "Bad response from SolidFire API" + +#: cinder/exception.py:487 +#, python-format +msgid "Error in SolidFire API response: data=%(data)s" +msgstr "Error in SolidFire API response: data=%(data)s" + +#: cinder/exception.py:491 +#, python-format +msgid "Unknown or unsupported command %(cmd)s" +msgstr "Unknown or unsupported command %(cmd)s" + +#: cinder/exception.py:495 +#, python-format +msgid "Malformed response to command %(cmd)s: %(reason)s" +msgstr "Malformed response to command %(cmd)s: %(reason)s" + +#: cinder/exception.py:499 +#, python-format +msgid "Bad HTTP response status %(status)s" +msgstr "Bad HTTP response status %(status)s" + +#: cinder/exception.py:503 +#, python-format +msgid "Operation failed with status=%(status)s. Full dump: %(data)s" +msgstr "Operation failed with status=%(status)s. Full dump: %(data)s" + +#: cinder/exception.py:507 +#, python-format +msgid "Unable to create server object for initiator %(name)s" +msgstr "Unable to create server object for initiator %(name)s" + +#: cinder/exception.py:511 +#, python-format +msgid "Unable to find server object for initiator %(name)s" +msgstr "Unable to find server object for initiator %(name)s" + +#: cinder/exception.py:515 +msgid "Unable to find any active VPSA controller" +msgstr "Unable to find any active VPSA controller" + +#: cinder/exception.py:519 +#, python-format +msgid "Failed to retrieve attachments for volume %(name)s" +msgstr "Failed to retrieve attachments for volume %(name)s" + +#: cinder/exception.py:523 +#, python-format +msgid "Invalid attachment info for volume %(name)s: %(reason)s" +msgstr "Invalid attachment info for volume %(name)s: %(reason)s" + +#: cinder/exception.py:527 +#, python-format +msgid "Instance %(instance_id)s could not be found." +msgstr "Instance %(instance_id)s could not be found." + +#: cinder/exception.py:531 +#, python-format +msgid "Bad or unexpected response from the storage volume backend API: %(data)s" +msgstr "Bad or unexpected response from the storage volume backend API: %(data)s" + +#: cinder/exception.py:536 +msgid "Unknown NFS exception" +msgstr "Unknown NFS exception" + +#: cinder/exception.py:540 +msgid "No mounted NFS shares found" +msgstr "No mounted NFS shares found" + +#: cinder/exception.py:544 cinder/exception.py:556 +#, python-format +msgid "There is no share which can host %(volume_size)sG" +msgstr "There is no share which can host %(volume_size)sG" + +#: cinder/exception.py:548 +msgid "Unknown Gluster exception" +msgstr "Unknown Gluster exception" + +#: cinder/exception.py:552 +msgid "No mounted Gluster shares found" +msgstr "No mounted Gluster shares found" + +#: cinder/exception.py:560 +#, python-format +msgid "" +"Glance metadata cannot be updated, key %(key)s exists for volume id " +"%(volume_id)s" +msgstr "" +"Glance metadata cannot be updated, key %(key)s exists for volume id " +"%(volume_id)s" + +#: cinder/exception.py:565 +msgid "Failed to copy image to volume" +msgstr "Failed to copy image to volume" + +#: cinder/exception.py:569 +#, python-format +msgid "Backup %(backup_id)s could not be found." 
+msgstr "Backup %(backup_id)s could not be found." + +#: cinder/exception.py:573 +#, python-format +msgid "Invalid backup: %(reason)s" +msgstr "Invalid backup: %(reason)s" + +#: cinder/exception.py:577 +msgid "Connection to swift failed" +msgstr "Connection to swift failed" + +#: cinder/flags.py:145 +msgid "Deploy v1 of the Cinder API. " +msgstr "Deploy v1 of the Cinder API. " + +#: cinder/flags.py:148 +msgid "Deploy v2 of the Cinder API. " +msgstr "Deploy v2 of the Cinder API. " + +#: cinder/manager.py:158 +#, python-format +msgid "Skipping %(full_task_name)s, %(ticks_to_skip)s ticks left until next run" +msgstr "Skipping %(full_task_name)s, %(ticks_to_skip)s ticks left until next run" + +#: cinder/manager.py:164 +#, python-format +msgid "Running periodic task %(full_task_name)s" +msgstr "Running periodic task %(full_task_name)s" + +#: cinder/manager.py:171 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "Error during %(full_task_name)s: %(e)s" + +#: cinder/manager.py:216 +msgid "Notifying Schedulers of capabilities ..." +msgstr "Notifying Schedulers of capabilities ..." + +#: cinder/policy.py:30 +msgid "JSON file representing policy" +msgstr "JSON file representing policy" + +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" +msgstr "Rule checked when requested rule is not found" + +#: cinder/quota.py:704 +#, python-format +msgid "Created reservations %(reservations)s" +msgstr "Created reservations %(reservations)s" + +#: cinder/quota.py:726 +#, python-format +msgid "Failed to commit reservations %(reservations)s" +msgstr "Failed to commit reservations %(reservations)s" + +#: cinder/quota.py:747 +#, python-format +msgid "Failed to roll back reservations %(reservations)s" +msgstr "Failed to roll back reservations %(reservations)s" + +#: cinder/service.py:124 +msgid "SIGTERM received" +msgstr "SIGTERM received" + +#: cinder/service.py:174 cinder/openstack/common/service.py:164 +msgid "Parent process has died unexpectedly, exiting" +msgstr "Parent process has died unexpectedly, exiting" + +#: cinder/service.py:213 cinder/openstack/common/service.py:200 +msgid "Forking too fast, sleeping" +msgstr "Forking too fast, sleeping" + +#: cinder/service.py:231 cinder/openstack/common/service.py:121 +#: cinder/openstack/common/service.py:218 +#, python-format +msgid "Caught %s, exiting" +msgstr "Caught %s, exiting" + +#: cinder/service.py:236 cinder/openstack/common/service.py:223 +msgid "Unhandled exception" +msgstr "Unhandled exception" + +#: cinder/service.py:243 cinder/openstack/common/service.py:230 +#, python-format +msgid "Started child %d" +msgstr "Started child %d" + +#: cinder/service.py:253 cinder/openstack/common/service.py:240 +#, python-format +msgid "Starting %d workers" +msgstr "Starting %d workers" + +#: cinder/service.py:272 cinder/openstack/common/service.py:257 +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "Child %(pid)d killed by signal %(sig)d" + +#: cinder/service.py:275 +#, python-format +msgid "Child %(pid)d exited with status %(code)d" +msgstr "Child %(pid)d exited with status %(code)d" + +#: cinder/service.py:278 cinder/openstack/common/service.py:265 +#, python-format +msgid "pid %d not in child list" +msgstr "pid %d not in child list" + +#: cinder/service.py:286 +#, python-format +msgid "_wait_child %d" +msgstr "_wait_child %d" + +#: cinder/service.py:302 +#, python-format +msgid "wait wrap.failed %s" +msgstr "wait wrap.failed %s" + +#: cinder/service.py:310 
cinder/openstack/common/service.py:293 +#, python-format +msgid "Caught %s, stopping children" +msgstr "Caught %s, stopping children" + +#: cinder/service.py:321 cinder/openstack/common/service.py:304 +#, python-format +msgid "Waiting on %d children to exit" +msgstr "Waiting on %d children to exit" + +#: cinder/service.py:353 +#, python-format +msgid "Starting %(topic)s node (version %(version_string)s)" +msgstr "Starting %(topic)s node (version %(version_string)s)" + +#: cinder/service.py:367 cinder/openstack/common/rpc/service.py:47 +#, python-format +msgid "Creating Consumer connection for Service %s" +msgstr "Creating Consumer connection for Service %s" + +#: cinder/service.py:458 +msgid "Service killed that has no database entry" +msgstr "Service killed that has no database entry" + +#: cinder/service.py:495 +msgid "The service database object disappeared, Recreating it." +msgstr "The service database object disappeared, Recreating it." + +#: cinder/service.py:510 +msgid "Recovered model server connection!" +msgstr "Recovered model server connection!" + +#: cinder/service.py:516 +msgid "model server went away" +msgstr "model server went away" + +#: cinder/service.py:608 +msgid "Full set of FLAGS:" +msgstr "Full set of FLAGS:" + +#: cinder/service.py:615 +#, python-format +msgid "%(flag)s : FLAG SET " +msgstr "%(flag)s : FLAG SET " + +#: cinder/utils.py:96 +#, python-format +msgid "Fetching %s" +msgstr "Fetching %s" + +#: cinder/utils.py:147 cinder/openstack/common/processutils.py:122 +#, python-format +msgid "Got unknown keyword args to utils.execute: %r" +msgstr "Got unknown keyword args to utils.execute: %r" + +#: cinder/utils.py:153 +msgid "" +"The root_helper option (which lets you specify a root wrapper different " +"from cinder-rootwrap, and defaults to using sudo) is now deprecated. You " +"should use the rootwrap_config option instead." +msgstr "" +"The root_helper option (which lets you specify a root wrapper different " +"from cinder-rootwrap, and defaults to using sudo) is now deprecated. You " +"should use the rootwrap_config option instead." + +#: cinder/utils.py:169 cinder/openstack/common/processutils.py:137 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "Running cmd (subprocess): %s" + +#: cinder/utils.py:186 cinder/utils.py:267 +#: cinder/openstack/common/processutils.py:162 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:224 +#, python-format +msgid "Result was %s" +msgstr "Result was %s" + +#: cinder/utils.py:199 cinder/openstack/common/processutils.py:174 +#, python-format +msgid "%r failed. Retrying." +msgstr "%r failed. Retrying." 
+
+#: cinder/utils.py:241 cinder/volume/drivers/san/hp/hp_3par_common.py:197
+#, python-format
+msgid "Running cmd (SSH): %s"
+msgstr "Running cmd (SSH): %s"
+
+#: cinder/utils.py:243
+msgid "Environment not supported over SSH"
+msgstr "Environment not supported over SSH"
+
+#: cinder/utils.py:247
+msgid "process_input not supported over SSH"
+msgstr "process_input not supported over SSH"
+
+#: cinder/utils.py:316 cinder/volume/drivers/huawei/huawei_iscsi.py:91
+msgid "Specify a password or private_key"
+msgstr "Specify a password or private_key"
+
+#: cinder/utils.py:332 cinder/volume/drivers/huawei/huawei_iscsi.py:99
+#, python-format
+msgid "Error connecting via SSH: %s"
+msgstr "Error connecting via SSH: %s"
+
+#: cinder/utils.py:373
+#, python-format
+msgid "debug in callback: %s"
+msgstr "debug in callback: %s"
+
+#: cinder/utils.py:532
+#, python-format
+msgid "Link Local address is not found: %s"
+msgstr "Link Local address is not found: %s"
+
+#: cinder/utils.py:535
+#, python-format
+msgid "Couldn't get Link Local IP of %(interface)s: %(ex)s"
+msgstr "Couldn't get Link Local IP of %(interface)s: %(ex)s"
+
+#: cinder/utils.py:569
+#, python-format
+msgid "Invalid backend: %s"
+msgstr "Invalid backend: %s"
+
+#: cinder/utils.py:580
+#, python-format
+msgid "backend %s"
+msgstr "backend %s"
+
+#: cinder/utils.py:630
+msgid "in looping call"
+msgstr "in looping call"
+
+#: cinder/utils.py:813
+#, python-format
+msgid "Expected object of type: %s"
+msgstr "Expected object of type: %s"
+
+#: cinder/utils.py:903
+#, python-format
+msgid "timefunc: '%(name)s' took %(total_time).2f secs"
+msgstr "timefunc: '%(name)s' took %(total_time).2f secs"
+
+#: cinder/utils.py:1105
+#, python-format
+msgid "Could not remove tmpdir: %s"
+msgstr "Could not remove tmpdir: %s"
+
+#: cinder/utils.py:1213
+#, python-format
+msgid "Unknown byte multiplier: %s"
+msgstr "Unknown byte multiplier: %s"
+
+#: cinder/wsgi.py:121
+#, python-format
+msgid "Unable to find cert_file: %s"
+msgstr "Unable to find cert_file: %s"
+
+#: cinder/wsgi.py:124
+#, python-format
+msgid "Unable to find ca_file: %s"
+msgstr "Unable to find ca_file: %s"
+
+#: cinder/wsgi.py:127
+#, python-format
+msgid "Unable to find key_file: %s"
+msgstr "Unable to find key_file: %s"
+
+#: cinder/wsgi.py:130
+msgid ""
+"When running server in SSL mode, you must specify both a cert_file and "
+"key_file option value in your configuration file"
+msgstr ""
+"When running server in SSL mode, you must specify both a cert_file and "
+"key_file option value in your configuration file"
+
+#: cinder/wsgi.py:163
+#, python-format
+msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds"
+msgstr "Could not bind to %(host)s:%(port)s after trying for 30 seconds"
+
+#: cinder/wsgi.py:207
+#, python-format
+msgid "Started %(name)s on %(_host)s:%(_port)s"
+msgstr "Started %(name)s on %(_host)s:%(_port)s"
+
+#: cinder/wsgi.py:226
+msgid "Stopping WSGI server."
+msgstr "Stopping WSGI server."
+
+#: cinder/wsgi.py:240
+msgid "WSGI server has stopped."
+msgstr "WSGI server has stopped."
+
+#: cinder/wsgi.py:309
+msgid "You must implement __call__"
+msgstr "You must implement __call__"
+
+#: cinder/api/auth.py:26
+msgid ""
+"cinder.api.auth:CinderKeystoneContext is deprecated. Please use "
+"cinder.api.middleware.auth:CinderKeystoneContext instead."
+msgstr ""
+
+#: cinder/api/auth.py:34
+msgid ""
+"cinder.api.auth:pipeline_factory is deprecated. Please use "
+"cinder.api.middleware.auth:pipeline_factory instead."
+msgstr "" + +#: cinder/api/common.py:63 cinder/api/common.py:97 cinder/volume/api.py:359 +msgid "limit param must be an integer" +msgstr "limit param must be an integer" + +#: cinder/api/common.py:66 cinder/api/common.py:101 cinder/volume/api.py:356 +msgid "limit param must be positive" +msgstr "limit param must be positive" + +#: cinder/api/common.py:91 +msgid "offset param must be an integer" +msgstr "offset param must be an integer" + +#: cinder/api/common.py:105 +msgid "offset param must be positive" +msgstr "offset param must be positive" + +#: cinder/api/common.py:133 +#, python-format +msgid "marker [%s] not found" +msgstr "marker [%s] not found" + +#: cinder/api/common.py:160 +#, python-format +msgid "href %s does not contain version" +msgstr "href %s does not contain version" + +#: cinder/api/extensions.py:184 +msgid "Initializing extension manager." +msgstr "Initializing extension manager." + +#: cinder/api/extensions.py:199 +#, python-format +msgid "Loaded extension: %s" +msgstr "Loaded extension: %s" + +#: cinder/api/extensions.py:237 +#, python-format +msgid "Ext name: %s" +msgstr "Ext name: %s" + +#: cinder/api/extensions.py:238 +#, python-format +msgid "Ext alias: %s" +msgstr "Ext alias: %s" + +#: cinder/api/extensions.py:239 +#, python-format +msgid "Ext description: %s" +msgstr "Ext description: %s" + +#: cinder/api/extensions.py:241 +#, python-format +msgid "Ext namespace: %s" +msgstr "Ext namespace: %s" + +#: cinder/api/extensions.py:242 +#, python-format +msgid "Ext updated: %s" +msgstr "Ext updated: %s" + +#: cinder/api/extensions.py:244 +#, python-format +msgid "Exception loading extension: %s" +msgstr "Exception loading extension: %s" + +#: cinder/api/extensions.py:258 +#, python-format +msgid "Loading extension %s" +msgstr "Loading extension %s" + +#: cinder/api/extensions.py:264 +#, python-format +msgid "Calling extension factory %s" +msgstr "Calling extension factory %s" + +#: cinder/api/extensions.py:278 +#, python-format +msgid "osapi_volume_extension is set to deprecated path: %s" +msgstr "osapi_volume_extension is set to deprecated path: %s" + +#: cinder/api/extensions.py:280 +#, python-format +msgid "" +"Please set your flag or cinder.conf settings for osapi_volume_extension " +"to: %s" +msgstr "" +"Please set your flag or cinder.conf settings for osapi_volume_extension " +"to: %s" + +#: cinder/api/extensions.py:289 +#, python-format +msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgstr "Failed to load extension %(ext_factory)s: %(exc)s" + +#: cinder/api/extensions.py:357 +#, python-format +msgid "Failed to load extension %(classpath)s: %(exc)s" +msgstr "Failed to load extension %(classpath)s: %(exc)s" + +#: cinder/api/extensions.py:381 +#, python-format +msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgstr "Failed to load extension %(ext_name)s: %(exc)s" + +#: cinder/api/sizelimit.py:25 +msgid "" +"cinder.api.sizelimit:RequestBodySizeLimiter is deprecated. 
Please use " +"cinder.api.middleware.sizelimit:RequestBodySizeLimiter instead" +msgstr "" + +#: cinder/api/xmlutil.py:268 +msgid "element is not a child" +msgstr "element is not a child" + +#: cinder/api/xmlutil.py:417 +msgid "root element selecting a list" +msgstr "root element selecting a list" + +#: cinder/api/xmlutil.py:742 +#, python-format +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgstr "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" + +#: cinder/api/xmlutil.py:861 +msgid "subclasses must implement construct()!" +msgstr "subclasses must implement construct()!" + +#: cinder/api/contrib/admin_actions.py:80 +#, python-format +msgid "Updating %(resource)s '%(id)s' with '%(update)r'" +msgstr "Updating %(resource)s '%(id)s' with '%(update)r'" + +#: cinder/api/contrib/backups.py:128 +#, python-format +msgid "show called for member %s" +msgstr "show called for member %s" + +#: cinder/api/contrib/backups.py:140 +#, python-format +msgid "delete called for member %s" +msgstr "delete called for member %s" + +#: cinder/api/contrib/backups.py:143 +#, python-format +msgid "Delete backup with id: %s" +msgstr "Delete backup with id: %s" + +#: cinder/api/contrib/backups.py:185 +#, python-format +msgid "Creating new backup %s" +msgstr "Creating new backup %s" + +#: cinder/api/contrib/backups.py:195 cinder/api/contrib/backups.py:230 +msgid "Incorrect request body format" +msgstr "Incorrect request body format" + +#: cinder/api/contrib/backups.py:201 +#, python-format +msgid "Creating backup of volume %(volume_id)s in container %(container)s" +msgstr "Creating backup of volume %(volume_id)s in container %(container)s" + +#: cinder/api/contrib/backups.py:221 +#, python-format +msgid "Restoring backup %(backup_id)s (%(body)s)" +msgstr "Restoring backup %(backup_id)s (%(body)s)" + +#: cinder/api/contrib/backups.py:234 +#, python-format +msgid "Restoring backup %(backup_id)s to volume %(volume_id)s" +msgstr "Restoring backup %(backup_id)s to volume %(volume_id)s" + +#: cinder/api/contrib/extended_snapshot_attributes.py:61 +msgid "Snapshot not found." +msgstr "Snapshot not found." + +#: cinder/api/contrib/hosts.py:83 cinder/api/openstack/wsgi.py:158 +msgid "cannot understand XML" +msgstr "cannot understand XML" + +#: cinder/api/contrib/hosts.py:133 +#, python-format +msgid "Host '%s' could not be found." +msgstr "Host '%s' could not be found." + +#: cinder/api/contrib/hosts.py:162 +#, python-format +msgid "Invalid status: '%s'" +msgstr "Invalid status: '%s'" + +#: cinder/api/contrib/hosts.py:165 +#, python-format +msgid "Invalid update setting: '%s'" +msgstr "Invalid update setting: '%s'" + +#: cinder/api/contrib/hosts.py:177 +#, python-format +msgid "Setting host %(host)s to %(state)s." +msgstr "Setting host %(host)s to %(state)s." + +#: cinder/api/contrib/hosts.py:202 +msgid "Describe-resource is admin only functionality" +msgstr "Describe-resource is admin only functionality" + +#: cinder/api/contrib/hosts.py:210 +msgid "Host not found" +msgstr "Host not found" + +#: cinder/api/contrib/quotas.py:63 +msgid "Quota limit must be -1 or greater." +msgstr "Quota limit must be -1 or greater." 
+ +#: cinder/api/contrib/types_extra_specs.py:102 +msgid "Request body empty" +msgstr "Request body empty" + +#: cinder/api/contrib/types_extra_specs.py:106 +#: cinder/api/v1/snapshot_metadata.py:77 cinder/api/v1/volume_metadata.py:77 +#: cinder/api/v2/snapshot_metadata.py:77 +msgid "Request body and URI mismatch" +msgstr "Request body and URI mismatch" + +#: cinder/api/contrib/types_extra_specs.py:109 +#: cinder/api/v1/snapshot_metadata.py:81 cinder/api/v1/volume_metadata.py:81 +#: cinder/api/v2/snapshot_metadata.py:81 +msgid "Request body contains too many items" +msgstr "Request body contains too many items" + +#: cinder/api/contrib/volume_actions.py:159 +msgid "Invalid request body" +msgstr "Invalid request body" + +#: cinder/api/contrib/volume_actions.py:163 +msgid "No image_name was specified in request." +msgstr "No image_name was specified in request." + +#: cinder/api/middleware/fault.py:45 +#, python-format +msgid "Caught error: %s" +msgstr "Caught error: %s" + +#: cinder/api/middleware/fault.py:54 cinder/api/openstack/wsgi.py:886 +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "%(url)s returned with HTTP %(status)d" + +#: cinder/api/middleware/sizelimit.py:54 cinder/api/middleware/sizelimit.py:63 +#: cinder/api/middleware/sizelimit.py:77 +msgid "Request is too large." +msgstr "Request is too large." + +#: cinder/api/openstack/__init__.py:74 +msgid "Must specify an ExtensionManager class" +msgstr "Must specify an ExtensionManager class" + +#: cinder/api/openstack/__init__.py:85 +#, python-format +msgid "Extended resource: %s" +msgstr "Extended resource: %s" + +#: cinder/api/openstack/__init__.py:110 +#, python-format +msgid "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" +msgstr "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" + +#: cinder/api/openstack/__init__.py:115 +#, python-format +msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgstr "Extension %(ext_name)s extending resource: %(collection)s" + +#: cinder/api/openstack/__init__.py:128 +msgid "" +"cinder.api.openstack:FaultWrapper is deprecated. Please use " +"cinder.api.middleware.fault:FaultWrapper instead." +msgstr "" + +#: cinder/api/openstack/urlmap.py:25 +msgid "" +"cinder.api.openstack.urlmap:urlmap_factory is deprecated. Please use " +"cinder.api.urlmap:urlmap_factory instead." 
+msgstr "" + +#: cinder/api/openstack/wsgi.py:134 cinder/api/openstack/wsgi.py:537 +msgid "cannot understand JSON" +msgstr "cannot understand JSON" + +#: cinder/api/openstack/wsgi.py:542 +msgid "too many body keys" +msgstr "too many body keys" + +#: cinder/api/openstack/wsgi.py:581 +#, python-format +msgid "Exception handling resource: %s" +msgstr "Exception handling resource: %s" + +#: cinder/api/openstack/wsgi.py:586 +#, python-format +msgid "Fault thrown: %s" +msgstr "Fault thrown: %s" + +#: cinder/api/openstack/wsgi.py:589 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "HTTP exception thrown: %s" + +#: cinder/api/openstack/wsgi.py:697 +msgid "Unrecognized Content-Type provided in request" +msgstr "Unrecognized Content-Type provided in request" + +#: cinder/api/openstack/wsgi.py:701 +msgid "No Content-Type provided in request" +msgstr "No Content-Type provided in request" + +#: cinder/api/openstack/wsgi.py:705 +msgid "Empty body provided in request" +msgstr "Empty body provided in request" + +#: cinder/api/openstack/wsgi.py:816 +#, python-format +msgid "There is no such action: %s" +msgstr "There is no such action: %s" + +#: cinder/api/openstack/wsgi.py:819 cinder/api/openstack/wsgi.py:832 +#: cinder/api/v1/snapshot_metadata.py:55 cinder/api/v1/snapshot_metadata.py:73 +#: cinder/api/v1/snapshot_metadata.py:98 cinder/api/v1/snapshot_metadata.py:123 +#: cinder/api/v1/volume_metadata.py:55 cinder/api/v1/volume_metadata.py:73 +#: cinder/api/v1/volume_metadata.py:98 cinder/api/v1/volume_metadata.py:123 +#: cinder/api/v2/snapshot_metadata.py:55 cinder/api/v2/snapshot_metadata.py:73 +#: cinder/api/v2/snapshot_metadata.py:98 cinder/api/v2/snapshot_metadata.py:123 +msgid "Malformed request body" +msgstr "Malformed request body" + +#: cinder/api/openstack/wsgi.py:829 +msgid "Unsupported Content-Type" +msgstr "Unsupported Content-Type" + +#: cinder/api/openstack/wsgi.py:841 +msgid "Malformed request url" +msgstr "Malformed request url" + +#: cinder/api/openstack/wsgi.py:889 +#, python-format +msgid "%(url)s returned a fault: %(e)s" +msgstr "%(url)s returned a fault: %(e)s" + +#: cinder/api/openstack/volume/__init__.py:25 +msgid "" +"cinder.api.openstack.volume:APIRouter is deprecated. Please use " +"cinder.api.v1.router:APIRouter instead." +msgstr "" + +#: cinder/api/openstack/volume/versions.py:26 +msgid "" +"cinder.api.openstack.volume.versions.Versions is deprecated. Please use " +"cinder.api.versions.Versions instead." +msgstr "" + +#: cinder/api/v1/limits.py:144 cinder/api/v2/limits.py:144 +#, python-format +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." +msgstr "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." + +#: cinder/api/v1/limits.py:270 cinder/api/v2/limits.py:270 +msgid "This request was rate-limited." +msgstr "This request was rate-limited." 
+ +#: cinder/api/v1/snapshot_metadata.py:39 cinder/api/v1/snapshot_metadata.py:119 +#: cinder/api/v1/snapshot_metadata.py:158 cinder/api/v2/snapshot_metadata.py:39 +#: cinder/api/v2/snapshot_metadata.py:119 +#: cinder/api/v2/snapshot_metadata.py:158 +msgid "snapshot does not exist" +msgstr "snapshot does not exist" + +#: cinder/api/v1/snapshot_metadata.py:141 +#: cinder/api/v1/snapshot_metadata.py:151 cinder/api/v1/volume_metadata.py:141 +#: cinder/api/v1/volume_metadata.py:151 cinder/api/v2/snapshot_metadata.py:141 +#: cinder/api/v2/snapshot_metadata.py:151 +msgid "Metadata item was not found" +msgstr "Metadata item was not found" + +#: cinder/api/v1/snapshots.py:123 cinder/api/v2/snapshots.py:123 +#, python-format +msgid "Delete snapshot with id: %s" +msgstr "Delete snapshot with id: %s" + +#: cinder/api/v1/snapshots.py:173 cinder/api/v2/snapshots.py:179 +#, python-format +msgid "Create snapshot from volume %s" +msgstr "Create snapshot from volume %s" + +#: cinder/api/v1/snapshots.py:177 cinder/api/v2/snapshots.py:188 +#, python-format +msgid "Invalid value '%s' for force. " +msgstr "Invalid value '%s' for force. " + +#: cinder/api/v1/volume_metadata.py:39 cinder/api/v1/volume_metadata.py:119 +#: cinder/api/v1/volume_metadata.py:158 +msgid "volume does not exist" +msgstr "volume does not exist" + +#: cinder/api/v1/volumes.py:106 +#, python-format +msgid "vol=%s" +msgstr "vol=%s" + +#: cinder/api/v1/volumes.py:236 cinder/api/v2/volumes.py:153 +#, python-format +msgid "Delete volume with id: %s" +msgstr "Delete volume with id: %s" + +#: cinder/api/v1/volumes.py:278 cinder/api/v1/volumes.py:282 +#: cinder/api/v2/volumes.py:209 cinder/api/v2/volumes.py:213 +msgid "Invalid imageRef provided." +msgstr "Invalid imageRef provided." + +#: cinder/api/v1/volumes.py:339 cinder/api/v2/volumes.py:272 +#, python-format +msgid "Create volume of %s GB" +msgstr "Create volume of %s GB" + +#: cinder/api/v1/volumes.py:418 +#, python-format +msgid "Removing options '%(bad_options)s' from query" +msgstr "Removing options '%(bad_options)s' from query" + +#: cinder/api/v2/volumes.py:359 +#, python-format +msgid "Removing options '%s' from query" +msgstr "Removing options '%s' from query" + +#: cinder/backup/api.py:53 +msgid "Backup status must be available or error" +msgstr "Backup status must be available or error" + +#: cinder/backup/api.py:78 +msgid "Volume to be backed up must be available" +msgstr "Volume to be backed up must be available" + +#: cinder/backup/api.py:112 +msgid "Backup status must be available" +msgstr "Backup status must be available" + +#: cinder/backup/api.py:117 +msgid "Backup to be restored has invalid size" +msgstr "Backup to be restored has invalid size" + +#: cinder/backup/api.py:126 +#, python-format +msgid "Creating volume of %(size)s GB for restore of backup %(backup_id)s" +msgstr "Creating volume of %(size)s GB for restore of backup %(backup_id)s" + +#: cinder/backup/api.py:140 +#, python-format +msgid "" +"volume size %(volume_size)d is too small to restore backup of size " +"%(size)d." +msgstr "" +"volume size %(volume_size)d is too small to restore backup of size " +"%(size)d." 
+ +#: cinder/backup/api.py:145 +msgid "Volume to be restored to must be available" +msgstr "Volume to be restored to must be available" + +#: cinder/backup/api.py:151 +msgid "Volume to be restored to is smaller than the backup to be restored" +msgstr "Volume to be restored to is smaller than the backup to be restored" + +#: cinder/backup/api.py:155 +#, python-format +msgid "Overwriting volume %(volume_id)s with restore of backup %(backup_id)s" +msgstr "Overwriting volume %(volume_id)s with restore of backup %(backup_id)s" + +#: cinder/backup/manager.py:79 +msgid "Cleaning up incomplete backup operations" +msgstr "Cleaning up incomplete backup operations" + +#: cinder/backup/manager.py:83 +#, python-format +msgid "Resetting volume %s to available (was backing-up)" +msgstr "Resetting volume %s to available (was backing-up)" + +#: cinder/backup/manager.py:87 +#, python-format +msgid "Resetting volume %s to error_restoring (was restoring-backup)" +msgstr "Resetting volume %s to error_restoring (was restoring-backup)" + +#: cinder/backup/manager.py:98 +#, python-format +msgid "Resetting backup %s to error (was creating)" +msgstr "Resetting backup %s to error (was creating)" + +#: cinder/backup/manager.py:104 +#, python-format +msgid "Resetting backup %s to available (was restoring)" +msgstr "Resetting backup %s to available (was restoring)" + +#: cinder/backup/manager.py:109 +#, python-format +msgid "Resuming delete on backup: %s" +msgstr "Resuming delete on backup: %s" + +#: cinder/backup/manager.py:119 +#, python-format +msgid "create_backup started, backup: %(backup_id)s for volume: %(volume_id)s" +msgstr "create_backup started, backup: %(backup_id)s for volume: %(volume_id)s" + +#: cinder/backup/manager.py:128 +#, python-format +msgid "" +"create_backup aborted, expected volume status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" +"create_backup aborted, expected volume status %(expected_status)s but got" +" %(actual_status)s" + +#: cinder/backup/manager.py:137 +#, python-format +msgid "" +"create_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" +"create_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s" + +#: cinder/backup/manager.py:160 +#, python-format +msgid "create_backup finished. backup: %s" +msgstr "create_backup finished. 
backup: %s" + +#: cinder/backup/manager.py:166 +#, python-format +msgid "" +"restore_backup started, restoring backup: %(backup_id)s to volume: " +"%(volume_id)s" +msgstr "" +"restore_backup started, restoring backup: %(backup_id)s to volume: " +"%(volume_id)s" + +#: cinder/backup/manager.py:175 +#, python-format +msgid "" +"restore_backup aborted, expected volume status %(expected_status)s but " +"got %(actual_status)s" +msgstr "" +"restore_backup aborted, expected volume status %(expected_status)s but " +"got %(actual_status)s" + +#: cinder/backup/manager.py:183 +#, python-format +msgid "" +"restore_backup aborted, expected backup status %(expected_status)s but " +"got %(actual_status)s" +msgstr "" +"restore_backup aborted, expected backup status %(expected_status)s but " +"got %(actual_status)s" + +#: cinder/backup/manager.py:199 +#, python-format +msgid "" +"restore_backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]" +msgstr "" +"restore_backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]" + +#: cinder/backup/manager.py:220 +#, python-format +msgid "" +"restore_backup finished, backup: %(backup_id)s restored to volume: " +"%(volume_id)s" +msgstr "" +"restore_backup finished, backup: %(backup_id)s restored to volume: " +"%(volume_id)s" + +#: cinder/backup/manager.py:228 +#, python-format +msgid "delete_backup started, backup: %s" +msgstr "delete_backup started, backup: %s" + +#: cinder/backup/manager.py:234 +#, python-format +msgid "" +"delete_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" +"delete_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s" + +#: cinder/backup/manager.py:244 +#, python-format +msgid "" +"delete_backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]" +msgstr "" +"delete_backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]" + +#: cinder/backup/manager.py:264 +#, python-format +msgid "delete_backup finished, backup %s deleted" +msgstr "delete_backup finished, backup %s deleted" + +#: cinder/backup/services/swift.py:96 +#, python-format +msgid "unsupported compression algorithm: %s" +msgstr "unsupported compression algorithm: %s" + +#: cinder/backup/services/swift.py:117 +#, python-format +msgid "_check_container_exists: container: %s" +msgstr "_check_container_exists: container: %s" + +#: cinder/backup/services/swift.py:122 +#, python-format +msgid "container %s does not exist" +msgstr "container %s does not exist" + +#: cinder/backup/services/swift.py:127 +#, python-format +msgid "container %s exists" +msgstr "container %s exists" + +#: cinder/backup/services/swift.py:133 +#, python-format +msgid "_create_container started, container: %(container)s,backup: %(backup_id)s" +msgstr "_create_container started, container: %(container)s,backup: %(backup_id)s" + +#: cinder/backup/services/swift.py:148 +#, python-format +msgid "_generate_swift_object_name_prefix: %s" +msgstr "_generate_swift_object_name_prefix: %s" + +#: cinder/backup/services/swift.py:159 +#, python-format +msgid "generated object list: %s" 
+msgstr "generated object list: %s" + +#: cinder/backup/services/swift.py:169 +#, python-format +msgid "" +"_write_metadata started, container name: %(container)s, metadata " +"filename: %(filename)s" +msgstr "" +"_write_metadata started, container name: %(container)s, metadata " +"filename: %(filename)s" + +#: cinder/backup/services/swift.py:184 +#, python-format +msgid "" +"error writing metadata file to swift, MD5 of metadata file in swift " +"[%(etag)s] is not the same as MD5 of metadata file sent to swift " +"[%(md5)s]" +msgstr "" +"error writing metadata file to swift, MD5 of metadata file in swift " +"[%(etag)s] is not the same as MD5 of metadata file sent to swift " +"[%(md5)s]" + +#: cinder/backup/services/swift.py:188 +msgid "_write_metadata finished" +msgstr "_write_metadata finished" + +#: cinder/backup/services/swift.py:193 +#, python-format +msgid "" +"_read_metadata started, container name: %(container)s, metadata filename:" +" %(filename)s" +msgstr "" +"_read_metadata started, container name: %(container)s, metadata filename:" +" %(filename)s" + +#: cinder/backup/services/swift.py:197 +#, python-format +msgid "_read_metadata finished (%s)" +msgstr "_read_metadata finished (%s)" + +#: cinder/backup/services/swift.py:207 +#, python-format +msgid "volume size %d is invalid." +msgstr "volume size %d is invalid." + +#: cinder/backup/services/swift.py:221 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to swift, volume size: " +"%(volume_size_bytes)d, swift object names prefix %(object_prefix)s, " +"availability zone: %(availability_zone)s" +msgstr "" +"starting backup of volume: %(volume_id)s to swift, volume size: " +"%(volume_size_bytes)d, swift object names prefix %(object_prefix)s, " +"availability zone: %(availability_zone)s" + +#: cinder/backup/services/swift.py:237 +msgid "reading chunk of data from volume" +msgstr "reading chunk of data from volume" + +#: cinder/backup/services/swift.py:244 +#, python-format +msgid "" +"compressed %(data_size_bytes)d bytes of data to %(comp_size_bytes)d bytes" +" using %(algorithm)s" +msgstr "" +"compressed %(data_size_bytes)d bytes of data to %(comp_size_bytes)d bytes" +" using %(algorithm)s" + +#: cinder/backup/services/swift.py:248 +msgid "not compressing data" +msgstr "not compressing data" + +#: cinder/backup/services/swift.py:252 +msgid "About to put_object" +msgstr "About to put_object" + +#: cinder/backup/services/swift.py:257 +#, python-format +msgid "swift MD5 for %(object_name)s: %(etag)s" +msgstr "swift MD5 for %(object_name)s: %(etag)s" + +#: cinder/backup/services/swift.py:260 +#, python-format +msgid "backup MD5 for %(object_name)s: %(md5)s" +msgstr "backup MD5 for %(object_name)s: %(md5)s" + +#: cinder/backup/services/swift.py:262 +#, python-format +msgid "" +"error writing object to swift, MD5 of object in swift %(etag)s is not the" +" same as MD5 of object sent to swift %(md5)s" +msgstr "" +"error writing object to swift, MD5 of object in swift %(etag)s is not the" +" same as MD5 of object sent to swift %(md5)s" + +#: cinder/backup/services/swift.py:268 +msgid "Calling eventlet.sleep(0)" +msgstr "Calling eventlet.sleep(0)" + +#: cinder/backup/services/swift.py:276 +#, python-format +msgid "backup %s finished." +msgstr "backup %s finished." 
+ +#: cinder/backup/services/swift.py:281 +#, python-format +msgid "v1 swift volume backup restore of %s started" +msgstr "v1 swift volume backup restore of %s started" + +#: cinder/backup/services/swift.py:287 +#, python-format +msgid "metadata_object_names = %s" +msgstr "metadata_object_names = %s" + +#: cinder/backup/services/swift.py:293 +msgid "" +"restore_backup aborted, actual swift object list in swift does not match " +"object list stored in metadata" +msgstr "" +"restore_backup aborted, actual swift object list in swift does not match " +"object list stored in metadata" + +#: cinder/backup/services/swift.py:299 +#, python-format +msgid "" +"restoring object from swift. backup: %(backup_id)s, container: " +"%(container)s, swift object name: %(object_name)s, volume: %(volume_id)s" +msgstr "" +"restoring object from swift. backup: %(backup_id)s, container: " +"%(container)s, swift object name: %(object_name)s, volume: %(volume_id)s" + +#: cinder/backup/services/swift.py:309 +#, python-format +msgid "decompressing data using %s algorithm" +msgstr "decompressing data using %s algorithm" + +#: cinder/backup/services/swift.py:323 +#, python-format +msgid "v1 swift volume backup restore of %s finished" +msgstr "v1 swift volume backup restore of %s finished" + +#: cinder/backup/services/swift.py:331 +#, python-format +msgid "" +"starting restore of backup %(object_prefix)s from swift container: " +"%(container)s, to volume %(volume_id)s, backup: %(backup_id)s" +msgstr "" +"starting restore of backup %(object_prefix)s from swift container: " +"%(container)s, to volume %(volume_id)s, backup: %(backup_id)s" + +#: cinder/backup/services/swift.py:339 +#, python-format +msgid "Restoring swift backup version %s" +msgstr "Restoring swift backup version %s" + +#: cinder/backup/services/swift.py:344 +#, python-format +msgid "No support to restore swift backup version %s" +msgstr "No support to restore swift backup version %s" + +#: cinder/backup/services/swift.py:348 +#, python-format +msgid "restore %(backup_id)s to %(volume_id)s finished." +msgstr "restore %(backup_id)s to %(volume_id)s finished." + +#: cinder/backup/services/swift.py:362 +msgid "swift error while listing objects, continuing with delete" +msgstr "swift error while listing objects, continuing with delete" + +#: cinder/backup/services/swift.py:371 +#, python-format +msgid "swift error while deleting object %s, continuing with delete" +msgstr "swift error while deleting object %s, continuing with delete" + +#: cinder/backup/services/swift.py:374 +#, python-format +msgid "deleted swift object: %(swift_object_name)s in container: %(container)s" +msgstr "deleted swift object: %(swift_object_name)s in container: %(container)s" + +#: cinder/backup/services/swift.py:380 +#, python-format +msgid "delete %s finished" +msgstr "delete %s finished" + +#: cinder/brick/iscsi/iscsi.py:152 +#, python-format +msgid "Creating iscsi_target for: %s" +msgstr "Creating iscsi_target for: %s" + +#: cinder/brick/iscsi/iscsi.py:171 cinder/brick/iscsi/iscsi.py:264 +#: cinder/brick/iscsi/iscsi.py:400 cinder/brick/iscsi/iscsi.py:409 +#, python-format +msgid "Failed to create iscsi target for volume id:%(vol_id)s." +msgstr "Failed to create iscsi target for volume id:%(vol_id)s." + +#: cinder/brick/iscsi/iscsi.py:181 +#, python-format +msgid "" +"Failed to create iscsi target for volume id:%(vol_id)s. Please ensure " +"your tgtd config file contains 'include %(volumes_dir)s/*'" +msgstr "" +"Failed to create iscsi target for volume id:%(vol_id)s. 
Please ensure " +"your tgtd config file contains 'include %(volumes_dir)s/*'" + +#: cinder/brick/iscsi/iscsi.py:192 +#, python-format +msgid "Removing iscsi_target for: %s" +msgstr "Removing iscsi_target for: %s" + +#: cinder/brick/iscsi/iscsi.py:209 cinder/brick/iscsi/iscsi.py:426 +#, python-format +msgid "Failed to remove iscsi target for volume id:%(vol_id)s." +msgstr "Failed to remove iscsi target for volume id:%(vol_id)s." + +#: cinder/brick/iscsi/iscsi.py:218 cinder/brick/iscsi/iscsi.py:434 +msgid "valid iqn needed for show_target" +msgstr "valid iqn needed for show_target" + +#: cinder/brick/iscsi/iscsi.py:270 +#, python-format +msgid "Removing iscsi_target for volume: %s" +msgstr "Removing iscsi_target for volume: %s" + +#: cinder/brick/iscsi/iscsi.py:356 +msgid "rtstool is not installed correctly" +msgstr "rtstool is not installed correctly" + +#: cinder/brick/iscsi/iscsi.py:376 +#, python-format +msgid "Creating iscsi_target for volume: %s" +msgstr "Creating iscsi_target for volume: %s" + +#: cinder/brick/iscsi/iscsi.py:416 +#, python-format +msgid "Removing iscsi_target: %s" +msgstr "Removing iscsi_target: %s" + +#: cinder/brick/iscsi/iscsi.py:455 +#, python-format +msgid "Failed to add initiator iqn %s to target" +msgstr "Failed to add initiator iqn %s to target" + +#: cinder/brick/local_dev/lvm.py:35 +#, fuzzy, python-format +msgid "Unable to find Volume Group: %s" +msgstr "Unable to find group: %(group)s" + +#: cinder/brick/local_dev/lvm.py:41 +#, fuzzy, python-format +msgid "Failed to create Volume Group: %s" +msgstr "Fail to create volume %(volname)s" + +#: cinder/brick/local_dev/lvm.py:75 +#, fuzzy +msgid "Error creating Volume Group" +msgstr "error refreshing volume stats" + +#: cinder/brick/local_dev/lvm.py:76 +#, python-format +msgid "Cmd :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:77 +#, fuzzy, python-format +msgid "StdOut :%s" +msgstr "casted to %s" + +#: cinder/brick/local_dev/lvm.py:78 +#, fuzzy, python-format +msgid "StdErr :%s" +msgstr "casted to %s" + +#: cinder/brick/local_dev/lvm.py:82 +#, fuzzy, python-format +msgid "Unable to locate Volume Group %s" +msgstr "Fail to create volume %(volname)s" + +#: cinder/brick/local_dev/lvm.py:265 +#, fuzzy, python-format +msgid "Unable to find VG: %s" +msgstr "Unable to find ca_file : %s" + +#: cinder/brick/local_dev/lvm.py:284 +msgid "" +"Requested to setup thin provisioning, however current LVM version does " +"not support it." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:337 +#, fuzzy, python-format +msgid "Unable to find LV: %s" +msgstr "Unable to find ca_file : %s" + +#: cinder/common/sqlalchemyutils.py:68 +msgid "Id not in sort_keys; is sort_keys unique?" +msgstr "Id not in sort_keys; is sort_keys unique?" 
+ +#: cinder/common/sqlalchemyutils.py:116 +msgid "Unknown sort direction, must be 'desc' or 'asc'" +msgstr "Unknown sort direction, must be 'desc' or 'asc'" + +#: cinder/db/sqlalchemy/api.py:51 +msgid "Use of empty request context is deprecated" +msgstr "Use of empty request context is deprecated" + +#: cinder/db/sqlalchemy/api.py:178 +#, python-format +msgid "Unrecognized read_deleted value '%s'" +msgstr "Unrecognized read_deleted value '%s'" + +#: cinder/db/sqlalchemy/api.py:786 +#, python-format +msgid "Change will make usage less than 0 for the following resources: %(unders)s" +msgstr "Change will make usage less than 0 for the following resources: %(unders)s" + +#: cinder/db/sqlalchemy/api.py:1797 cinder/db/sqlalchemy/api.py:1823 +#, python-format +msgid "No backend config with id %(sm_backend_id)s" +msgstr "No backend config with id %(sm_backend_id)s" + +#: cinder/db/sqlalchemy/api.py:1880 +#, python-format +msgid "No sm_flavor called %(sm_flavor)s" +msgstr "No sm_flavor called %(sm_flavor)s" + +#: cinder/db/sqlalchemy/api.py:1924 +#, python-format +msgid "No sm_volume with id %(volume_id)s" +msgstr "No sm_volume with id %(volume_id)s" + +#: cinder/db/sqlalchemy/api.py:1987 +#, python-format +msgid "No backup with id %(backup_id)s" +msgstr "No backup with id %(backup_id)s" + +#: cinder/db/sqlalchemy/migration.py:74 +msgid "version should be an integer" +msgstr "version should be an integer" + +#: cinder/db/sqlalchemy/migration.py:101 +msgid "Upgrade DB using Essex release first." +msgstr "Upgrade DB using Essex release first." + +#: cinder/db/sqlalchemy/session.py:71 +#, python-format +msgid "Got mysql server has gone away: %s" +msgstr "Got mysql server has gone away: %s" + +#: cinder/db/sqlalchemy/session.py:132 +#, python-format +msgid "SQL connection failed. %s attempts left." +msgstr "SQL connection failed. %s attempts left." + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:243 +msgid "Exception while creating table." +msgstr "Exception while creating table." + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:272 +msgid "Downgrade from initial Cinder install is unsupported." +msgstr "Downgrade from initial Cinder install is unsupported." + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:85 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:123 +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:82 +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:47 +#, python-format +msgid "Table |%s| not created!" +msgstr "Table |%s| not created!" 
+
+#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:135
+msgid "quota_classes table not dropped"
+msgstr "quota_classes table not dropped"
+
+#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:142
+msgid "quota_usages table not dropped"
+msgstr "quota_usages table not dropped"
+
+#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:149
+msgid "reservations table not dropped"
+msgstr "reservations table not dropped"
+
+#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:62
+msgid "Exception while creating table 'volume_glance_metedata'"
+msgstr "Exception while creating table 'volume_glance_metadata'"
+
+#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:77
+msgid "volume_glance_metadata table not dropped"
+msgstr "volume_glance_metadata table not dropped"
+
+#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:94
+msgid "backups table not dropped"
+msgstr "backups table not dropped"
+
+#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:60
+msgid "snapshot_metadata table not dropped"
+msgstr "snapshot_metadata table not dropped"
+
+#: cinder/image/glance.py:151
+#, python-format
+msgid ""
+"Error contacting glance server '%(host)s:%(port)s' for '%(method)s', "
+"%(extra)s."
+msgstr ""
+"Error contacting glance server '%(host)s:%(port)s' for '%(method)s', "
+"%(extra)s."
+
+#: cinder/image/image_utils.py:116
+msgid "Snapshot list encountered but no header found!"
+msgstr "Snapshot list encountered but no header found!"
+
+#: cinder/image/image_utils.py:224
+msgid "'qemu-img info' parsing failed."
+msgstr "'qemu-img info' parsing failed."
+
+#: cinder/image/image_utils.py:231
+#, python-format
+msgid "fmt=%(fmt)s backed by:%(backing_file)s"
+msgstr "fmt=%(fmt)s backed by:%(backing_file)s"
+
+#: cinder/image/image_utils.py:248
+#, python-format
+msgid "Converted to raw, but format is now %s"
+msgstr "Converted to raw, but format is now %s"
+
+#: cinder/image/image_utils.py:278
+#, python-format
+msgid "Converted to %(f1)s, but format is now %(f2)s"
+msgstr "Converted to %(f1)s, but format is now %(f2)s"
+
+#: cinder/openstack/common/exception.py:104
+msgid "Uncaught exception"
+msgstr "Uncaught exception"
+
+#: cinder/openstack/common/excutils.py:48
+#, python-format
+msgid "Original exception being dropped: %s"
+msgstr "Original exception being dropped: %s"
+
+#: cinder/openstack/common/lockutils.py:102
+#, python-format
+msgid "Could not release the acquired lock `%s`"
+msgstr "Could not release the acquired lock `%s`"
+
+#: cinder/openstack/common/lockutils.py:188
+#, python-format
+msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..."
+msgstr "Got semaphore \"%(lock)s\" for method \"%(method)s\"..."
+
+#: cinder/openstack/common/lockutils.py:199
+#, python-format
+msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..."
+msgstr "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..."
+
+#: cinder/openstack/common/lockutils.py:226
+#, python-format
+msgid "Got file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..."
+msgstr "Got file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..."
+
+#: cinder/openstack/common/lockutils.py:234
+#, python-format
+msgid "Released file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..."
+msgstr "Released file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..."
+ +#: cinder/openstack/common/log.py:226 +#, python-format +msgid "Deprecated: %s" +msgstr "Deprecated: %s" + +#: cinder/openstack/common/log.py:329 +#, python-format +msgid "Error loading logging config %(log_config)s: %(err_msg)s" +msgstr "Error loading logging config %(log_config)s: %(err_msg)s" + +#: cinder/openstack/common/log.py:379 +#, python-format +msgid "syslog facility must be one of: %s" +msgstr "syslog facility must be one of: %s" + +#: cinder/openstack/common/log.py:537 +#, python-format +msgid "Fatal call to deprecated config: %(msg)s" +msgstr "Fatal call to deprecated config: %(msg)s" + +#: cinder/openstack/common/loopingcall.py:84 +#, python-format +msgid "task run outlasted interval by %s sec" +msgstr "task run outlasted interval by %s sec" + +#: cinder/openstack/common/loopingcall.py:91 +msgid "in fixed duration looping call" +msgstr "in fixed duration looping call" + +#: cinder/openstack/common/loopingcall.py:131 +#, python-format +msgid "Dynamic looping call sleeping for %.02f seconds" +msgstr "Dynamic looping call sleeping for %.02f seconds" + +#: cinder/openstack/common/loopingcall.py:138 +msgid "in dynamic looping call" +msgstr "in dynamic looping call" + +#: cinder/openstack/common/policy.py:149 +#, python-format +msgid "" +"Inheritance-based rules are deprecated; use the default brain instead of " +"%s." +msgstr "" +"Inheritance-based rules are deprecated; use the default brain instead of " +"%s." + +#: cinder/openstack/common/policy.py:163 +#, python-format +msgid "Failed to understand rule %(match)r" +msgstr "Failed to understand rule %(match)r" + +#: cinder/openstack/common/policy.py:173 +#, python-format +msgid "Inheritance-based rules are deprecated; update _check_%s" +msgstr "Inheritance-based rules are deprecated; update _check_%s" + +#: cinder/openstack/common/policy.py:180 +#, python-format +msgid "No handler for matches of kind %s" +msgstr "No handler for matches of kind %s" + +#: cinder/openstack/common/service.py:112 +#: cinder/openstack/common/service.py:275 +msgid "Full set of CONF:" +msgstr "Full set of CONF:" + +#: cinder/openstack/common/service.py:261 +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "Child %(pid)s exited with status %(code)d" + +#: cinder/openstack/common/strutils.py:72 +#, python-format +msgid "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:125 +#, python-format +msgid "%s not in valid priorities" +msgstr "%s not in valid priorities" + +#: cinder/openstack/common/notifier/api.py:141 +#, python-format +msgid "" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" +msgstr "" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" + +#: cinder/openstack/common/notifier/api.py:171 +#, python-format +msgid "Failed to load notifier %s. These notifications will not be sent." +msgstr "Failed to load notifier %s. These notifications will not be sent." + +#: cinder/openstack/common/notifier/rabbit_notifier.py:27 +msgid "The rabbit_notifier is now deprecated. Please use rpc_notifier instead." +msgstr "The rabbit_notifier is now deprecated. Please use rpc_notifier instead." + +#: cinder/openstack/common/notifier/rpc_notifier.py:45 +#: cinder/openstack/common/notifier/rpc_notifier2.py:51 +#, python-format +msgid "Could not send notification to %(topic)s. Payload=%(message)s" +msgstr "Could not send notification to %(topic)s. 
Payload=%(message)s"
+
+#: cinder/openstack/common/rpc/__init__.py:106
+#, python-format
+msgid ""
+"A RPC is being made while holding a lock. The locks currently held are "
+"%(locks)s. This is probably a bug. Please report it. Include the "
+"following: [%(stack)s]."
+msgstr ""
+"An RPC is being made while holding a lock. The locks currently held are "
+"%(locks)s. This is probably a bug. Please report it. Include the "
+"following: [%(stack)s]."
+
+#: cinder/openstack/common/rpc/amqp.py:75
+msgid "Pool creating new connection"
+msgstr "Pool creating new connection"
+
+#: cinder/openstack/common/rpc/amqp.py:200
+#, python-format
+msgid "no calling threads waiting for msg_id : %s, message : %s"
+msgstr "no calling threads waiting for msg_id : %s, message : %s"
+
+#: cinder/openstack/common/rpc/amqp.py:208
+#, python-format
+msgid ""
+"Number of call waiters is greater than warning threshhold: %d. There "
+"could be a MulticallProxyWaiter leak."
+msgstr ""
+"Number of call waiters is greater than warning threshold: %d. There "
+"could be a MulticallProxyWaiter leak."
+
+#: cinder/openstack/common/rpc/amqp.py:291
+#, python-format
+msgid "unpacked context: %s"
+msgstr "unpacked context: %s"
+
+#: cinder/openstack/common/rpc/amqp.py:337
+#, python-format
+msgid "UNIQUE_ID is %s."
+msgstr "UNIQUE_ID is %s."
+
+#: cinder/openstack/common/rpc/amqp.py:406
+#, python-format
+msgid "received %s"
+msgstr "received %s"
+
+#: cinder/openstack/common/rpc/amqp.py:414
+#, python-format
+msgid "no method for message: %s"
+msgstr "no method for message: %s"
+
+#: cinder/openstack/common/rpc/amqp.py:415
+#, python-format
+msgid "No method for message: %s"
+msgstr "No method for message: %s"
+
+#: cinder/openstack/common/rpc/amqp.py:443
+#: cinder/openstack/common/rpc/impl_zmq.py:286
+#, python-format
+msgid "Expected exception during message handling (%s)"
+msgstr "Expected exception during message handling (%s)"
+
+#: cinder/openstack/common/rpc/amqp.py:451
+#: cinder/openstack/common/rpc/impl_zmq.py:292
+msgid "Exception during message handling"
+msgstr "Exception during message handling"
+
+#: cinder/openstack/common/rpc/amqp.py:586
+#, python-format
+msgid "Making synchronous call on %s ..."
+msgstr "Making synchronous call on %s ..."
+
+#: cinder/openstack/common/rpc/amqp.py:589
+#, python-format
+msgid "MSG_ID is %s"
+msgstr "MSG_ID is %s"
+
+#: cinder/openstack/common/rpc/amqp.py:623
+#, python-format
+msgid "Making asynchronous cast on %s..."
+msgstr "Making asynchronous cast on %s..."
+
+#: cinder/openstack/common/rpc/amqp.py:632
+msgid "Making asynchronous fanout cast..."
+msgstr "Making asynchronous fanout cast..."
+
+#: cinder/openstack/common/rpc/amqp.py:660
+#, python-format
+msgid "Sending %(event_type)s on %(topic)s"
+msgstr "Sending %(event_type)s on %(topic)s"
+
+#: cinder/openstack/common/rpc/common.py:74
+msgid "An unknown RPC related exception occurred."
+msgstr "An unknown RPC related exception occurred."
+
+#: cinder/openstack/common/rpc/common.py:104
+#, python-format
+msgid ""
+"Remote error: %(exc_type)s %(value)s\n"
+"%(traceback)s."
+msgstr ""
+"Remote error: %(exc_type)s %(value)s\n"
+"%(traceback)s."
+ +#: cinder/openstack/common/rpc/common.py:121 +#, python-format +msgid "" +"Timeout while waiting on RPC response - topic: \"%(topic)s\", RPC method:" +" \"%(method)s\" info: \"%(info)s\"" +msgstr "" +"Timeout while waiting on RPC response - topic: \"%(topic)s\", RPC method:" +" \"%(method)s\" info: \"%(info)s\"" + +#: cinder/openstack/common/rpc/common.py:137 +#: cinder/openstack/common/rpc/common.py:138 +#: cinder/openstack/common/rpc/common.py:139 +msgid "" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:143 +#, python-format +msgid "Found duplicate message(%(msg_id)s). Skipping it." +msgstr "Found duplicate message(%(msg_id)s). Skipping it." + +#: cinder/openstack/common/rpc/common.py:147 +msgid "Invalid reuse of an RPC connection." +msgstr "Invalid reuse of an RPC connection." + +#: cinder/openstack/common/rpc/common.py:151 +#, python-format +msgid "Specified RPC version, %(version)s, not supported by this endpoint." +msgstr "Specified RPC version, %(version)s, not supported by this endpoint." + +#: cinder/openstack/common/rpc/common.py:156 +#, python-format +msgid "" +"Specified RPC envelope version, %(version)s, not supported by this " +"endpoint." +msgstr "" +"Specified RPC envelope version, %(version)s, not supported by this " +"endpoint." + +#: cinder/openstack/common/rpc/common.py:280 +#, python-format +msgid "Failed to sanitize %(item)s. Key error %(err)s" +msgstr "Failed to sanitize %(item)s. Key error %(err)s" + +#: cinder/openstack/common/rpc/common.py:302 +#, python-format +msgid "Returning exception %s to caller" +msgstr "Returning exception %s to caller" + +#: cinder/openstack/common/rpc/impl_kombu.py:169 +#: cinder/openstack/common/rpc/impl_qpid.py:133 +msgid "Failed to process message... skipping it." +msgstr "Failed to process message... skipping it." + +#: cinder/openstack/common/rpc/impl_kombu.py:480 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "Reconnecting to AMQP server on %(hostname)s:%(port)d" + +#: cinder/openstack/common/rpc/impl_kombu.py:502 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "Connected to AMQP server on %(hostname)s:%(port)d" + +#: cinder/openstack/common/rpc/impl_kombu.py:539 +#, python-format +msgid "" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" +msgstr "" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" + +#: cinder/openstack/common/rpc/impl_kombu.py:555 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." 
+ +#: cinder/openstack/common/rpc/impl_kombu.py:609 +#: cinder/openstack/common/rpc/impl_qpid.py:403 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "Failed to declare consumer for topic '%(topic)s': %(err_str)s" + +#: cinder/openstack/common/rpc/impl_kombu.py:627 +#: cinder/openstack/common/rpc/impl_qpid.py:418 +#, python-format +msgid "Timed out waiting for RPC response: %s" +msgstr "Timed out waiting for RPC response: %s" + +#: cinder/openstack/common/rpc/impl_kombu.py:631 +#: cinder/openstack/common/rpc/impl_qpid.py:422 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "Failed to consume message from queue: %s" + +#: cinder/openstack/common/rpc/impl_kombu.py:670 +#: cinder/openstack/common/rpc/impl_qpid.py:457 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "Failed to publish message to topic '%(topic)s': %(err_str)s" + +#: cinder/openstack/common/rpc/impl_qpid.py:351 +#, python-format +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" +msgstr "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" + +#: cinder/openstack/common/rpc/impl_qpid.py:357 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "Connected to AMQP server on %s" + +#: cinder/openstack/common/rpc/impl_qpid.py:370 +msgid "Re-established AMQP queues" +msgstr "Re-established AMQP queues" + +#: cinder/openstack/common/rpc/impl_qpid.py:430 +msgid "Error processing message. Skipping it." +msgstr "Error processing message. Skipping it." + +#: cinder/openstack/common/rpc/impl_zmq.py:97 +msgid "JSON serialization failed." +msgstr "JSON serialization failed." + +#: cinder/openstack/common/rpc/impl_zmq.py:104 +#, python-format +msgid "Deserializing: %s" +msgstr "Deserializing: %s" + +#: cinder/openstack/common/rpc/impl_zmq.py:140 +#, python-format +msgid "Connecting to %(addr)s with %(type)s" +msgstr "Connecting to %(addr)s with %(type)s" + +#: cinder/openstack/common/rpc/impl_zmq.py:141 +#, python-format +msgid "-> Subscribed to %(subscribe)s" +msgstr "-> Subscribed to %(subscribe)s" + +#: cinder/openstack/common/rpc/impl_zmq.py:142 +#, python-format +msgid "-> bind: %(bind)s" +msgstr "-> bind: %(bind)s" + +#: cinder/openstack/common/rpc/impl_zmq.py:150 +msgid "Could not open socket." +msgstr "Could not open socket." + +#: cinder/openstack/common/rpc/impl_zmq.py:162 +#, python-format +msgid "Subscribing to %s" +msgstr "Subscribing to %s" + +#: cinder/openstack/common/rpc/impl_zmq.py:204 +msgid "You cannot recv on this socket." +msgstr "You cannot recv on this socket." + +#: cinder/openstack/common/rpc/impl_zmq.py:209 +msgid "You cannot send on this socket." +msgstr "You cannot send on this socket." + +#: cinder/openstack/common/rpc/impl_zmq.py:273 +#, python-format +msgid "Running func with context: %s" +msgstr "Running func with context: %s" + +#: cinder/openstack/common/rpc/impl_zmq.py:311 +msgid "Sending reply" +msgstr "Sending reply" + +#: cinder/openstack/common/rpc/impl_zmq.py:345 +msgid "RPC message did not include method." +msgstr "RPC message did not include method." 
+ +#: cinder/openstack/common/rpc/impl_zmq.py:380 +msgid "Registering reactor" +msgstr "Registering reactor" + +#: cinder/openstack/common/rpc/impl_zmq.py:392 +msgid "In reactor registered" +msgstr "In reactor registered" + +#: cinder/openstack/common/rpc/impl_zmq.py:407 +msgid "Out reactor registered" +msgstr "Out reactor registered" + +#: cinder/openstack/common/rpc/impl_zmq.py:411 +msgid "Consuming socket" +msgstr "Consuming socket" + +#: cinder/openstack/common/rpc/impl_zmq.py:453 +#, python-format +msgid "CONSUMER GOT %s" +msgstr "CONSUMER GOT %s" + +#: cinder/openstack/common/rpc/impl_zmq.py:465 +#, python-format +msgid "Creating proxy for topic: %s" +msgstr "Creating proxy for topic: %s" + +#: cinder/openstack/common/rpc/impl_zmq.py:471 +msgid "Topic contained dangerous characters." +msgstr "Topic contained dangerous characters." + +#: cinder/openstack/common/rpc/impl_zmq.py:496 +#, python-format +msgid "ROUTER RELAY-OUT SUCCEEDED %(data)s" +msgstr "ROUTER RELAY-OUT SUCCEEDED %(data)s" + +#: cinder/openstack/common/rpc/impl_zmq.py:505 +msgid "Topic socket file creation failed." +msgstr "Topic socket file creation failed." + +#: cinder/openstack/common/rpc/impl_zmq.py:510 +#, python-format +msgid "ROUTER RELAY-OUT QUEUED %(data)s" +msgstr "ROUTER RELAY-OUT QUEUED %(data)s" + +#: cinder/openstack/common/rpc/impl_zmq.py:513 +#, python-format +msgid "Local per-topic backlog buffer full for topic %(topic)s. Dropping message." +msgstr "Local per-topic backlog buffer full for topic %(topic)s. Dropping message." + +#: cinder/openstack/common/rpc/impl_zmq.py:532 +#, python-format +msgid "Could not create IPC directory %s" +msgstr "Could not create IPC directory %s" + +#: cinder/openstack/common/rpc/impl_zmq.py:542 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." +msgstr "Could not create ZeroMQ receiver daemon. Socket may already be in use." + +#: cinder/openstack/common/rpc/impl_zmq.py:576 +#, python-format +msgid "CONSUMER RECEIVED DATA: %s" +msgstr "CONSUMER RECEIVED DATA: %s" + +#: cinder/openstack/common/rpc/impl_zmq.py:578 +#, python-format +msgid "ROUTER RELAY-OUT %(data)s" +msgstr "ROUTER RELAY-OUT %(data)s" + +#: cinder/openstack/common/rpc/impl_zmq.py:600 +msgid "ZMQ Envelope version unsupported or unknown." +msgstr "ZMQ Envelope version unsupported or unknown." + +#: cinder/openstack/common/rpc/impl_zmq.py:628 +msgid "Skipping topic registration. Already registered." +msgstr "Skipping topic registration. Already registered." + +#: cinder/openstack/common/rpc/impl_zmq.py:635 +#, python-format +msgid "Consumer is a zmq.%s" +msgstr "Consumer is a zmq.%s" + +#: cinder/openstack/common/rpc/impl_zmq.py:687 +msgid "Creating payload" +msgstr "Creating payload" + +#: cinder/openstack/common/rpc/impl_zmq.py:700 +msgid "Creating queue socket for reply waiter" +msgstr "Creating queue socket for reply waiter" + +#: cinder/openstack/common/rpc/impl_zmq.py:713 +msgid "Sending cast" +msgstr "Sending cast" + +#: cinder/openstack/common/rpc/impl_zmq.py:716 +msgid "Cast sent; Waiting reply" +msgstr "Cast sent; Waiting reply" + +#: cinder/openstack/common/rpc/impl_zmq.py:719 +#, python-format +msgid "Received message: %s" +msgstr "Received message: %s" + +#: cinder/openstack/common/rpc/impl_zmq.py:720 +msgid "Unpacking response" +msgstr "Unpacking response" + +#: cinder/openstack/common/rpc/impl_zmq.py:729 +msgid "Unsupported or unknown ZMQ envelope returned." +msgstr "Unsupported or unknown ZMQ envelope returned." 
+ +#: cinder/openstack/common/rpc/impl_zmq.py:736 +msgid "RPC Message Invalid." +msgstr "RPC Message Invalid." + +#: cinder/openstack/common/rpc/impl_zmq.py:760 +#, python-format +msgid "%(msg)s" +msgstr "%(msg)s" + +#: cinder/openstack/common/rpc/impl_zmq.py:763 +#, python-format +msgid "Sending message(s) to: %s" +msgstr "Sending message(s) to: %s" + +#: cinder/openstack/common/rpc/impl_zmq.py:767 +msgid "No matchmaker results. Not casting." +msgstr "No matchmaker results. Not casting." + +#: cinder/openstack/common/rpc/impl_zmq.py:770 +msgid "No match from matchmaker." +msgstr "No match from matchmaker." + +#: cinder/openstack/common/rpc/matchmaker.py:53 +msgid "Match not found by MatchMaker." +msgstr "Match not found by MatchMaker." + +#: cinder/openstack/common/rpc/matchmaker.py:89 +msgid "Matchmaker does not implement registration or heartbeat." +msgstr "Matchmaker does not implement registration or heartbeat." + +#: cinder/openstack/common/rpc/matchmaker.py:239 +#, python-format +msgid "Matchmaker unregistered: %s, %s" +msgstr "Matchmaker unregistered: %s, %s" + +#: cinder/openstack/common/rpc/matchmaker.py:250 +msgid "Register before starting heartbeat." +msgstr "Register before starting heartbeat." + +#: cinder/openstack/common/rpc/matchmaker.py:343 +#: cinder/openstack/common/rpc/matchmaker.py:361 +#, python-format +msgid "No key defining hosts for topic '%s', see ringfile" +msgstr "No key defining hosts for topic '%s', see ringfile" + +#: cinder/scheduler/chance.py:51 cinder/scheduler/simple.py:90 +msgid "Is the appropriate service running?" +msgstr "Is the appropriate service running?" + +#: cinder/scheduler/chance.py:56 +msgid "Could not find another host" +msgstr "Could not find another host" + +#: cinder/scheduler/driver.py:89 +msgid "Must implement a fallback schedule" +msgstr "Must implement a fallback schedule" + +#: cinder/scheduler/driver.py:93 +msgid "Must implement schedule_create_volume" +msgstr "Must implement schedule_create_volume" + +#: cinder/scheduler/filter_scheduler.py:113 +msgid "Invalid value for 'scheduler_max_attempts', must be >=1" +msgstr "Invalid value for 'scheduler_max_attempts', must be >=1" + +#: cinder/scheduler/filter_scheduler.py:131 +#, python-format +msgid "" +"Error scheduling %(volume_id)s from last vol-service: %(last_host)s : " +"%(exc)s" +msgstr "" +"Error scheduling %(volume_id)s from last vol-service: %(last_host)s : " +"%(exc)s" + +#: cinder/scheduler/filter_scheduler.py:160 +#, python-format +msgid "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" +msgstr "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" + +#: cinder/scheduler/filter_scheduler.py:208 +#, python-format +msgid "Filtered %(hosts)s" +msgstr "Filtered %(hosts)s" + +#: cinder/scheduler/filter_scheduler.py:214 +#, python-format +msgid "Choosing %(best_host)s" +msgstr "Choosing %(best_host)s" + +#: cinder/scheduler/host_manager.py:240 +#, python-format +msgid "Ignoring %(service_name)s service update from %(host)s" +msgstr "Ignoring %(service_name)s service update from %(host)s" + +#: cinder/scheduler/host_manager.py:244 +#, python-format +msgid "Received %(service_name)s service update from %(host)s." +msgstr "Received %(service_name)s service update from %(host)s." + +#: cinder/scheduler/host_manager.py:266 +msgid "service is down or disabled." +msgstr "service is down or disabled." 
+ +#: cinder/scheduler/manager.py:119 +#, python-format +msgid "Failed to schedule_%(method)s: %(ex)s" +msgstr "Failed to schedule_%(method)s: %(ex)s" + +#: cinder/scheduler/scheduler_options.py:69 +#, python-format +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgstr "Could not stat scheduler options file %(filename)s: '%(e)s'" + +#: cinder/scheduler/scheduler_options.py:78 +#, python-format +msgid "Could not decode scheduler options: '%(e)s'" +msgstr "Could not decode scheduler options: '%(e)s'" + +#: cinder/scheduler/simple.py:79 +msgid "Not enough allocatable volume gigabytes remaining" +msgstr "Not enough allocatable volume gigabytes remaining" + +#: cinder/scheduler/filters/capacity_filter.py:37 +msgid "Free capacity not set: volume node info collection broken." +msgstr "Free capacity not set: volume node info collection broken." + +#: cinder/scheduler/filters/capacity_filter.py:51 +#, python-format +msgid "" +"Insufficient free space for volume creation (requested / avail): " +"%(requested)s/%(available)s" +msgstr "" +"Insufficient free space for volume creation (requested / avail): " +"%(requested)s/%(available)s" + +#: cinder/scheduler/filters/retry_filter.py:41 +#, python-format +msgid "Host %(host)s %(pass_msg)s. Previously tried hosts: %(hosts)s" +msgstr "Host %(host)s %(pass_msg)s. Previously tried hosts: %(hosts)s" + +#: cinder/tests/fake_driver.py:45 cinder/volume/driver.py:506 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "FAKE ISCSI: %s" + +#: cinder/tests/fake_driver.py:93 cinder/tests/fake_driver.py:98 +#, python-format +msgid "LoggingVolumeDriver: %s" +msgstr "LoggingVolumeDriver: %s" + +#: cinder/tests/fake_utils.py:72 +#, python-format +msgid "Faking execution of cmd (subprocess): %s" +msgstr "Faking execution of cmd (subprocess): %s" + +#: cinder/tests/fake_utils.py:80 +#, python-format +msgid "Faked command matched %s" +msgstr "Faked command matched %s" + +#: cinder/tests/fake_utils.py:96 +#, python-format +msgid "Faked command raised an exception %s" +msgstr "Faked command raised an exception %s" + +#: cinder/tests/fake_utils.py:101 +#, python-format +msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" +msgstr "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" + +#: cinder/tests/test_misc.py:59 +#, python-format +msgid "" +"The following migrations are missing a downgrade:\n" +"\t%s" +msgstr "" +"The following migrations are missing a downgrade:\n" +"\t%s" + +#: cinder/tests/test_storwize_svc.py:243 +#, python-format +msgid "unrecognized argument %s" +msgstr "unrecognized argument %s" + +#: cinder/tests/test_storwize_svc.py:1217 +#, python-format +msgid "Run CLI command: %s" +msgstr "Run CLI command: %s" + +#: cinder/tests/test_storwize_svc.py:1220 +#, python-format +msgid "" +"CLI output:\n" +" stdout: %(stdout)s\n" +" stderr: %(stderr)s" +msgstr "" +"CLI output:\n" +" stdout: %(stdout)s\n" +" stderr: %(stderr)s" + +#: cinder/tests/test_storwize_svc.py:1225 +#, python-format +msgid "" +"CLI Exception output:\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" +"CLI Exception output:\n" +" stdout: %(out)s\n" +" stderr: %(err)s" + +#: cinder/tests/test_volume_types.py:58 +#, python-format +msgid "Given data: %s" +msgstr "Given data: %s" + +#: cinder/tests/test_volume_types.py:59 +#, python-format +msgid "Result data: %s" +msgstr "Result data: %s" + +#: cinder/tests/api/contrib/test_backups.py:638 +msgid "Invalid input" +msgstr "Invalid input" + +#: cinder/tests/integrated/test_login.py:31 +#, 
python-format
+msgid "volume: %s"
+msgstr "volume: %s"
+
+#: cinder/tests/integrated/api/client.py:37
+#, python-format
+msgid ""
+"%(message)s\n"
+"Status Code: %(_status)s\n"
+"Body: %(_body)s"
+msgstr ""
+"%(message)s\n"
+"Status Code: %(_status)s\n"
+"Body: %(_body)s"
+
+#: cinder/tests/integrated/api/client.py:46
+msgid "Authentication error"
+msgstr "Authentication error"
+
+#: cinder/tests/integrated/api/client.py:54
+msgid "Authorization error"
+msgstr "Authorization error"
+
+#: cinder/tests/integrated/api/client.py:62
+msgid "Item not found"
+msgstr "Item not found"
+
+#: cinder/tests/integrated/api/client.py:104
+#, python-format
+msgid "Doing %(method)s on %(relative_url)s"
+msgstr "Doing %(method)s on %(relative_url)s"
+
+#: cinder/tests/integrated/api/client.py:106
+#, python-format
+msgid "Body: %s"
+msgstr "Body: %s"
+
+#: cinder/tests/integrated/api/client.py:124
+#, python-format
+msgid "%(auth_uri)s => code %(http_status)s"
+msgstr "%(auth_uri)s => code %(http_status)s"
+
+#: cinder/tests/integrated/api/client.py:150
+#, python-format
+msgid "%(relative_uri)s => code %(http_status)s"
+msgstr "%(relative_uri)s => code %(http_status)s"
+
+#: cinder/tests/integrated/api/client.py:160
+msgid "Unexpected status code"
+msgstr "Unexpected status code"
+
+#: cinder/tests/integrated/api/client.py:167
+#, python-format
+msgid "Decoding JSON: %s"
+msgstr "Decoding JSON: %s"
+
+#: cinder/volume/api.py:96
+msgid "May specify only one of snapshot, imageRef or source volume"
+msgstr "May specify only one of snapshot, imageRef or source volume"
+
+#: cinder/volume/api.py:103 cinder/volume/api.py:449
+#: cinder/volume/manager.py:554
+msgid "status must be available"
+msgstr "status must be available"
+
+#: cinder/volume/api.py:108
+msgid "Volume size cannot be lesser than the Snapshot size"
+msgstr "Volume size cannot be less than the Snapshot size"
+
+#: cinder/volume/api.py:117
+msgid "Unable to clone volumes that are in an error state"
+msgstr "Unable to clone volumes that are in an error state"
+
+#: cinder/volume/api.py:123
+msgid "Clones currently must be >= original volume size."
+msgstr "Clones currently must be >= original volume size."
+
+#: cinder/volume/api.py:140
+#, python-format
+msgid "Volume size '%s' must be an integer and greater than 0"
+msgstr "Volume size '%s' must be an integer and greater than 0"
+
+#: cinder/volume/api.py:150
+msgid "Size of specified image is larger than volume size."
+msgstr "Size of specified image is larger than volume size."
+
+#: cinder/volume/api.py:154
+msgid "Image minDisk size is larger than the volume size."
+msgstr "Image minDisk size is larger than the volume size."
+ +#: cinder/volume/api.py:168 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" + +#: cinder/volume/api.py:177 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create volume (%(d_consumed)d " +"volumes already consumed)" +msgstr "" +"Quota exceeded for %(s_pid)s, tried to create volume (%(d_consumed)d " +"volumes already consumed)" + +#: cinder/volume/api.py:308 +msgid "Failed to update quota for deleting volume" +msgstr "Failed to update quota for deleting volume" + +#: cinder/volume/api.py:316 +msgid "Volume status must be available or error" +msgstr "Volume status must be available or error" + +#: cinder/volume/api.py:321 +#, python-format +msgid "Volume still has %d dependent snapshots" +msgstr "Volume still has %d dependent snapshots" + +#: cinder/volume/api.py:374 cinder/volume/api.py:432 +#: cinder/volume/volume_types.py:65 +#, python-format +msgid "Searching by: %s" +msgstr "Searching by: %s" + +#: cinder/volume/api.py:452 +msgid "already attached" +msgstr "already attached" + +#: cinder/volume/api.py:459 +msgid "already detached" +msgstr "already detached" + +#: cinder/volume/api.py:470 +msgid "Volume status must be available to reserve" +msgstr "Volume status must be available to reserve" + +#: cinder/volume/api.py:519 +msgid "must be available" +msgstr "must be available" + +#: cinder/volume/api.py:537 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG snapshot " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG snapshot " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" + +#: cinder/volume/api.py:546 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create snapshot (%(d_consumed)d " +"snapshots already consumed)" +msgstr "" +"Quota exceeded for %(s_pid)s, tried to create snapshot (%(d_consumed)d " +"snapshots already consumed)" + +#: cinder/volume/api.py:595 +msgid "Volume Snapshot status must be available or error" +msgstr "Volume Snapshot status must be available or error" + +#: cinder/volume/api.py:623 +msgid "Metadata property key blank" +msgstr "Metadata property key blank" + +#: cinder/volume/api.py:627 +msgid "Metadata property key greater than 255 characters" +msgstr "Metadata property key greater than 255 characters" + +#: cinder/volume/api.py:631 +msgid "Metadata property value greater than 255 characters" +msgstr "Metadata property value greater than 255 characters" + +#: cinder/volume/api.py:716 +msgid "Volume status must be available/in-use." +msgstr "Volume status must be available/in-use." + +#: cinder/volume/api.py:719 +msgid "Volume status is in-use." +msgstr "Volume status is in-use." + +#: cinder/volume/driver.py:94 cinder/volume/drivers/netapp/nfs.py:253 +#, python-format +msgid "Recovering from a failed execute. Try number %s" +msgstr "Recovering from a failed execute. 
Try number %s" + +#: cinder/volume/driver.py:221 cinder/volume/drivers/emc/emc_smis_iscsi.py:119 +msgid "ISCSI provider_location not stored, using discovery" +msgstr "ISCSI provider_location not stored, using discovery" + +#: cinder/volume/driver.py:269 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "Could not find iSCSI export for volume %s" + +#: cinder/volume/driver.py:273 cinder/volume/drivers/emc/emc_smis_iscsi.py:162 +#, python-format +msgid "ISCSI Discovery: Found %s" +msgstr "ISCSI Discovery: Found %s" + +#: cinder/volume/driver.py:361 +#, python-format +msgid "copy_image_to_volume %s." +msgstr "copy_image_to_volume %s." + +#: cinder/volume/driver.py:378 +#, python-format +msgid "copy_volume_to_image %s." +msgstr "copy_volume_to_image %s." + +#: cinder/volume/driver.py:437 +#, python-format +msgid "iSCSI device not found at %s" +msgstr "iSCSI device not found at %s" + +#: cinder/volume/driver.py:439 +#, python-format +msgid "" +"ISCSI volume not yet found at: %(host_device)s. Will rescan & retry. Try" +" number: %(tries)s" +msgstr "" +"ISCSI volume not yet found at: %(host_device)s. Will rescan & retry. Try" +" number: %(tries)s" + +#: cinder/volume/driver.py:451 +#, python-format +msgid "Found iSCSI node %(host_device)s (after %(tries)s rescans)" +msgstr "Found iSCSI node %(host_device)s (after %(tries)s rescans)" + +#: cinder/volume/driver.py:469 cinder/volume/manager.py:689 +#: cinder/volume/drivers/lvm.py:560 cinder/volume/drivers/lvm.py:676 +#: cinder/volume/drivers/storwize_svc.py:1344 +#: cinder/volume/drivers/emc/emc_smis_common.py:855 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:241 +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1503 +#: cinder/volume/drivers/netapp/iscsi.py:1114 +#: cinder/volume/drivers/netapp/iscsi.py:1484 +#: cinder/volume/drivers/netapp/iscsi.py:2236 +#: cinder/volume/drivers/netapp/iscsi.py:2515 +#: cinder/volume/drivers/nexenta/volume.py:317 +msgid "Updating volume status" +msgstr "Updating volume status" + +#: cinder/volume/driver.py:544 +msgid "Driver must implement initialize_connection" +msgstr "Driver must implement initialize_connection" + +#: cinder/volume/manager.py:122 +#, python-format +msgid "Driver path %s is deprecated, update your configuration to the new path." +msgstr "Driver path %s is deprecated, update your configuration to the new path." 
+ +#: cinder/volume/manager.py:144 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "Re-exporting %s volumes" + +#: cinder/volume/manager.py:149 +#, python-format +msgid "volume %s stuck in a downloading state" +msgstr "volume %s stuck in a downloading state" + +#: cinder/volume/manager.py:154 +#, python-format +msgid "volume %s: skipping export" +msgstr "volume %s: skipping export" + +#: cinder/volume/manager.py:156 +msgid "Resuming any in progress delete operations" +msgstr "Resuming any in progress delete operations" + +#: cinder/volume/manager.py:159 +#, python-format +msgid "Resuming delete on volume: %s" +msgstr "Resuming delete on volume: %s" + +#: cinder/volume/manager.py:219 +#, python-format +msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +msgstr "volume %(vol_name)s: creating lv of size %(vol_size)sG" + +#: cinder/volume/manager.py:228 +#, python-format +msgid "volume %s: creating from snapshot" +msgstr "volume %s: creating from snapshot" + +#: cinder/volume/manager.py:232 +#, python-format +msgid "volume %s: creating from existing volume" +msgstr "volume %s: creating from existing volume" + +#: cinder/volume/manager.py:236 +#, python-format +msgid "volume %s: creating from image" +msgstr "volume %s: creating from image" + +#: cinder/volume/manager.py:245 +#, python-format +msgid "volume %s: creating" +msgstr "volume %s: creating" + +#: cinder/volume/manager.py:277 +#, python-format +msgid "volume %s: creating export" +msgstr "volume %s: creating export" + +#: cinder/volume/manager.py:286 +#, python-format +msgid "volume %s: create failed" +msgstr "volume %s: create failed" + +#: cinder/volume/manager.py:316 +#, python-format +msgid "volume %s: created successfully" +msgstr "volume %s: created successfully" + +#: cinder/volume/manager.py:324 +#, python-format +msgid "Error: %s" +msgstr "Error: %s" + +#: cinder/volume/manager.py:350 +#, python-format +msgid "volume %s: Error trying to reschedule create" +msgstr "volume %s: Error trying to reschedule create" + +#: cinder/volume/manager.py:368 +msgid "Retry info not present, will not reschedule" +msgstr "Retry info not present, will not reschedule" + +#: cinder/volume/manager.py:372 +msgid "No request spec, will not reschedule" +msgstr "No request spec, will not reschedule" + +#: cinder/volume/manager.py:377 +#, python-format +msgid "volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d" +msgstr "volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d" + +#: cinder/volume/manager.py:406 cinder/volume/manager.py:419 +#, python-format +msgid "volume %s: deleting" +msgstr "volume %s: deleting" + +#: cinder/volume/manager.py:412 +msgid "volume is not local to this node" +msgstr "volume is not local to this node" + +#: cinder/volume/manager.py:417 +#, python-format +msgid "volume %s: removing export" +msgstr "volume %s: removing export" + +#: cinder/volume/manager.py:422 +#, python-format +msgid "volume %s: volume is busy" +msgstr "volume %s: volume is busy" + +#: cinder/volume/manager.py:441 +msgid "Failed to update usages deleting volume" +msgstr "Failed to update usages deleting volume" + +#: cinder/volume/manager.py:445 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "volume %s: deleted successfully" + +#: cinder/volume/manager.py:460 +#, python-format +msgid "snapshot %s: creating" +msgstr "snapshot %s: creating" + +#: cinder/volume/manager.py:466 +#, python-format +msgid "snapshot %(snap_name)s: creating" +msgstr "snapshot %(snap_name)s: creating" + +#: 
cinder/volume/manager.py:484
+#, python-format
+msgid "snapshot %s: created successfully"
+msgstr "snapshot %s: created successfully"
+
+#: cinder/volume/manager.py:492 cinder/volume/manager.py:502
+#, python-format
+msgid "snapshot %s: deleting"
+msgstr "snapshot %s: deleting"
+
+#: cinder/volume/manager.py:505
+#, python-format
+msgid "snapshot %s: snapshot is busy"
+msgstr "snapshot %s: snapshot is busy"
+
+#: cinder/volume/manager.py:530
+msgid "Failed to update usages deleting snapshot"
+msgstr "Failed to update usages deleting snapshot"
+
+#: cinder/volume/manager.py:533
+#, python-format
+msgid "snapshot %s: deleted successfully"
+msgstr "snapshot %s: deleted successfully"
+
+#: cinder/volume/manager.py:551
+msgid "being attached by another instance"
+msgstr "being attached by another instance"
+
+#: cinder/volume/manager.py:608
+#, python-format
+msgid "Downloaded image %(image_id)s to %(volume_id)s successfully"
+msgstr "Downloaded image %(image_id)s to %(volume_id)s successfully"
+
+#: cinder/volume/manager.py:626
+#, python-format
+msgid "Uploaded volume %(volume_id)s to image (%(image_id)s) successfully"
+msgstr "Uploaded volume %(volume_id)s to image (%(image_id)s) successfully"
+
+#: cinder/volume/manager.py:702
+msgid "Clear capabilities"
+msgstr "Clear capabilities"
+
+#: cinder/volume/manager.py:706
+#, python-format
+msgid "Notification {%s} received"
+msgstr "Notification {%s} received"
+
+#: cinder/volume/volume_types.py:41
+#, python-format
+msgid "DB error: %s"
+msgstr "DB error: %s"
+
+#: cinder/volume/volume_types.py:50 cinder/volume/volume_types.py:97
+msgid "id cannot be None"
+msgstr "id cannot be None"
+
+#: cinder/volume/volume_types.py:109
+msgid "name cannot be None"
+msgstr "name cannot be None"
+
+#: cinder/volume/volume_types.py:128
+#, python-format
+msgid ""
+"Default volume type is not found, please check default_volume_type "
+"config: %s"
+msgstr ""
+"Default volume type is not found, please check default_volume_type "
+"config: %s"
+
+#: cinder/volume/drivers/coraid.py:91
+msgid "Running with CoraidDriver for ESM EtherCLoud"
+msgstr "Running with CoraidDriver for ESM EtherCloud"
+
+#: cinder/volume/drivers/coraid.py:102
+#, python-format
+msgid "Update session cookie %(session)s"
+msgstr "Update session cookie %(session)s"
+
+#: cinder/volume/drivers/coraid.py:108 cinder/volume/drivers/coraid.py:183
+#, python-format
+msgid "Message : %(message)s"
+msgstr "Message : %(message)s"
+
+#: cinder/volume/drivers/coraid.py:125
+#, python-format
+msgid "Error while trying to set group: %(message)s"
+msgstr "Error while trying to set group: %(message)s"
+
+#: cinder/volume/drivers/coraid.py:128
+#, python-format
+msgid "Unable to find group: %(group)s"
+msgstr "Unable to find group: %(group)s"
+
+#: cinder/volume/drivers/coraid.py:161
+msgid "ESM urlOpen error"
+msgstr "ESM urlOpen error"
+
+#: cinder/volume/drivers/coraid.py:166
+msgid "JSON Error"
+msgstr "JSON Error"
+
+#: cinder/volume/drivers/coraid.py:170
+msgid "Request without URL"
+msgstr "Request without URL"
+
+#: cinder/volume/drivers/coraid.py:175
+#, python-format
+msgid "Configure data : %s"
+msgstr "Configure data : %s"
+
+#: cinder/volume/drivers/coraid.py:177
+#, python-format
+msgid "Configure response : %s"
+msgstr "Configure response : %s"
+
+#: cinder/volume/drivers/coraid.py:199
+#, python-format
+msgid "Unable to retrive volume infos for volume %(volname)s"
+msgstr "Unable to retrieve volume info for volume %(volname)s"
+
+#: cinder/volume/drivers/coraid.py:289
+msgid "Cannot login on Coraid ESM"
+msgstr "Cannot log in to Coraid ESM"
+
+#: cinder/volume/drivers/coraid.py:308
+#, python-format
+msgid "Fail to create volume %(volname)s"
+msgstr "Failed to create volume %(volname)s"
+
+#: cinder/volume/drivers/coraid.py:321
+#, python-format
+msgid "Failed to delete volume %(volname)s"
+msgstr "Failed to delete volume %(volname)s"
+
+#: cinder/volume/drivers/coraid.py:335
+#, python-format
+msgid "Failed to Create Snapshot %(snapname)s"
+msgstr "Failed to Create Snapshot %(snapname)s"
+
+#: cinder/volume/drivers/coraid.py:347
+#, python-format
+msgid "Failed to Delete Snapshot %(snapname)s"
+msgstr "Failed to Delete Snapshot %(snapname)s"
+
+#: cinder/volume/drivers/coraid.py:362
+#, python-format
+msgid "Failed to Create Volume from Snapshot %(snapname)s"
+msgstr "Failed to Create Volume from Snapshot %(snapname)s"
+
+#: cinder/volume/drivers/coraid.py:383
+#, python-format
+msgid ""
+"Failed to Initialize Connection. Volume Name: %(volname)s Shelf: "
+"%(shelf)s, Lun: %(lun)s"
+msgstr ""
+"Failed to Initialize Connection. Volume Name: %(volname)s Shelf: "
+"%(shelf)s, Lun: %(lun)s"
+
+#: cinder/volume/drivers/glusterfs.py:65
+#, python-format
+msgid "There's no Gluster config file configured (%s)"
+msgstr "There's no Gluster config file configured (%s)"
+
+#: cinder/volume/drivers/glusterfs.py:70
+#, python-format
+msgid "Gluster config file at %(config)s doesn't exist"
+msgstr "Gluster config file at %(config)s doesn't exist"
+
+#: cinder/volume/drivers/glusterfs.py:80
+msgid "mount.glusterfs is not installed"
+msgstr "mount.glusterfs is not installed"
+
+#: cinder/volume/drivers/glusterfs.py:98 cinder/volume/drivers/nfs.py:168
+#, python-format
+msgid "casted to %s"
+msgstr "casted to %s"
+
+#: cinder/volume/drivers/glusterfs.py:108 cinder/volume/drivers/nfs.py:178
+#, python-format
+msgid "Volume %s does not have provider_location specified, skipping"
+msgstr "Volume %s does not have provider_location specified, skipping"
+
+#: cinder/volume/drivers/glusterfs.py:168 cinder/volume/drivers/nfs.py:237
+#, python-format
+msgid "Exception during mounting %s"
+msgstr "Exception during mounting %s"
+
+#: cinder/volume/drivers/glusterfs.py:247 cinder/volume/drivers/nfs.py:321
+#, python-format
+msgid "%s is already mounted"
+msgstr "%s is already mounted"
+
+#: cinder/volume/drivers/lvm.py:82
+#, python-format
+msgid "volume group %s doesn't exist"
+msgstr "volume group %s doesn't exist"
+
+#: cinder/volume/drivers/lvm.py:197
+#, python-format
+msgid "Size for volume: %s not found, skipping secure delete."
+msgstr "Size for volume: %s not found, skipping secure delete."
+
+#: cinder/volume/drivers/lvm.py:204
+#, python-format
+msgid "Performing secure delete on volume: %s"
+msgstr "Performing secure delete on volume: %s"
+
+#: cinder/volume/drivers/lvm.py:218
+#, python-format
+msgid "Error unrecognized volume_clear option: %s"
+msgstr "Error unrecognized volume_clear option: %s"
+
+#: cinder/volume/drivers/lvm.py:238
+#, python-format
+msgid "snapshot: %s not found, skipping delete operations"
+msgstr "snapshot: %s not found, skipping delete operations"
+
+#: cinder/volume/drivers/lvm.py:268 cinder/volume/drivers/lvm.py:656
+#, python-format
+msgid "Creating clone of volume: %s"
+msgstr "Creating clone of volume: %s"
+
+#: cinder/volume/drivers/lvm.py:345
+#, python-format
+msgid "Skipping ensure_export. No iscsi_target provision for volume: %s"
+msgstr "Skipping ensure_export. 
No iscsi_target provision for volume: %s" + +#: cinder/volume/drivers/lvm.py:364 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %s" +msgstr "Skipping ensure_export. No iscsi_target provisioned for volume: %s" + +#: cinder/volume/drivers/lvm.py:378 +msgid "Detected inconsistency in provider_location id" +msgstr "Detected inconsistency in provider_location id" + +#: cinder/volume/drivers/lvm.py:430 +#, python-format +msgid "Symbolic link %s not found" +msgstr "Symbolic link %s not found" + +#: cinder/volume/drivers/lvm.py:510 cinder/volume/drivers/lvm.py:524 +#, python-format +msgid "Skipping remove_export. No iscsi_target provisioned for volume: %s" +msgstr "Skipping remove_export. No iscsi_target provisioned for volume: %s" + +#: cinder/volume/drivers/lvm.py:542 +#, python-format +msgid "" +"Skipping remove_export. No iscsi_target is presently exported for volume:" +" %s" +msgstr "" +"Skipping remove_export. No iscsi_target is presently exported for volume:" +" %s" + +#: cinder/volume/drivers/lvm.py:583 +msgid "Error retrieving volume status: " +msgstr "Error retrieving volume status: " + +#: cinder/volume/drivers/nfs.py:141 +#, python-format +msgid "There's no NFS config file configured (%s)" +msgstr "There's no NFS config file configured (%s)" + +#: cinder/volume/drivers/nfs.py:146 +#, python-format +msgid "NFS config file at %(config)s doesn't exist" +msgstr "NFS config file at %(config)s doesn't exist" + +#: cinder/volume/drivers/rbd.py:64 +#, python-format +msgid "rbd has no pool %s" +msgstr "rbd has no pool %s" + +#: cinder/volume/drivers/rbd.py:87 cinder/volume/drivers/sheepdog.py:134 +msgid "error refreshing volume stats" +msgstr "error refreshing volume stats" + +#: cinder/volume/drivers/rbd.py:220 +msgid "Not stored in rbd" +msgstr "Not stored in rbd" + +#: cinder/volume/drivers/rbd.py:224 +msgid "Blank components" +msgstr "Blank components" + +#: cinder/volume/drivers/rbd.py:227 +msgid "Not an rbd snapshot" +msgstr "Not an rbd snapshot" + +#: cinder/volume/drivers/rbd.py:242 +#, python-format +msgid "%s is in a different ceph cluster" +msgstr "%s is in a different ceph cluster" + +#: cinder/volume/drivers/rbd.py:253 +#, python-format +msgid "Unable to read image %s" +msgstr "Unable to read image %s" + +#: cinder/volume/drivers/scality.py:63 +msgid "Value required for 'scality_sofs_config'" +msgstr "Value required for 'scality_sofs_config'" + +#: cinder/volume/drivers/scality.py:74 +#, python-format +msgid "Cannot access 'scality_sofs_config': %s" +msgstr "Cannot access 'scality_sofs_config': %s" + +#: cinder/volume/drivers/scality.py:80 +msgid "Cannot execute /sbin/mount.sofs" +msgstr "Cannot execute /sbin/mount.sofs" + +#: cinder/volume/drivers/scality.py:101 +msgid "Cannot mount Scality SOFS, check syslog for errors" +msgstr "Cannot mount Scality SOFS, check syslog for errors" + +#: cinder/volume/drivers/scality.py:135 +#, python-format +msgid "Cannot find volume dir for Scality SOFS at '%s'" +msgstr "Cannot find volume dir for Scality SOFS at '%s'" + +#: cinder/volume/drivers/sheepdog.py:47 +#, python-format +msgid "Sheepdog is not working: %s" +msgstr "Sheepdog is not working: %s" + +#: cinder/volume/drivers/sheepdog.py:52 +msgid "Sheepdog is not working" +msgstr "Sheepdog is not working" + +#: cinder/volume/drivers/solidfire.py:129 +#, python-format +msgid "Payload for SolidFire API call: %s" +msgstr "Payload for SolidFire API call: %s" + +#: cinder/volume/drivers/solidfire.py:146 +#, python-format +msgid "Call to json.loads() 
raised an exception: %s"
+msgstr "Call to json.loads() raised an exception: %s"
+
+#: cinder/volume/drivers/solidfire.py:152
+#, python-format
+msgid "Results of SolidFire API call: %s"
+msgstr "Results of SolidFire API call: %s"
+
+#: cinder/volume/drivers/solidfire.py:156
+#, python-format
+msgid "Clone operation encountered: %s"
+msgstr "Clone operation encountered: %s"
+
+#: cinder/volume/drivers/solidfire.py:158
+#, python-format
+msgid "Waiting for outstanding operation before retrying snapshot: %s"
+msgstr "Waiting for outstanding operation before retrying snapshot: %s"
+
+#: cinder/volume/drivers/solidfire.py:164
+#, python-format
+msgid "Detected xDBVersionMismatch, retry %s of 5"
+msgstr "Detected xDBVersionMismatch, retry %s of 5"
+
+#: cinder/volume/drivers/solidfire.py:171
+#, python-format
+msgid "API response: %s"
+msgstr "API response: %s"
+
+#: cinder/volume/drivers/solidfire.py:191
+#, python-format
+msgid "Found solidfire account: %s"
+msgstr "Found solidfire account: %s"
+
+#: cinder/volume/drivers/solidfire.py:220
+#, python-format
+msgid "solidfire account: %s does not exist, create it..."
+msgstr "solidfire account: %s does not exist, create it..."
+
+#: cinder/volume/drivers/solidfire.py:281
+#, python-format
+msgid "Failed to retrieve volume SolidFire-ID: %s in get_by_account!"
+msgstr "Failed to retrieve volume SolidFire-ID: %s in get_by_account!"
+
+#: cinder/volume/drivers/solidfire.py:338
+msgid "Failed to get model update from clone"
+msgstr "Failed to get model update from clone"
+
+#: cinder/volume/drivers/solidfire.py:364
+#, python-format
+msgid "More than one valid preset was detected, using %s"
+msgstr "More than one valid preset was detected, using %s"
+
+#: cinder/volume/drivers/solidfire.py:397
+#, python-format
+msgid "Mapped SolidFire volumeID %(sfid)s to cinder ID %(uuid)s."
+msgstr "Mapped SolidFire volumeID %(sfid)s to cinder ID %(uuid)s."
+
+#: cinder/volume/drivers/solidfire.py:406
+#, python-format
+msgid "Volume %s, not found on SF Cluster."
+msgstr "Volume %s, not found on SF Cluster."
+
+#: cinder/volume/drivers/solidfire.py:409
+#, python-format
+msgid "Found %(count)s volumes mapped to id: %(uuid)s."
+msgstr "Found %(count)s volumes mapped to id: %(uuid)s."
+
+#: cinder/volume/drivers/solidfire.py:476
+msgid "Enter SolidFire delete_volume..."
+msgstr "Enter SolidFire delete_volume..."
+
+#: cinder/volume/drivers/solidfire.py:480
+#, python-format
+msgid "Account for Volume ID %s was not found on the SolidFire Cluster!"
+msgstr "Account for Volume ID %s was not found on the SolidFire Cluster!"
+
+#: cinder/volume/drivers/solidfire.py:482
+msgid "This usually means the volume was never successfully created."
+msgstr "This usually means the volume was never successfully created."
+
+#: cinder/volume/drivers/solidfire.py:497
+#, python-format
+msgid "Volume ID %s was not found on the SolidFire Cluster!"
+msgstr "Volume ID %s was not found on the SolidFire Cluster!"
+
+#: cinder/volume/drivers/solidfire.py:500
+msgid "Leaving SolidFire delete_volume"
+msgstr "Leaving SolidFire delete_volume"
+
+#: cinder/volume/drivers/solidfire.py:504
+msgid "Executing SolidFire ensure_export..."
+msgstr "Executing SolidFire ensure_export..."
+
+#: cinder/volume/drivers/solidfire.py:509
+msgid "Executing SolidFire create_export..."
+msgstr "Executing SolidFire create_export..."
+ +#: cinder/volume/drivers/solidfire.py:558 +msgid "Updating cluster status info" +msgstr "Updating cluster status info" + +#: cinder/volume/drivers/solidfire.py:566 +msgid "Failed to get updated stats" +msgstr "Failed to get updated stats" + +#: cinder/volume/drivers/storwize_svc.py:173 +#, python-format +msgid "WWPN on node %(node)s: %(wwpn)s" +msgstr "WWPN on node %(node)s: %(wwpn)s" + +#: cinder/volume/drivers/storwize_svc.py:179 +msgid "enter: do_setup" +msgstr "enter: do_setup" + +#: cinder/volume/drivers/storwize_svc.py:190 +#, python-format +msgid "pool %s doesn't exist" +msgstr "pool %s doesn't exist" + +#: cinder/volume/drivers/storwize_svc.py:206 +msgid "Failed to get license information." +msgstr "Failed to get license information." + +#: cinder/volume/drivers/storwize_svc.py:265 +msgid "do_setup: No configured nodes" +msgstr "do_setup: No configured nodes" + +#: cinder/volume/drivers/storwize_svc.py:267 +msgid "leave: do_setup" +msgstr "leave: do_setup" + +#: cinder/volume/drivers/storwize_svc.py:289 +msgid "enter: check_for_setup_error" +msgstr "enter: check_for_setup_error" + +#: cinder/volume/drivers/storwize_svc.py:295 +#: cinder/volume/drivers/netapp/iscsi.py:169 +#: cinder/volume/drivers/netapp/iscsi.py:1182 +#: cinder/volume/drivers/netapp/iscsi.py:1535 +#: cinder/volume/drivers/netapp/nfs.py:109 +#: cinder/volume/drivers/netapp/nfs.py:328 +#: cinder/volume/drivers/netapp/nfs.py:379 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:118 +#, python-format +msgid "%s is not set" +msgstr "%s is not set" + +#: cinder/volume/drivers/storwize_svc.py:301 +msgid "" +"Password or SSH private key is required for authentication: set either " +"san_password or san_private_key option" +msgstr "" +"Password or SSH private key is required for authentication: set either " +"san_password or san_private_key option" + +#: cinder/volume/drivers/storwize_svc.py:309 +#, python-format +msgid "" +"Illegal value %d specified for storwize_svc_flashcopy_timeout: valid " +"values are between 0 and 600" +msgstr "" +"Illegal value %d specified for storwize_svc_flashcopy_timeout: valid " +"values are between 0 and 600" + +#: cinder/volume/drivers/storwize_svc.py:317 +msgid "leave: check_for_setup_error" +msgstr "leave: check_for_setup_error" + +#: cinder/volume/drivers/storwize_svc.py:331 +#, python-format +msgid "ensure_export: Volume %s not found on storage" +msgstr "ensure_export: Volume %s not found on storage" + +#: cinder/volume/drivers/storwize_svc.py:356 +#, python-format +msgid "enter: _get_chap_secret_for_host: host name %s" +msgstr "enter: _get_chap_secret_for_host: host name %s" + +#: cinder/volume/drivers/storwize_svc.py:392 +#, python-format +msgid "" +"leave: _get_chap_secret_for_host: host name %(host_name)s with secret " +"%(chap_secret)s" +msgstr "" +"leave: _get_chap_secret_for_host: host name %(host_name)s with secret " +"%(chap_secret)s" + +#: cinder/volume/drivers/storwize_svc.py:415 +msgid "_create_host: Cannot clean host name. Host name is not unicode or string" +msgstr "_create_host: Cannot clean host name. 
Host name is not unicode or string"
+
+#: cinder/volume/drivers/storwize_svc.py:483
+#, python-format
+msgid "enter: _get_host_from_connector: prefix %s"
+msgstr "enter: _get_host_from_connector: prefix %s"
+
+#: cinder/volume/drivers/storwize_svc.py:511
+#, python-format
+msgid "leave: _get_host_from_connector: host %s"
+msgstr "leave: _get_host_from_connector: host %s"
+
+#: cinder/volume/drivers/storwize_svc.py:523
+#, python-format
+msgid "enter: _create_host: host %s"
+msgstr "enter: _create_host: host %s"
+
+#: cinder/volume/drivers/storwize_svc.py:538
+msgid "_create_host: No connector ports"
+msgstr "_create_host: No connector ports"
+
+#: cinder/volume/drivers/storwize_svc.py:551
+#, python-format
+msgid "leave: _create_host: host %(host)s - %(host_name)s"
+msgstr "leave: _create_host: host %(host)s - %(host_name)s"
+
+#: cinder/volume/drivers/storwize_svc.py:574
+#, python-format
+msgid "enter: _map_vol_to_host: volume %(volume_name)s to host %(host_name)s"
+msgstr "enter: _map_vol_to_host: volume %(volume_name)s to host %(host_name)s"
+
+#: cinder/volume/drivers/storwize_svc.py:609
+msgid ""
+"storwize_svc_multihostmap_enabled is set to False, not allowing multi "
+"host mapping"
+msgstr ""
+"storwize_svc_multihostmap_enabled is set to False, not allowing multi "
+"host mapping"
+
+#: cinder/volume/drivers/storwize_svc.py:619
+#, python-format
+msgid "volume %s mapping to multi host"
+msgstr "volume %s mapping to multi host"
+
+#: cinder/volume/drivers/storwize_svc.py:625
+#, python-format
+msgid ""
+"leave: _map_vol_to_host: LUN %(result_lun)s, volume %(volume_name)s, host"
+" %(host_name)s"
+msgstr ""
+"leave: _map_vol_to_host: LUN %(result_lun)s, volume %(volume_name)s, host"
+" %(host_name)s"
+
+#: cinder/volume/drivers/storwize_svc.py:635
+#, python-format
+msgid "enter: _delete_host: host %s "
+msgstr "enter: _delete_host: host %s "
+
+#: cinder/volume/drivers/storwize_svc.py:643
+#, python-format
+msgid "leave: _delete_host: host %s "
+msgstr "leave: _delete_host: host %s "
+
+#: cinder/volume/drivers/storwize_svc.py:675
+#, python-format
+msgid "enter: initialize_connection: volume %(vol)s with connector %(conn)s"
+msgstr "enter: initialize_connection: volume %(vol)s with connector %(conn)s"
+
+#: cinder/volume/drivers/storwize_svc.py:691
+msgid "_create_host failed to return the host name."
+msgstr "_create_host failed to return the host name."
+ +#: cinder/volume/drivers/storwize_svc.py:702 +#, python-format +msgid "initialize_connection: Failed to get attributes for volume %s" +msgstr "initialize_connection: Failed to get attributes for volume %s" + +#: cinder/volume/drivers/storwize_svc.py:709 +#, python-format +msgid "Did not find expected column name in lsvdisk: %s" +msgstr "Did not find expected column name in lsvdisk: %s" + +#: cinder/volume/drivers/storwize_svc.py:711 +#, python-format +msgid "initialize_connection: Missing volume attribute for volume %s" +msgstr "initialize_connection: Missing volume attribute for volume %s" + +#: cinder/volume/drivers/storwize_svc.py:728 +#, python-format +msgid "" +"initialize_connection: No node found in I/O group %(gid)s for volume " +"%(vol)s" +msgstr "" +"initialize_connection: No node found in I/O group %(gid)s for volume " +"%(vol)s" + +#: cinder/volume/drivers/storwize_svc.py:736 +#, python-format +msgid "initialize_connection: Did not find a preferred node for volume %s" +msgstr "initialize_connection: Did not find a preferred node for volume %s" + +#: cinder/volume/drivers/storwize_svc.py:769 +#, python-format +msgid "" +"initialize_connection: Failed to collect return properties for volume " +"%(vol)s and connector %(conn)s.\n" +msgstr "" +"initialize_connection: Failed to collect return properties for volume " +"%(vol)s and connector %(conn)s.\n" + +#: cinder/volume/drivers/storwize_svc.py:774 +#, python-format +msgid "" +"leave: initialize_connection:\n" +" volume: %(vol)s\n" +" connector %(conn)s\n" +" properties: %(prop)s" +msgstr "" +"leave: initialize_connection:\n" +" volume: %(vol)s\n" +" connector %(conn)s\n" +" properties: %(prop)s" + +#: cinder/volume/drivers/storwize_svc.py:792 +#, python-format +msgid "enter: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "enter: terminate_connection: volume %(vol)s with connector %(conn)s" + +#: cinder/volume/drivers/storwize_svc.py:802 +msgid "_get_host_from_connector failed to return the host name for connector" +msgstr "_get_host_from_connector failed to return the host name for connector" + +#: cinder/volume/drivers/storwize_svc.py:817 +#, python-format +msgid "" +"terminate_connection: No mapping of volume %(vol_name)s to host " +"%(host_name)s found" +msgstr "" +"terminate_connection: No mapping of volume %(vol_name)s to host " +"%(host_name)s found" + +#: cinder/volume/drivers/storwize_svc.py:825 +#, python-format +msgid "leave: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "leave: terminate_connection: volume %(vol)s with connector %(conn)s" + +#: cinder/volume/drivers/storwize_svc.py:883 +msgid "protocol must be specified as ' iSCSI' or ' FC'" +msgstr "protocol must be specified as ' iSCSI' or ' FC'" + +#: cinder/volume/drivers/storwize_svc.py:907 +#, python-format +msgid "enter: _create_vdisk: vdisk %s " +msgstr "enter: _create_vdisk: vdisk %s " + +#: cinder/volume/drivers/storwize_svc.py:945 +#, python-format +msgid "" +"_create_vdisk %(name)s - did not find success message in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" +"_create_vdisk %(name)s - did not find success message in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s" + +#: cinder/volume/drivers/storwize_svc.py:950 +#, python-format +msgid "leave: _create_vdisk: volume %s " +msgstr "leave: _create_vdisk: volume %s " + +#: cinder/volume/drivers/storwize_svc.py:962 +#: cinder/volume/drivers/storwize_svc.py:976 +#, python-format +msgid "" +"create FC mapping from %(source)s to 
%(target)s - did not find success "
+"message in CLI output.\n"
+" stdout: %(out)s\n"
+" stderr: %(err)s\n"
+msgstr ""
+"create FC mapping from %(source)s to %(target)s - did not find success "
+"message in CLI output.\n"
+" stdout: %(out)s\n"
+" stderr: %(err)s\n"
+
+#: cinder/volume/drivers/storwize_svc.py:988
+#: cinder/volume/drivers/storwize_svc.py:998
+#, python-format
+msgid ""
+"create FC mapping from %(source)s to %(target)s - did not find mapping id"
+" in CLI output.\n"
+" stdout: %(out)s\n"
+" stderr: %(err)s\n"
+msgstr ""
+"create FC mapping from %(source)s to %(target)s - did not find mapping id"
+" in CLI output.\n"
+" stdout: %(out)s\n"
+" stderr: %(err)s\n"
+
+#: cinder/volume/drivers/storwize_svc.py:1012
+#, python-format
+msgid ""
+"_prepare_fc_map: Failed to prepare FlashCopy from %(source)s to "
+"%(target)s.\n"
+"stdout: %(out)s\n"
+" stderr: %(err)s"
+msgstr ""
+"_prepare_fc_map: Failed to prepare FlashCopy from %(source)s to "
+"%(target)s.\n"
+"stdout: %(out)s\n"
+" stderr: %(err)s"
+
+#: cinder/volume/drivers/storwize_svc.py:1039
+#, python-format
+msgid ""
+"Unexpected mapping status %(status)s for mapping %(id)s. Attributes: "
+"%(attr)s"
+msgstr ""
+"Unexpected mapping status %(status)s for mapping %(id)s. Attributes: "
+"%(attr)s"
+
+#: cinder/volume/drivers/storwize_svc.py:1050
+#, python-format
+msgid ""
+"Mapping %(id)s prepare failed to complete within the allotted %(to)d "
+"seconds timeout. Terminating."
+msgstr ""
+"Mapping %(id)s prepare failed to complete within the allotted %(to)d "
+"seconds timeout. Terminating."
+
+#: cinder/volume/drivers/storwize_svc.py:1055
+#, python-format
+msgid ""
+"_prepare_fc_map: Failed to start FlashCopy from %(source)s to %(target)s "
+"with exception %(ex)s"
+msgstr ""
+"_prepare_fc_map: Failed to start FlashCopy from %(source)s to %(target)s "
+"with exception %(ex)s"
+
+#: cinder/volume/drivers/storwize_svc.py:1062
+#, python-format
+msgid "_prepare_fc_map: %s"
+msgstr "_prepare_fc_map: %s"
+
+#: cinder/volume/drivers/storwize_svc.py:1069
+#, python-format
+msgid ""
+"_start_fc_map: Failed to start FlashCopy from %(source)s to %(target)s.\n"
+"stdout: %(out)s\n"
+" stderr: %(err)s"
+msgstr ""
+"_start_fc_map: Failed to start FlashCopy from %(source)s to %(target)s.\n"
+"stdout: %(out)s\n"
+" stderr: %(err)s"
+
+#: cinder/volume/drivers/storwize_svc.py:1080
+#, python-format
+msgid ""
+"enter: _run_flashcopy: execute FlashCopy from source %(source)s to target"
+" %(target)s"
+msgstr ""
+"enter: _run_flashcopy: execute FlashCopy from source %(source)s to target"
+" %(target)s"
+
+#: cinder/volume/drivers/storwize_svc.py:1092
+#, python-format
+msgid "leave: _run_flashcopy: FlashCopy started from %(source)s to %(target)s"
+msgstr "leave: _run_flashcopy: FlashCopy started from %(source)s to %(target)s"
+
+#: cinder/volume/drivers/storwize_svc.py:1100
+#, python-format
+msgid "enter: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s"
+msgstr "enter: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s"
+
+#: cinder/volume/drivers/storwize_svc.py:1107
+#, python-format
+msgid "_create_copy: Source vdisk %s does not exist"
+msgstr "_create_copy: Source vdisk %s does not exist"
+
+#: cinder/volume/drivers/storwize_svc.py:1119
+#, python-format
+msgid ""
+"_create_copy: cannot get source vdisk %(src)s capacity from vdisk "
+"attributes %(attr)s"
+msgstr ""
+"_create_copy: cannot get source vdisk %(src)s capacity from vdisk "
+"attributes %(attr)s"
+
+#: cinder/volume/drivers/storwize_svc.py:1129
+#, 
python-format +msgid "leave: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +msgstr "leave: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" + +#: cinder/volume/drivers/storwize_svc.py:1134 +#, python-format +msgid "enter: _get_flashcopy_mapping_attributes: mapping %s" +msgstr "enter: _get_flashcopy_mapping_attributes: mapping %s" + +#: cinder/volume/drivers/storwize_svc.py:1156 +#, python-format +msgid "" +"leave: _get_flashcopy_mapping_attributes: mapping %(fc_map_id)s, " +"attributes %(attributes)s" +msgstr "" +"leave: _get_flashcopy_mapping_attributes: mapping %(fc_map_id)s, " +"attributes %(attributes)s" + +#: cinder/volume/drivers/storwize_svc.py:1164 +#, python-format +msgid "enter: _is_vdisk_defined: vdisk %s " +msgstr "enter: _is_vdisk_defined: vdisk %s " + +#: cinder/volume/drivers/storwize_svc.py:1166 +#, python-format +msgid "leave: _is_vdisk_defined: vdisk %(vol)s with %(str)s " +msgstr "leave: _is_vdisk_defined: vdisk %(vol)s with %(str)s " + +#: cinder/volume/drivers/storwize_svc.py:1194 +#, python-format +msgid "enter: _delete_vdisk: vdisk %s" +msgstr "enter: _delete_vdisk: vdisk %s" + +#: cinder/volume/drivers/storwize_svc.py:1199 +#, python-format +msgid "warning: Tried to delete vdisk %s but it does not exist." +msgstr "warning: Tried to delete vdisk %s but it does not exist." + +#: cinder/volume/drivers/storwize_svc.py:1225 +#, python-format +msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s" +msgstr "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s" + +#: cinder/volume/drivers/storwize_svc.py:1258 +#, python-format +msgid "leave: _delete_vdisk: vdisk %s" +msgstr "leave: _delete_vdisk: vdisk %s" + +#: cinder/volume/drivers/storwize_svc.py:1283 +msgid "create_volume_from_snapshot: Source and destination size differ." +msgstr "create_volume_from_snapshot: Source and destination size differ." + +#: cinder/volume/drivers/storwize_svc.py:1297 +msgid "create_cloned_volume: Source and destination size differ." +msgstr "create_cloned_volume: Source and destination size differ." 
+ +#: cinder/volume/drivers/storwize_svc.py:1361 +msgid "_update_volume_status: Could not get system name" +msgstr "_update_volume_status: Could not get system name" + +#: cinder/volume/drivers/storwize_svc.py:1373 +msgid "Could not get pool data from the storage" +msgstr "Could not get pool data from the storage" + +#: cinder/volume/drivers/storwize_svc.py:1374 +msgid "_update_volume_status: Could not get storage pool data" +msgstr "_update_volume_status: Could not get storage pool data" + +#: cinder/volume/drivers/storwize_svc.py:1412 +msgid "" +"Illegal value specified for storwize_svc_vol_rsize: set to either a " +"percentage (0-100) or -1" +msgstr "" +"Illegal value specified for storwize_svc_vol_rsize: set to either a " +"percentage (0-100) or -1" + +#: cinder/volume/drivers/storwize_svc.py:1418 +msgid "" +"Illegal value specified for storwize_svc_vol_warning: set to a percentage" +" (0-100)" +msgstr "" +"Illegal value specified for storwize_svc_vol_warning: set to a percentage" +" (0-100)" + +#: cinder/volume/drivers/storwize_svc.py:1425 +msgid "" +"Illegal value specified for storwize_svc_vol_grainsize: set to either 32," +" 64, 128, or 256" +msgstr "" +"Illegal value specified for storwize_svc_vol_grainsize: set to either 32," +" 64, 128, or 256" + +#: cinder/volume/drivers/storwize_svc.py:1432 +msgid "System does not support compression" +msgstr "System does not support compression" + +#: cinder/volume/drivers/storwize_svc.py:1437 +msgid "If compression is set to True, rsize must also be set (not equal to -1)" +msgstr "If compression is set to True, rsize must also be set (not equal to -1)" + +#: cinder/volume/drivers/storwize_svc.py:1443 +#, python-format +msgid "" +"Illegal value %(prot)s specified for storwize_svc_connection_protocol: " +"valid values are %(enabled)s" +msgstr "" +"Illegal value %(prot)s specified for storwize_svc_connection_protocol: " +"valid values are %(enabled)s" + +#: cinder/volume/drivers/storwize_svc.py:1452 +msgid "" +"Multipath is currently only supported for FC connections and not iSCSI. " +"(This is a Nova limitation.)" +msgstr "" +"Multipath is currently only supported for FC connections and not iSCSI. 
" +"(This is a Nova limitation.)" + +#: cinder/volume/drivers/storwize_svc.py:1464 +#, python-format +msgid "enter: _execute_command_and_parse_attributes: command %s" +msgstr "enter: _execute_command_and_parse_attributes: command %s" + +#: cinder/volume/drivers/storwize_svc.py:1471 +#, python-format +msgid "" +"CLI Exception output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" +"CLI Exception output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" + +#: cinder/volume/drivers/storwize_svc.py:1488 +#, python-format +msgid "" +"leave: _execute_command_and_parse_attributes:\n" +"command: %(cmd)s\n" +"attributes: %(attr)s" +msgstr "" +"leave: _execute_command_and_parse_attributes:\n" +"command: %(cmd)s\n" +"attributes: %(attr)s" + +#: cinder/volume/drivers/storwize_svc.py:1507 +#, python-format +msgid "" +"_get_hdr_dic: attribute headers and values do not match.\n" +" Headers: %(header)s\n" +" Values: %(row)s" +msgstr "" +"_get_hdr_dic: attribute headers and values do not match.\n" +" Headers: %(header)s\n" +" Values: %(row)s" + +#: cinder/volume/drivers/storwize_svc.py:1517 +#, python-format +msgid "" +"%(fun)s: Failed with unexpected CLI output.\n" +" Command: %(cmd)s\n" +"stdout: %(out)s\n" +"stderr: %(err)s\n" +msgstr "" +"%(fun)s: Failed with unexpected CLI output.\n" +" Command: %(cmd)s\n" +"stdout: %(out)s\n" +"stderr: %(err)s\n" + +#: cinder/volume/drivers/storwize_svc.py:1531 +#, python-format +msgid "" +"%(fun)s: Failed with unexpected CLI output.\n" +" Command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" +"%(fun)s: Failed with unexpected CLI output.\n" +" Command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" + +#: cinder/volume/drivers/storwize_svc.py:1539 +#, python-format +msgid "Did not find expected column in %(fun)s: %(hdr)s" +msgstr "Did not find expected column in %(fun)s: %(hdr)s" + +#: cinder/volume/drivers/windows.py:142 +#, python-format +msgid "Creating folder %s " +msgstr "Creating folder %s " + +#: cinder/volume/drivers/windows.py:207 +#: cinder/volume/drivers/nexenta/volume.py:203 +#, python-format +msgid "Ignored target creation error \"%s\" while ensuring export" +msgstr "Ignored target creation error \"%s\" while ensuring export" + +#: cinder/volume/drivers/windows.py:213 +#, python-format +msgid "Disk not found: %s" +msgstr "Disk not found: %s" + +#: cinder/volume/drivers/zadara.py:218 +#, python-format +msgid "Sending %(method)s to %(url)s. Body \"%(body)s\"" +msgstr "Sending %(method)s to %(url)s. Body \"%(body)s\"" + +#: cinder/volume/drivers/zadara.py:240 +#, python-format +msgid "Operation completed. %(data)s" +msgstr "Operation completed. %(data)s" + +#: cinder/volume/drivers/zadara.py:350 +#, python-format +msgid "Volume %(name)s could not be found. It might be already deleted" +msgstr "Volume %(name)s could not be found. It might be already deleted" + +#: cinder/volume/drivers/zadara.py:447 +#, python-format +msgid "Attach properties: %(properties)s" +msgstr "Attach properties: %(properties)s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:42 +msgid "" +"Module PyWBEM not installed. Install PyWBEM using the python-pywbem " +"package." +msgstr "" +"Module PyWBEM not installed. Install PyWBEM using the python-pywbem " +"package." + +#: cinder/volume/drivers/emc/emc_smis_common.py:78 +msgid "Entering create_volume." +msgstr "Entering create_volume." 
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:82
+#, python-format
+msgid "Create Volume: %(volume)s Size: %(size)lu"
+msgstr "Create Volume: %(volume)s Size: %(size)lu"
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:90
+#, python-format
+msgid "Create Volume: %(volume)s Storage type: %(storage_type)s"
+msgstr "Create Volume: %(volume)s Storage type: %(storage_type)s"
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:97
+#, python-format
+msgid ""
+"Create Volume: %(volume)s Pool: %(pool)s Storage System: "
+"%(storage_system)s"
+msgstr ""
+"Create Volume: %(volume)s Pool: %(pool)s Storage System: "
+"%(storage_system)s"
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:106
+#, python-format
+msgid ""
+"Error Create Volume: %(volumename)s. Storage Configuration Service not "
+"found for pool %(storage_type)s."
+msgstr ""
+"Error Create Volume: %(volumename)s. Storage Configuration Service not "
+"found for pool %(storage_type)s."
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:114
+#, python-format
+msgid ""
+"Create Volume: %(name)s Method: CreateOrModifyElementFromStoragePool "
+"ConfigService: %(service)s ElementName: %(name)s InPool: %(pool)s "
+"ElementType: 5 Size: %(size)lu"
+msgstr ""
+"Create Volume: %(name)s Method: CreateOrModifyElementFromStoragePool "
+"ConfigService: %(service)s ElementName: %(name)s InPool: %(pool)s "
+"ElementType: 5 Size: %(size)lu"
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:129
+#, python-format
+msgid "Create Volume: %(volumename)s Return code: %(rc)lu"
+msgstr "Create Volume: %(volumename)s Return code: %(rc)lu"
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:136
+#, python-format
+msgid ""
+"Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: "
+"%(error)s"
+msgstr ""
+"Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: "
+"%(error)s"
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:143
+#, python-format
+msgid "Leaving create_volume: %(volumename)s Return code: %(rc)lu"
+msgstr "Leaving create_volume: %(volumename)s Return code: %(rc)lu"
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:151
+msgid "Entering create_volume_from_snapshot."
+msgstr "Entering create_volume_from_snapshot."
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:156
+#, python-format
+msgid ""
+"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: "
+"%(snapshotname)s"
+msgstr ""
+"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: "
+"%(snapshotname)s"
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:166
+#, python-format
+msgid ""
+"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: "
+"%(snapshotname)s Snapshot Instance: %(snapshotinstance)s Storage "
+"System: %(storage_system)s."
+msgstr ""
+"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: "
+"%(snapshotname)s Snapshot Instance: %(snapshotinstance)s Storage "
+"System: %(storage_system)s."
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:176
+#, python-format
+msgid ""
+"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: "
+"%(snapshotname)s. Create Volume from Snapshot is NOT supported on VMAX."
+msgstr ""
+"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: "
+"%(snapshotname)s. Create Volume from Snapshot is NOT supported on VMAX."
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:187
+#, python-format
+msgid ""
+"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: "
+"%(snapshotname)s. Cannot find Replication Service to create volume from "
+"snapshot."
+msgstr "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Cannot find Replication Service to create volume from " +"snapshot." + +#: cinder/volume/drivers/emc/emc_smis_common.py:196 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Method: CreateElementReplica ReplicationService: " +"%(service)s ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Method: CreateElementReplica ReplicationService: " +"%(service)s ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:217 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s " +"Snapshot:%(snapshotname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" +"Error Create Volume from Snapshot: Volume: %(volumename)s " +"Snapshot:%(snapshotname)s. Return code: %(rc)lu.Error: %(error)s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:229 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Successfully clone volume from snapshot. Finding the " +"clone relationship." +msgstr "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Successfully clone volume from snapshot. Finding the " +"clone relationship." + +#: cinder/volume/drivers/emc/emc_smis_common.py:240 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Remove the clone relationship. Method: " +"ModifyReplicaSynchronization ReplicationService: %(service)s Operation: " +"8 Synchronization: %(sync_name)s" +msgstr "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Remove the clone relationship. Method: " +"ModifyReplicaSynchronization ReplicationService: %(service)s Operation: " +"8 Synchronization: %(sync_name)s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:256 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu" +msgstr "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu" + +#: cinder/volume/drivers/emc/emc_smis_common.py:265 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:277 +#, python-format +msgid "" +"Leaving create_volume_from_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" +"Leaving create_volume_from_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." + +#: cinder/volume/drivers/emc/emc_smis_common.py:286 +msgid "Entering create_cloned_volume." +msgstr "Entering create_cloned_volume." 
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:291
+#, python-format
+msgid ""
+"Create a Clone from Volume: Volume: %(volumename)s Source Volume: "
+"%(srcname)s"
+msgstr ""
+"Create a Clone from Volume: Volume: %(volumename)s Source Volume: "
+"%(srcname)s"
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:301
+#, python-format
+msgid ""
+"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s"
+" Source Instance: %(src_instance)s Storage System: %(storage_system)s."
+msgstr ""
+"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s"
+" Source Instance: %(src_instance)s Storage System: %(storage_system)s."
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:311
+#, python-format
+msgid ""
+"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: "
+"%(srcname)s. Cannot find Replication Service to create cloned volume."
+msgstr ""
+"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: "
+"%(srcname)s. Cannot find Replication Service to create cloned volume."
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:320
+#, python-format
+msgid ""
+"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s"
+" Method: CreateElementReplica ReplicationService: %(service)s "
+"ElementName: %(elementname)s SyncType: 8 SourceElement: "
+"%(sourceelement)s"
+msgstr ""
+"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s"
+" Method: CreateElementReplica ReplicationService: %(service)s "
+"ElementName: %(elementname)s SyncType: 8 SourceElement: "
+"%(sourceelement)s"
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:341
+#, python-format
+msgid ""
+"Error Create Cloned Volume: Volume: %(volumename)s Source "
+"Volume: %(srcname)s. Return code: %(rc)lu. Error: %(error)s"
+msgstr ""
+"Error Create Cloned Volume: Volume: %(volumename)s Source "
+"Volume: %(srcname)s. Return code: %(rc)lu. Error: %(error)s"
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:353
+#, python-format
+msgid ""
+"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s."
+" Successfully cloned volume from source volume. Finding the clone "
+"relationship."
+msgstr ""
+"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s."
+" Successfully cloned volume from source volume. Finding the clone "
+"relationship."
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:364
+#, python-format
+msgid ""
+"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s."
+" Remove the clone relationship. Method: ModifyReplicaSynchronization "
+"ReplicationService: %(service)s Operation: 8 Synchronization: "
+"%(sync_name)s"
+msgstr ""
+"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s."
+" Remove the clone relationship. Method: ModifyReplicaSynchronization "
+"ReplicationService: %(service)s Operation: 8 Synchronization: "
+"%(sync_name)s"
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:380
+#, python-format
+msgid ""
+"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s"
+" Return code: %(rc)lu"
+msgstr ""
+"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s"
+" Return code: %(rc)lu"
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:389
+#, python-format
+msgid ""
+"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: "
+"%(srcname)s. Return code: %(rc)lu. Error: %(error)s"
+msgstr ""
+"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: "
+"%(srcname)s. Return code: %(rc)lu. Error: %(error)s"
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:401
+#, python-format
+msgid ""
+"Leaving create_cloned_volume: Volume: %(volumename)s Source Volume: "
+"%(srcname)s Return code: %(rc)lu."
+msgstr ""
+"Leaving create_cloned_volume: Volume: %(volumename)s Source Volume: "
+"%(srcname)s Return code: %(rc)lu."
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:410
+msgid "Entering delete_volume."
+msgstr "Entering delete_volume."
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:412
+#, python-format
+msgid "Delete Volume: %(volume)s"
+msgstr "Delete Volume: %(volume)s"
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:419
+#, python-format
+msgid "Volume %(name)s not found on the array. No volume to delete."
+msgstr "Volume %(name)s not found on the array. No volume to delete."
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:429
+#, python-format
+msgid ""
+"Error Delete Volume: %(volumename)s. Storage Configuration Service not "
+"found."
+msgstr ""
+"Error Delete Volume: %(volumename)s. Storage Configuration Service not "
+"found."
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:437
+#, python-format
+msgid "Delete Volume: %(name)s DeviceID: %(deviceid)s"
+msgstr "Delete Volume: %(name)s DeviceID: %(deviceid)s"
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:441
+#, python-format
+msgid ""
+"Delete Volume: %(name)s Method: EMCReturnToStoragePool ConfigService: "
+"%(service)s TheElement: %(vol_instance)s"
+msgstr ""
+"Delete Volume: %(name)s Method: EMCReturnToStoragePool ConfigService: "
+"%(service)s TheElement: %(vol_instance)s"
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:454
+#, python-format
+msgid ""
+"Error Delete Volume: %(volumename)s. Return code: %(rc)lu. Error: "
+"%(error)s"
+msgstr ""
+"Error Delete Volume: %(volumename)s. Return code: %(rc)lu. Error: "
+"%(error)s"
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:463
+#, python-format
+msgid "Leaving delete_volume: %(volumename)s Return code: %(rc)lu"
+msgstr "Leaving delete_volume: %(volumename)s Return code: %(rc)lu"
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:470
+msgid "Entering create_snapshot."
+msgstr "Entering create_snapshot."
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:474
+#, python-format
+msgid "Create snapshot: %(snapshot)s: volume: %(volume)s"
+msgstr "Create snapshot: %(snapshot)s: volume: %(volume)s"
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:486
+#, python-format
+msgid "Device ID: %(deviceid)s: Storage System: %(storagesystem)s"
+msgstr "Device ID: %(deviceid)s: Storage System: %(storagesystem)s"
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:493
+#: cinder/volume/drivers/emc/emc_smis_common.py:495
+#: cinder/volume/drivers/emc/emc_smis_common.py:565
+#, python-format
+msgid "Cannot find Replication Service to create snapshot for volume %s."
+msgstr "Cannot find Replication Service to create snapshot for volume %s."
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:500
+#, python-format
+msgid ""
+"Create Snapshot: Method: CreateElementReplica: Target: %(snapshot)s "
+"Source: %(volume)s Replication Service: %(service)s ElementName: "
+"%(elementname)s Sync Type: 7 SourceElement: %(sourceelement)s."
+msgstr ""
+"Create Snapshot: Method: CreateElementReplica: Target: %(snapshot)s "
+"Source: %(volume)s Replication Service: %(service)s ElementName: "
+"%(elementname)s Sync Type: 7 SourceElement: %(sourceelement)s."
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:516
+#, python-format
+msgid ""
+"Create Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s "
+"Return code: %(rc)lu"
+msgstr ""
+"Create Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s "
+"Return code: %(rc)lu"
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:525
+#, python-format
+msgid "Error Create Snapshot: %(snapshot)s Volume: %(volume)s Error: %(errordesc)s"
+msgstr "Error Create Snapshot: %(snapshot)s Volume: %(volume)s Error: %(errordesc)s"
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:533
+#, python-format
+msgid ""
+"Leaving create_snapshot: Snapshot: %(snapshot)s Volume: %(volume)s "
+"Return code: %(rc)lu."
+msgstr ""
+"Leaving create_snapshot: Snapshot: %(snapshot)s Volume: %(volume)s "
+"Return code: %(rc)lu."
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:539
+msgid "Entering delete_snapshot."
+msgstr "Entering delete_snapshot."
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:543
+#, python-format
+msgid "Delete Snapshot: %(snapshot)s: volume: %(volume)s"
+msgstr "Delete Snapshot: %(snapshot)s: volume: %(volume)s"
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:549
+#, python-format
+msgid ""
+"Delete Snapshot: %(snapshot)s: volume: %(volume)s. Finding "
+"StorageSychronization_SV_SV."
+msgstr ""
+"Delete Snapshot: %(snapshot)s: volume: %(volume)s. Finding "
+"StorageSychronization_SV_SV."
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:557
+#, python-format
+msgid ""
+"Snapshot: %(snapshot)s: volume: %(volume)s not found on the array. No "
+"snapshot to delete."
+msgstr ""
+"Snapshot: %(snapshot)s: volume: %(volume)s not found on the array. No "
+"snapshot to delete."
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:572
+#, python-format
+msgid ""
+"Delete Snapshot: Target: %(snapshot)s Source: %(volume)s. Method: "
+"ModifyReplicaSynchronization: Replication Service: %(service)s "
+"Operation: 19 Synchronization: %(sync_name)s."
+msgstr ""
+"Delete Snapshot: Target: %(snapshot)s Source: %(volume)s. Method: "
+"ModifyReplicaSynchronization: Replication Service: %(service)s "
+"Operation: 19 Synchronization: %(sync_name)s."
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:588
+#, python-format
+msgid ""
+"Delete Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s "
+"Return code: %(rc)lu"
+msgstr ""
+"Delete Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s "
+"Return code: %(rc)lu"
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:597
+#, python-format
+msgid ""
+"Error Delete Snapshot: Volume: %(volumename)s Snapshot: "
+"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s"
+msgstr ""
+"Error Delete Snapshot: Volume: %(volumename)s Snapshot: "
+"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s"
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:609
+#, python-format
+msgid ""
+"Leaving delete_snapshot: Volume: %(volumename)s Snapshot: "
+"%(snapshotname)s Return code: %(rc)lu."
+msgstr ""
+"Leaving delete_snapshot: Volume: %(volumename)s Snapshot: "
+"%(snapshotname)s Return code: %(rc)lu."
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:619
+#, python-format
+msgid "Create export: %(volume)s"
+msgstr "Create export: %(volume)s"
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:624
+#, python-format
+msgid "create_export: Volume: %(volume)s Device ID: %(device_id)s"
+msgstr "create_export: Volume: %(volume)s Device ID: %(device_id)s"
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:646
+#, python-format
+msgid ""
+"ExposePaths: %(vol)s ConfigService: %(service)s LUNames: %(lun_name)s"
+" InitiatorPortIDs: %(initiator)s DeviceAccesses: 2"
+msgstr ""
+"ExposePaths: %(vol)s ConfigService: %(service)s LUNames: %(lun_name)s"
+" InitiatorPortIDs: %(initiator)s DeviceAccesses: 2"
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:661
+#, python-format
+msgid "ExposePaths parameter LunMaskingSCSIProtocolController: %(lunmasking)s"
+msgstr "ExposePaths parameter LunMaskingSCSIProtocolController: %(lunmasking)s"
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:672
+#, python-format
+msgid "Error mapping volume %s."
+msgstr "Error mapping volume %s."
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:676
+#, python-format
+msgid "ExposePaths for volume %s completed successfully."
+msgstr "ExposePaths for volume %s completed successfully."
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:692
+#, python-format
+msgid ""
+"HidePaths: %(vol)s ConfigService: %(service)s LUNames: %(device_id)s "
+"LunMaskingSCSIProtocolController: %(lunmasking)s"
+msgstr ""
+"HidePaths: %(vol)s ConfigService: %(service)s LUNames: %(device_id)s "
+"LunMaskingSCSIProtocolController: %(lunmasking)s"
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:705
+#, python-format
+msgid "Error unmapping volume %s."
+msgstr "Error unmapping volume %s."
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:709
+#, python-format
+msgid "HidePaths for volume %s completed successfully."
+msgstr "HidePaths for volume %s completed successfully."
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:722
+#, python-format
+msgid ""
+"AddMembers: ConfigService: %(service)s MaskingGroup: %(masking_group)s"
+" Members: %(vol)s"
+msgstr ""
+"AddMembers: ConfigService: %(service)s MaskingGroup: %(masking_group)s"
+" Members: %(vol)s"
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:735
+#, python-format
+msgid "Error mapping volume %(vol)s. %(error)s"
+msgstr "Error mapping volume %(vol)s. %(error)s"
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:740
+#, python-format
+msgid "AddMembers for volume %s completed successfully."
+msgstr "AddMembers for volume %s completed successfully."
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:753
+#, python-format
+msgid ""
+"RemoveMembers: ConfigService: %(service)s MaskingGroup: "
+"%(masking_group)s Members: %(vol)s"
+msgstr ""
+"RemoveMembers: ConfigService: %(service)s MaskingGroup: "
+"%(masking_group)s Members: %(vol)s"
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:766
+#, python-format
+msgid "Error unmapping volume %(vol)s. %(error)s"
+msgstr "Error unmapping volume %(vol)s. %(error)s"
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:771
+#, python-format
+msgid "RemoveMembers for volume %s completed successfully."
+msgstr "RemoveMembers for volume %s completed successfully."
+ +#: cinder/volume/drivers/emc/emc_smis_common.py:777 +#, python-format +msgid "Map volume: %(volume)s" +msgstr "Map volume: %(volume)s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:786 +#: cinder/volume/drivers/emc/emc_smis_common.py:816 +#, python-format +msgid "Cannot find Controller Configuration Service for storage system %s" +msgstr "Cannot find Controller Configuration Service for storage system %s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:800 +#, python-format +msgid "Unmap volume: %(volume)s" +msgstr "Unmap volume: %(volume)s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:806 +#, python-format +msgid "Volume %s is not mapped. No volume to unmap." +msgstr "Volume %s is not mapped. No volume to unmap." + +#: cinder/volume/drivers/emc/emc_smis_common.py:830 +#, python-format +msgid "Initialize connection: %(volume)s" +msgstr "Initialize connection: %(volume)s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:836 +#, python-format +msgid "Volume %s is already mapped." +msgstr "Volume %s is already mapped." + +#: cinder/volume/drivers/emc/emc_smis_common.py:848 +#, python-format +msgid "Terminate connection: %(volume)s" +msgstr "Terminate connection: %(volume)s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:880 +#, python-format +msgid "Found Storage Type: %s" +msgstr "Found Storage Type: %s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:883 +msgid "Storage type not found." +msgstr "Storage type not found." + +#: cinder/volume/drivers/emc/emc_smis_common.py:899 +#, python-format +msgid "Found Masking View: %s" +msgstr "Found Masking View: %s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:902 +msgid "Masking View not found." +msgstr "Masking View not found." + +#: cinder/volume/drivers/emc/emc_smis_common.py:924 +msgid "Ecom user not found." +msgstr "Ecom user not found." + +#: cinder/volume/drivers/emc/emc_smis_common.py:944 +#, python-format +msgid "Ecom IP: %(ecomIp)s Port: %(ecomPort)s" +msgstr "Ecom IP: %(ecomIp)s Port: %(ecomPort)s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:947 +msgid "Ecom server not found." +msgstr "Ecom server not found." + +#: cinder/volume/drivers/emc/emc_smis_common.py:954 +msgid "Cannot connect to ECOM server" +msgstr "Cannot connect to ECOM server" + +#: cinder/volume/drivers/emc/emc_smis_common.py:966 +#, python-format +msgid "Found Replication Service: %s" +msgstr "Found Replication Service: %s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:979 +#, python-format +msgid "Found Storage Configuration Service: %s" +msgstr "Found Storage Configuration Service: %s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:992 +#, python-format +msgid "Found Controller Configuration Service: %s" +msgstr "Found Controller Configuration Service: %s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1005 +#, python-format +msgid "Found Storage Hardware ID Management Service: %s" +msgstr "Found Storage Hardware ID Management Service: %s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1049 +#, python-format +msgid "Pool %(storage_type)s is not found." +msgstr "Pool %(storage_type)s is not found." + +#: cinder/volume/drivers/emc/emc_smis_common.py:1055 +#, python-format +msgid "Storage system not found for pool %(storage_type)s." +msgstr "Storage system not found for pool %(storage_type)s." + +#: cinder/volume/drivers/emc/emc_smis_common.py:1061 +#, python-format +msgid "Pool: %(pool)s SystemName: %(systemname)s." +msgstr "Pool: %(pool)s SystemName: %(systemname)s." 
+ +#: cinder/volume/drivers/emc/emc_smis_common.py:1077 +#, python-format +msgid "Pool name: %(poolname)s System name: %(systemname)s." +msgstr "Pool name: %(poolname)s System name: %(systemname)s." + +#: cinder/volume/drivers/emc/emc_smis_common.py:1109 +#, python-format +msgid "Volume %(volumename)s not found on the array." +msgstr "Volume %(volumename)s not found on the array." + +#: cinder/volume/drivers/emc/emc_smis_common.py:1112 +#, python-format +msgid "Volume name: %(volumename)s Volume instance: %(vol_instance)s." +msgstr "Volume name: %(volumename)s Volume instance: %(vol_instance)s." + +#: cinder/volume/drivers/emc/emc_smis_common.py:1125 +#, python-format +msgid "Source: %(volumename)s Target: %(snapshotname)s." +msgstr "Source: %(volumename)s Target: %(snapshotname)s." + +#: cinder/volume/drivers/emc/emc_smis_common.py:1148 +#, python-format +msgid "" +"Source: %(volumename)s Target: %(snapshotname)s. Storage Synchronized " +"not found. " +msgstr "" +"Source: %(volumename)s Target: %(snapshotname)s. Storage Synchronized " +"not found. " + +#: cinder/volume/drivers/emc/emc_smis_common.py:1153 +#, python-format +msgid "" +"Storage system: %(storage_system)s Storage Synchronized instance: " +"%(sync)s." +msgstr "" +"Storage system: %(storage_system)s Storage Synchronized instance: " +"%(sync)s." + +#: cinder/volume/drivers/emc/emc_smis_common.py:1179 +#, python-format +msgid "Error finding %s." +msgstr "Error finding %s." + +#: cinder/volume/drivers/emc/emc_smis_common.py:1183 +#, python-format +msgid "Found %(name)s: %(initiator)s." +msgstr "Found %(name)s: %(initiator)s." + +#: cinder/volume/drivers/emc/emc_smis_common.py:1243 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage_system)s " +"and initiator %(initiator)s is %(ctrl)s." +msgstr "" +"LunMaskingSCSIProtocolController for storage system %(storage_system)s " +"and initiator %(initiator)s is %(ctrl)s." + +#: cinder/volume/drivers/emc/emc_smis_common.py:1282 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage volume %(vol)s and initiator" +" %(initiator)s is %(ctrl)s." +msgstr "" +"LunMaskingSCSIProtocolController for storage volume %(vol)s and initiator" +" %(initiator)s is %(ctrl)s." + +#: cinder/volume/drivers/emc/emc_smis_common.py:1295 +#, python-format +msgid "" +"Volume %(name)s not found on the array. Cannot determine if there are " +"volumes mapped." +msgstr "" +"Volume %(name)s not found on the array. Cannot determine if there are " +"volumes mapped." + +#: cinder/volume/drivers/emc/emc_smis_common.py:1307 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage)s and " +"%(connector)s is %(ctrl)s." +msgstr "" +"LunMaskingSCSIProtocolController for storage system %(storage)s and " +"%(connector)s is %(ctrl)s." + +#: cinder/volume/drivers/emc/emc_smis_common.py:1319 +#, python-format +msgid "" +"Found %(numVolumesMapped)d volumes on storage system %(storage)s mapped " +"to %(initiator)s." +msgstr "" +"Found %(numVolumesMapped)d volumes on storage system %(storage)s mapped " +"to %(initiator)s." + +#: cinder/volume/drivers/emc/emc_smis_common.py:1354 +#, python-format +msgid "Available device number on %(storage)s: %(device)s." +msgstr "Available device number on %(storage)s: %(device)s." + +#: cinder/volume/drivers/emc/emc_smis_common.py:1397 +#, python-format +msgid "Device number not found for volume %(volumename)s %(vol_instance)s." +msgstr "Device number not found for volume %(volumename)s %(vol_instance)s." 
+ +#: cinder/volume/drivers/emc/emc_smis_common.py:1402 +#, python-format +msgid "Found device number %(device)d for volume %(volumename)s %(vol_instance)s." +msgstr "Found device number %(device)d for volume %(volumename)s %(vol_instance)s." + +#: cinder/volume/drivers/emc/emc_smis_common.py:1412 +#, python-format +msgid "Device info: %(data)s." +msgstr "Device info: %(data)s." + +#: cinder/volume/drivers/emc/emc_smis_common.py:1434 +#, python-format +msgid "Masking view: %(view)s DeviceMaskingGroup: %(masking)s." +msgstr "Masking view: %(view)s DeviceMaskingGroup: %(masking)s." + +#: cinder/volume/drivers/emc/emc_smis_common.py:1456 +#, python-format +msgid "Found Storage Processor System: %s" +msgstr "Found Storage Processor System: %s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1484 +#, python-format +msgid "" +"iSCSIProtocolEndpoint for storage system %(storage_system)s and SP %(sp)s" +" is %(endpoint)s." +msgstr "" +"iSCSIProtocolEndpoint for storage system %(storage_system)s and SP %(sp)s" +" is %(endpoint)s." + +#: cinder/volume/drivers/emc/emc_smis_common.py:1513 +msgid "Error finding Storage Hardware ID Service." +msgstr "Error finding Storage Hardware ID Service." + +#: cinder/volume/drivers/emc/emc_smis_common.py:1519 +#, python-format +msgid "" +"EMCGetTargetEndpoints: Service: %(service)s Storage HardwareIDs: " +"%(hardwareids)s." +msgstr "" +"EMCGetTargetEndpoints: Service: %(service)s Storage HardwareIDs: " +"%(hardwareids)s." + +#: cinder/volume/drivers/emc/emc_smis_common.py:1531 +msgid "Error finding Target WWNs." +msgstr "Error finding Target WWNs." + +#: cinder/volume/drivers/emc/emc_smis_common.py:1541 +#, python-format +msgid "Add target WWN: %s." +msgstr "Add target WWN: %s." + +#: cinder/volume/drivers/emc/emc_smis_common.py:1543 +#, python-format +msgid "Target WWNs: %s." +msgstr "Target WWNs: %s." + +#: cinder/volume/drivers/emc/emc_smis_common.py:1559 +#, python-format +msgid "Storage Hardware IDs for %(wwpns)s is %(foundInstances)s." +msgstr "Storage Hardware IDs for %(wwpns)s is %(foundInstances)s." + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:158 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "Could not find iSCSI export for volume %s" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:167 +#, python-format +msgid "Cannot find device number for volume %s" +msgstr "Cannot find device number for volume %s" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:197 +#, python-format +msgid "Found iSCSI endpoint: %s" +msgstr "Found iSCSI endpoint: %s" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:204 +#, python-format +msgid "ISCSI endpoint not found for SP %(sp)s on storage system %(storage)s." +msgstr "ISCSI endpoint not found for SP %(sp)s on storage system %(storage)s." + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:221 +#, python-format +msgid "ISCSI properties: %s" +msgstr "ISCSI properties: %s" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:113 +msgid "read timed out" +msgstr "read timed out" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:138 +msgid "do_setup." +msgstr "do_setup." + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:144 +msgid "check_for_setup_error." +msgstr "check_for_setup_error." + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:149 +msgid "check_for_setup_error: Can not get device type." +msgstr "check_for_setup_error: Can not get device type." 
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:153
+#, python-format
+msgid "check_for_setup_error: Device type is:%(type)s, version is:%(version)s."
+msgstr "check_for_setup_error: Device type is:%(type)s, version is:%(version)s."
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:160
+msgid ""
+"check_for_setup_error: Product version not right. Please make sure the "
+"product version is V1."
+msgstr ""
+"check_for_setup_error: Product version not right. Please make sure the "
+"product version is V1."
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:171
+msgid "_get_device_type: Storage Pool must be configured."
+msgstr "_get_device_type: Storage Pool must be configured."
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:180
+#, python-format
+msgid "create_volume:volume name: %s."
+msgstr "create_volume:volume name: %s."
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:194
+#, python-format
+msgid "delete_volume: volume name: %s."
+msgstr "delete_volume: volume name: %s."
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:201
+#, python-format
+msgid "delete_volume:No need to delete volume. Volume %(name)s does not exist."
+msgstr "delete_volume:No need to delete volume. Volume %(name)s does not exist."
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:210
+#, python-format
+msgid "create_export: volume name:%s"
+msgstr "create_export: volume name:%s"
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:214
+#, python-format
+msgid "create_export:Volume %(name)s does not exist."
+msgstr "create_export:Volume %(name)s does not exist."
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:234
+#, python-format
+msgid "initialize_connection: volume name: %(volume)s. initiator name: %(ini)s."
+msgstr "initialize_connection: volume name: %(volume)s. initiator name: %(ini)s."
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:249
+#, python-format
+msgid ""
+"initialize_connection:Failed to find target ip for "
+"initiator:%(initiatorname)s, please check config file."
+msgstr ""
+"initialize_connection:Failed to find target ip for "
+"initiator:%(initiatorname)s, please check config file."
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:259
+#, python-format
+msgid "initialize_connection:Failed to find target iSCSI iqn. Target IP:%(ip)s"
+msgstr "initialize_connection:Failed to find target iSCSI iqn. Target IP:%(ip)s"
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:297
+#, python-format
+msgid ""
+"initialize_connection:host name: %(host)s, initiator name: %(ini)s, "
+"hostport name: %(port)s"
+msgstr ""
+"initialize_connection:host name: %(host)s, initiator name: %(ini)s, "
+"hostport name: %(port)s"
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:307
+#, python-format
+msgid ""
+"initialize_connection:Failed to find the given volume. volume "
+"name:%(volume)s."
+msgstr ""
+"initialize_connection:Failed to find the given volume. volume "
+"name:%(volume)s."
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:359
+#, python-format
+msgid "terminate_connection:volume name: %(volume)s, initiator name: %(ini)s."
+msgstr "terminate_connection:volume name: %(volume)s, initiator name: %(ini)s."
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:368
+#, python-format
+msgid "terminate_connection:Host does not exist. Host name:%(host)s."
+msgstr "terminate_connection:Host does not exist. Host name:%(host)s."
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:377
+#, python-format
+msgid "terminate_connection:volume does not exist. volume name:%(volume)s"
+msgstr "terminate_connection:volume does not exist. volume name:%(volume)s"
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:396
+#, python-format
+msgid ""
+"terminate_connection:No map between host and volume. Host "
+"name:%(hostname)s, volume name:%(volumename)s."
+msgstr ""
+"terminate_connection:No map between host and volume. Host "
+"name:%(hostname)s, volume name:%(volumename)s."
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:414
+#, python-format
+msgid ""
+"terminate_connection:No initiator is added to the host. Host "
+"name:%(hostname)s"
+msgstr ""
+"terminate_connection:No initiator is added to the host. Host "
+"name:%(hostname)s"
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:427
+#, python-format
+msgid "create_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s."
+msgstr "create_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s."
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:434
+msgid "create_snapshot:Device does not support snapshot."
+msgstr "create_snapshot:Device does not support snapshot."
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:440
+msgid "create_snapshot:Resource pool needs 1GB valid size at least."
+msgstr "create_snapshot:Resource pool needs 1GB valid size at least."
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:447
+#, python-format
+msgid "create_snapshot:Volume does not exist. Volume name:%(name)s"
+msgstr "create_snapshot:Volume does not exist. Volume name:%(name)s"
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:456
+#, python-format
+msgid "create_snapshot:Snapshot does not exist. Snapshot name:%(name)s"
+msgstr "create_snapshot:Snapshot does not exist. Snapshot name:%(name)s"
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:468
+#, python-format
+msgid "delete_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s."
+msgstr "delete_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s."
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:475
+msgid "delete_snapshot:Device does not support snapshot."
+msgstr "delete_snapshot:Device does not support snapshot."
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:484
+#, python-format
+msgid "delete_snapshot:Snapshot does not exist. snapshot name:%(snap)s"
+msgstr "delete_snapshot:Snapshot does not exist. snapshot name:%(snap)s"
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:497
+#, python-format
+msgid ""
+"create_volume_from_snapshot:snapshot name:%(snapshot)s, volume "
+"name:%(volume)s."
+msgstr ""
+"create_volume_from_snapshot:snapshot name:%(snapshot)s, volume "
+"name:%(volume)s."
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:505
+#, python-format
+msgid ""
+"create_volume_from_snapshot:Device does not support create volume from "
+"snapshot. Volume name:%(volume)s, snapshot name:%(snapshot)s."
+msgstr ""
+"create_volume_from_snapshot:Device does not support create volume from "
+"snapshot. Volume name:%(volume)s, snapshot name:%(snapshot)s."
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:516
+#, python-format
+msgid ""
+"create_volume_from_snapshot:Snapshot does not exist. Snapshot "
+"name:%(name)s"
+msgstr ""
+"create_volume_from_snapshot:Snapshot does not exist. Snapshot "
+"name:%(name)s"
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:567
+msgid "Config file is wrong. Controler IP, UserName and UserPassword must be set."
+msgstr "Config file is wrong. Controler IP, UserName and UserPassword must be set."
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:573
+#, python-format
+msgid "_check_conf_file: %s"
+msgstr "_check_conf_file: %s"
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:584
+#, python-format
+msgid "_read_xml:%s"
+msgstr "_read_xml:%s"
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:612
+#, python-format
+msgid "Write login information to xml error. %s"
+msgstr "Write login information to xml error. %s"
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:616
+#, python-format
+msgid "_get_login_info error. %s"
+msgstr "_get_login_info error. %s"
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:638
+#, python-format
+msgid ""
+"Config file is wrong. LUNType must be \"Thin\" or \"Thick\". "
+"LUNType:%(type)s"
+msgstr ""
+"Config file is wrong. LUNType must be \"Thin\" or \"Thick\". "
+"LUNType:%(type)s"
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:665
+msgid "_get_lun_set_info:Use default prefetch type. Prefetch type:Intelligent."
+msgstr "_get_lun_set_info:Use default prefetch type. Prefetch type:Intelligent."
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:676
+msgid ""
+"_get_lun_set_info:No available pools! Please check whether storage pool "
+"is created."
+msgstr ""
+"_get_lun_set_info:No available pools! Please check whether storage pool "
+"is created."
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:686
+#, python-format
+msgid "_get_lun_set_info:%s"
+msgstr "_get_lun_set_info:%s"
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:736
+msgid ""
+"_get_maximum_pool:maxpoolid is None. Please check config file and make "
+"sure the \"Name\" in \"StoragePool\" is right."
+msgstr ""
+"_get_maximum_pool:maxpoolid is None. Please check config file and make "
+"sure the \"Name\" in \"StoragePool\" is right."
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:754
+#, python-format
+msgid "_get_iscsi_info:%s"
+msgstr "_get_iscsi_info:%s"
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:764
+#, python-format
+msgid "CLI command:%s"
+msgstr "CLI command:%s"
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:831
+#, python-format
+msgid "_execute_cli:%s"
+msgstr "_execute_cli:%s"
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:838
+#, python-format
+msgid ""
+"_name_translate:Name in cinder: %(old)s, new name in storage system: "
+"%(new)s"
+msgstr ""
+"_name_translate:Name in cinder: %(old)s, new name in storage system: "
+"%(new)s"
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:872
+#, python-format
+msgid ""
+"_create_hostgroup:Failed to Create hostgroup. Hostgroup name: %(name)s. "
+"out:%(out)s."
+msgstr ""
+"_create_hostgroup:Failed to Create hostgroup. Hostgroup name: %(name)s. "
+"out:%(out)s."
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:901
+#, python-format
+msgid ""
+"_add_host:Failed to add host to hostgroup. host name:%(host)s hostgroup "
+"id:%(hostgroup)s out:%(out)s"
+msgstr ""
+"_add_host:Failed to add host to hostgroup. host name:%(host)s hostgroup "
+"id:%(hostgroup)s out:%(out)s"
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:927
+#, python-format
+msgid ""
+"_add_initiator:Failed to add initiator. initiator name:%(name)s "
+"out:%(out)s"
+msgstr ""
+"_add_initiator:Failed to add initiator. initiator name:%(name)s "
+"out:%(out)s"
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:941
+#, python-format
+msgid ""
+"_delete_initiator:ERROE:Failed to delete initiator. initiator "
+"name:%(name)s out:%(out)s"
+msgstr ""
+"_delete_initiator:ERROE:Failed to delete initiator. initiator "
+"name:%(name)s out:%(out)s"
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:991
+#, python-format
+msgid ""
+"_add_hostport:Failed to add hostport. port name:%(port)s port "
+"information:%(info)s host id:%(host)s out:%(out)s"
+msgstr ""
+"_add_hostport:Failed to add hostport. port name:%(port)s port "
+"information:%(info)s host id:%(host)s out:%(out)s"
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1009
+#, python-format
+msgid "_delete_hostport:Failed to delete host port. port id:%(portid)s"
+msgstr "_delete_hostport:Failed to delete host port. port id:%(portid)s"
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1016
+#, python-format
+msgid "_get_tgt_iqn:iSCSI IP is %s."
+msgstr "_get_tgt_iqn:iSCSI IP is %s."
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1049
+#, python-format
+msgid "_get_tgt_iqn:iSCSI target iqn is:%s"
+msgstr "_get_tgt_iqn:iSCSI target iqn is:%s"
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1086
+#, python-format
+msgid ""
+"_map_lun:Failed to add hostmap. hostid:%(host)s lunid:%(lun)s "
+"hostlunid:%(hostlunid)s out:%(out)s"
+msgstr ""
+"_map_lun:Failed to add hostmap. hostid:%(host)s lunid:%(lun)s "
+"hostlunid:%(hostlunid)s out:%(out)s"
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1122
+#, python-format
+msgid ""
+"_delete_map:There are IOs accessing the system. Retry to delete host map."
+" map id:%(mapid)s"
+msgstr ""
+"_delete_map:There are IOs accessing the system. Retry to delete host map."
+" map id:%(mapid)s"
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1128
+#, python-format
+msgid "_delete_map:Failed to delete host map. mapid:%(mapid)s out:%(out)s"
+msgstr "_delete_map:Failed to delete host map. mapid:%(mapid)s out:%(out)s"
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1142
+#, python-format
+msgid "_delete_host: Failed delete host. host id:%(hostid)s out:%(out)s"
+msgstr "_delete_host: Failed delete host. host id:%(hostid)s out:%(out)s"
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1223
+#, python-format
+msgid ""
+"_active_snapshot:Failed to active snapshot. snapshot id:%(name)s. "
+"out:%(out)s"
+msgstr ""
+"_active_snapshot:Failed to active snapshot. snapshot id:%(name)s. "
+"out:%(out)s"
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1237
+#, python-format
+msgid ""
+"_disable_snapshot:Failed to disable snapshot. snapshot id:%(id)s. "
+"out:%(out)s"
+msgstr ""
+"_disable_snapshot:Failed to disable snapshot. snapshot id:%(id)s. "
+"out:%(out)s"
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1251
+#, python-format
+msgid ""
+"_delete_snapshot:Failed to delete snapshot. snapshot id:%(id)s. "
+"out:%(out)s"
+msgstr ""
+"_delete_snapshot:Failed to delete snapshot. snapshot id:%(id)s. "
+"out:%(out)s"
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1308
+#, python-format
+msgid "_create_volume:Failed to Create volume. volume name:%(name)s. out:%(out)s"
+msgstr "_create_volume:Failed to Create volume. volume name:%(name)s. out:%(out)s"
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1321
+#, python-format
+msgid "_delete_volume:Failed to delete volume. Volume name:%(name)s out:%(out)s"
+msgstr "_delete_volume:Failed to delete volume. Volume name:%(name)s out:%(out)s"
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1338
+#, python-format
+msgid ""
+"_create_luncopy:Failed to Create LUNcopy. LUNcopy name:%(name)s "
+"out:%(out)s"
+msgstr ""
+"_create_luncopy:Failed to Create LUNcopy. LUNcopy name:%(name)s "
+"out:%(out)s"
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1352
+#, python-format
+msgid ""
+"_start_luncopy:Failed to start LUNcopy. LUNcopy id:%(luncopyid)s "
+"out:%(out)s"
+msgstr ""
+"_start_luncopy:Failed to start LUNcopy. LUNcopy id:%(luncopyid)s "
+"out:%(out)s"
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1382
+#, python-format
+msgid ""
+"_wait_for_luncopy:LUNcopy status is not normal. LUNcopy "
+"name:%(luncopyname)s"
+msgstr ""
+"_wait_for_luncopy:LUNcopy status is not normal. LUNcopy "
+"name:%(luncopyname)s"
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1415
+#, python-format
+msgid ""
+"_delete_luncopy:Failed to delete LUNcopy. LUNcopy id:%(luncopyid)s "
+"out:%(out)s"
+msgstr ""
+"_delete_luncopy:Failed to delete LUNcopy. LUNcopy id:%(luncopyid)s "
+"out:%(out)s"
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1430
+#, python-format
+msgid ""
+"_create_snapshot:Failed to Create snapshot. Snapshot name:%(name)s "
+"out:%(out)s"
+msgstr ""
+"_create_snapshot:Failed to Create snapshot. Snapshot name:%(name)s "
+"out:%(out)s"
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1468
+#, python-format
+msgid ""
+"_change_lun_controller:Failed to change lun owning controller. lun "
+"id:%(lunid)s. new controller:%(controller)s. out:%(out)s"
+msgstr ""
+"_change_lun_controller:Failed to change lun owning controller. lun "
+"id:%(lunid)s. new controller:%(controller)s. out:%(out)s"
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1484
+msgid "_is_resource_pool_enough:Resource pool for snapshot not be added."
+msgstr "_is_resource_pool_enough:Resource pool for snapshot not be added."
+
+#: cinder/volume/drivers/netapp/iscsi.py:123
+#, python-format
+msgid "API %(name)s failed: %(reason)s"
+msgstr "API %(name)s failed: %(reason)s"
+
+#: cinder/volume/drivers/netapp/iscsi.py:134
+#: cinder/volume/drivers/netapp/iscsi.py:1167
+#, python-format
+msgid "Using WSDL: %s"
+msgstr "Using WSDL: %s"
+
+#: cinder/volume/drivers/netapp/iscsi.py:144
+#, python-format
+msgid "Using DFM server: %s"
+msgstr "Using DFM server: %s"
+
+#: cinder/volume/drivers/netapp/iscsi.py:149
+#, python-format
+msgid "Using storage service: %s"
+msgstr "Using storage service: %s"
+
+#: cinder/volume/drivers/netapp/iscsi.py:154
+#, python-format
+msgid "Using storage service prefix: %s"
+msgstr "Using storage service prefix: %s"
+
+#: cinder/volume/drivers/netapp/iscsi.py:160
+#, python-format
+msgid "Using vfiler: %s"
+msgstr "Using vfiler: %s"
+
+#: cinder/volume/drivers/netapp/iscsi.py:173
+msgid "Either netapp_storage_service or netapp_storage_service_prefix must be set"
+msgstr "Either netapp_storage_service or netapp_storage_service_prefix must be set"
+
+#: cinder/volume/drivers/netapp/iscsi.py:204
+msgid "Connected to DFM server"
+msgstr "Connected to DFM server"
+
+#: cinder/volume/drivers/netapp/iscsi.py:282
+#, python-format
+msgid "Discovered %(dataset_count)s datasets and %(lun_count)s LUNs"
+msgstr "Discovered %(dataset_count)s datasets and %(lun_count)s LUNs"
+
+#: cinder/volume/drivers/netapp/iscsi.py:317
+#, python-format
+msgid "Job failed: %s"
+msgstr "Job failed: %s"
+
+#: cinder/volume/drivers/netapp/iscsi.py:346
+msgid ""
+"Attempt to use volume_type without specifying "
+"netapp_storage_service_prefix flag."
+msgstr ""
+"Attempt to use volume_type without specifying "
+"netapp_storage_service_prefix flag."
+ +#: cinder/volume/drivers/netapp/iscsi.py:350 +msgid "" +"You must set the netapp_storage_service flag in order to create volumes " +"with no volume_type." +msgstr "" +"You must set the netapp_storage_service flag in order to create volumes " +"with no volume_type." + +#: cinder/volume/drivers/netapp/iscsi.py:420 +msgid "Failed to provision dataset member" +msgstr "Failed to provision dataset member" + +#: cinder/volume/drivers/netapp/iscsi.py:435 +msgid "No LUN was created by the provision job" +msgstr "No LUN was created by the provision job" + +#: cinder/volume/drivers/netapp/iscsi.py:463 +#: cinder/volume/drivers/netapp/iscsi.py:1250 +#, python-format +msgid "No entry in LUN table for volume %(name)s." +msgstr "No entry in LUN table for volume %(name)s." + +#: cinder/volume/drivers/netapp/iscsi.py:481 +msgid "Failed to remove and delete dataset LUN member" +msgstr "Failed to remove and delete dataset LUN member" + +#: cinder/volume/drivers/netapp/iscsi.py:497 +msgid "Failed to remove and delete dataset Qtree member" +msgstr "Failed to remove and delete dataset Qtree member" + +#: cinder/volume/drivers/netapp/iscsi.py:538 +#, python-format +msgid "No entry in LUN table for volume %s" +msgstr "No entry in LUN table for volume %s" + +#: cinder/volume/drivers/netapp/iscsi.py:558 +#, python-format +msgid "Failed to get LUN details for LUN ID %s" +msgstr "Failed to get LUN details for LUN ID %s" + +#: cinder/volume/drivers/netapp/iscsi.py:575 +#, python-format +msgid "Failed to get host details for host ID %s" +msgstr "Failed to get host details for host ID %s" + +#: cinder/volume/drivers/netapp/iscsi.py:812 +#: cinder/volume/drivers/netapp/iscsi.py:859 +#, python-format +msgid "No LUN ID for volume %s" +msgstr "No LUN ID for volume %s" + +#: cinder/volume/drivers/netapp/iscsi.py:821 +#, python-format +msgid "Failed to get target portal for filer: %s" +msgstr "Failed to get target portal for filer: %s" + +#: cinder/volume/drivers/netapp/iscsi.py:826 +#, python-format +msgid "Failed to get target IQN for filer: %s" +msgstr "Failed to get target IQN for filer: %s" + +#: cinder/volume/drivers/netapp/iscsi.py:930 +msgid "" +"Failed to get LUN list. Is the DFM host time-synchronized with Cinder " +"host?" +msgstr "" +"Failed to get LUN list. Is the DFM host time-synchronized with Cinder " +"host?" 
+ +#: cinder/volume/drivers/netapp/iscsi.py:1042 +#: cinder/volume/drivers/netapp/iscsi.py:1381 +#: cinder/volume/drivers/netapp/iscsi.py:1699 +#: cinder/volume/drivers/netapp/nfs.py:74 +#, python-format +msgid "" +"Cannot create volume of size %(vol_size)s from snapshot of size " +"%(snap_size)s" +msgstr "" +"Cannot create volume of size %(vol_size)s from snapshot of size " +"%(snap_size)s" + +#: cinder/volume/drivers/netapp/iscsi.py:1054 +#, python-format +msgid "" +"Cannot create volume of type %(new_type)s from snapshot of type " +"%(old_type)s" +msgstr "" +"Cannot create volume of type %(new_type)s from snapshot of type " +"%(old_type)s" + +#: cinder/volume/drivers/netapp/iscsi.py:1075 +#: cinder/volume/drivers/netapp/nfs.py:273 +#, python-format +msgid "" +"Cannot create clone of size %(vol_size)s from volume of size " +"%(src_vol_size)s" +msgstr "" +"Cannot create clone of size %(vol_size)s from volume of size " +"%(src_vol_size)s" + +#: cinder/volume/drivers/netapp/iscsi.py:1086 +#, python-format +msgid "Cannot create clone of type %(new_type)s from volume of type %(old_type)s" +msgstr "Cannot create clone of type %(new_type)s from volume of type %(old_type)s" + +#: cinder/volume/drivers/netapp/iscsi.py:1143 +#, python-format +msgid "No metadata property %(prop)s defined for the LUN %(name)s" +msgstr "No metadata property %(prop)s defined for the LUN %(name)s" + +#: cinder/volume/drivers/netapp/iscsi.py:1216 +#: cinder/volume/drivers/netapp/iscsi.py:1561 +msgid "Success getting LUN list from server" +msgstr "Success getting LUN list from server" + +#: cinder/volume/drivers/netapp/iscsi.py:1238 +#: cinder/volume/drivers/netapp/iscsi.py:1576 +#, python-format +msgid "Created LUN with name %s" +msgstr "Created LUN with name %s" + +#: cinder/volume/drivers/netapp/iscsi.py:1254 +#: cinder/volume/drivers/netapp/iscsi.py:1369 +#: cinder/volume/drivers/netapp/iscsi.py:1593 +#, python-format +msgid "Destroyed LUN %s" +msgstr "Destroyed LUN %s" + +#: cinder/volume/drivers/netapp/iscsi.py:1291 +#, python-format +msgid "Mapped LUN %(handle)s to the initiator %(initiator_name)s" +msgstr "Mapped LUN %(handle)s to the initiator %(initiator_name)s" + +#: cinder/volume/drivers/netapp/iscsi.py:1298 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(handle)s and initiator " +"%(initiator_name)s" +msgstr "" +"Succesfully fetched target details for LUN %(handle)s and initiator " +"%(initiator_name)s" + +#: cinder/volume/drivers/netapp/iscsi.py:1303 +#: cinder/volume/drivers/netapp/iscsi.py:1637 +#, python-format +msgid "Failed to get LUN target details for the LUN %s" +msgstr "Failed to get LUN target details for the LUN %s" + +#: cinder/volume/drivers/netapp/iscsi.py:1307 +#: cinder/volume/drivers/netapp/iscsi.py:1648 +#, python-format +msgid "Failed to get target portal for the LUN %s" +msgstr "Failed to get target portal for the LUN %s" + +#: cinder/volume/drivers/netapp/iscsi.py:1311 +#: cinder/volume/drivers/netapp/iscsi.py:1651 +#, python-format +msgid "Failed to get target IQN for the LUN %s" +msgstr "Failed to get target IQN for the LUN %s" + +#: cinder/volume/drivers/netapp/iscsi.py:1344 +#, python-format +msgid "Unmapped LUN %(handle)s from the initiator %(initiator_name)s" +msgstr "Unmapped LUN %(handle)s from the initiator %(initiator_name)s" + +#: cinder/volume/drivers/netapp/iscsi.py:1365 +#, python-format +msgid "No entry in LUN table for snapshot %(name)s." +msgstr "No entry in LUN table for snapshot %(name)s." 
+ +#: cinder/volume/drivers/netapp/iscsi.py:1409 +#: cinder/volume/drivers/netapp/iscsi.py:1907 +msgid "Object is not a NetApp LUN." +msgstr "Object is not a NetApp LUN." + +#: cinder/volume/drivers/netapp/iscsi.py:1419 +#: cinder/volume/drivers/netapp/iscsi.py:2188 +#, python-format +msgid "Cloned LUN with new name %s" +msgstr "Cloned LUN with new name %s" + +#: cinder/volume/drivers/netapp/iscsi.py:1439 +#, python-format +msgid "Could not find handle for LUN named %s" +msgstr "Could not find handle for LUN named %s" + +#: cinder/volume/drivers/netapp/iscsi.py:1458 +#: cinder/volume/drivers/netapp/iscsi.py:1936 +#, python-format +msgid "" +"Cannot clone volume of size %(vol_size)s from src volume of size " +"%(src_vol_size)s" +msgstr "" +"Cannot clone volume of size %(vol_size)s from src volume of size " +"%(src_vol_size)s" + +#: cinder/volume/drivers/netapp/iscsi.py:1518 +#, python-format +msgid "Using NetApp filer: %s" +msgstr "Using NetApp filer: %s" + +#: cinder/volume/drivers/netapp/iscsi.py:1585 +#, python-format +msgid "No entry in LUN table for volume/snapshot %(name)s." +msgstr "No entry in LUN table for volume/snapshot %(name)s." + +#: cinder/volume/drivers/netapp/iscsi.py:1628 +#, python-format +msgid "Mapped LUN %(name)s to the initiator %(initiator_name)s" +msgstr "Mapped LUN %(name)s to the initiator %(initiator_name)s" + +#: cinder/volume/drivers/netapp/iscsi.py:1632 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(name)s and initiator " +"%(initiator_name)s" +msgstr "" +"Succesfully fetched target details for LUN %(name)s and initiator " +"%(initiator_name)s" + +#: cinder/volume/drivers/netapp/iscsi.py:1688 +#, python-format +msgid "Snapshot %s deletion successful" +msgstr "Snapshot %s deletion successful" + +#: cinder/volume/drivers/netapp/iscsi.py:1717 +#, python-format +msgid "Unmapped LUN %(name)s from the initiator %(initiator_name)s" +msgstr "Unmapped LUN %(name)s from the initiator %(initiator_name)s" + +#: cinder/volume/drivers/netapp/iscsi.py:1735 +#, python-format +msgid "Failed to get vol with required size for volume: %s" +msgstr "Failed to get vol with required size for volume: %s" + +#: cinder/volume/drivers/netapp/iscsi.py:1811 +#, python-format +msgid "Error mapping lun. Code :%(code)s, Message:%(message)s" +msgstr "Error mapping lun. Code :%(code)s, Message:%(message)s" + +#: cinder/volume/drivers/netapp/iscsi.py:1829 +#, python-format +msgid "Error unmapping lun. Code :%(code)s, Message:%(message)s" +msgstr "Error unmapping lun. Code :%(code)s, Message:%(message)s" + +#: cinder/volume/drivers/netapp/iscsi.py:1923 +#, python-format +msgid "Could not find attribute for LUN named %s" +msgstr "Could not find attribute for LUN named %s" + +#: cinder/volume/drivers/netapp/iscsi.py:2057 +#, python-format +msgid "No iscsi service found for vserver %s" +msgstr "No iscsi service found for vserver %s" + +#: cinder/volume/drivers/netapp/iscsi.py:2192 +#, python-format +msgid "No clonned lun named %s found on the filer" +msgstr "No clonned lun named %s found on the filer" + +#: cinder/volume/drivers/netapp/iscsi.py:2377 +#, python-format +msgid "Error finding luns for volume %(vol)s. Verify volume exists." +msgstr "Error finding luns for volume %(vol)s. Verify volume exists." 
+ +#: cinder/volume/drivers/netapp/iscsi.py:2481 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s completed" +msgstr "Clone operation with src %(name)s and dest %(new_name)s completed" + +#: cinder/volume/drivers/netapp/iscsi.py:2484 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s failed" +msgstr "Clone operation with src %(name)s and dest %(new_name)s failed" + +#: cinder/volume/drivers/netapp/nfs.py:312 +#, python-format +msgid "" +"Cloning with params ip %(host_ip)s, exp_path\n" +" %(export_path)s, vol %(volume_name)s,\n" +" clone_name %(clone_name)s" +msgstr "" +"Cloning with params ip %(host_ip)s, exp_path\n" +" %(export_path)s, vol %(volume_name)s,\n" +" clone_name %(clone_name)s" + +#: cinder/volume/drivers/netapp/nfs.py:461 +#, python-format +msgid "No interface found on cluster for ip %s" +msgstr "No interface found on cluster for ip %s" + +#: cinder/volume/drivers/netapp/nfs.py:487 +#, python-format +msgid "" +"No volume on cluster with vserver\n" +" %(vserver)s and junction path " +"%(junction)s\n" +" " +msgstr "" +"No volume on cluster with vserver\n" +" %(vserver)s and junction path " +"%(junction)s\n" +" " + +#: cinder/volume/drivers/netapp/nfs.py:493 +#, python-format +msgid "" +"Cloning with params volume %(volume)s,src %(src_path)s,\n" +" dest %(dest_path)s, vserver %(vserver)s" +msgstr "" +"Cloning with params volume %(volume)s,src %(src_path)s,\n" +" dest %(dest_path)s, vserver %(vserver)s" + +#: cinder/volume/drivers/netapp/nfs.py:547 +#, python-format +msgid "No storage path found for export path %s" +msgstr "No storage path found for export path %s" + +#: cinder/volume/drivers/netapp/nfs.py:555 +#, python-format +msgid "Cloning with src %(src_path)s, dest %(dest_path)s" +msgstr "Cloning with src %(src_path)s, dest %(dest_path)s" + +#: cinder/volume/drivers/nexenta/__init__.py:27 +msgid "Nexenta SA returned the error" +msgstr "Nexenta SA returned the error" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:64 +#, python-format +msgid "Sending JSON data: %s" +msgstr "Sending JSON data: %s" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:69 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "Auto switching to HTTPS connection to %s" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:75 +msgid "No headers in server response" +msgstr "No headers in server response" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:76 +msgid "Bad response from server" +msgstr "Bad response from server" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:79 +#, python-format +msgid "Got response: %s" +msgstr "Got response: %s" + +#: cinder/volume/drivers/nexenta/volume.py:99 +#, python-format +msgid "Volume %s does not exist in Nexenta SA" +msgstr "Volume %s does not exist in Nexenta SA" + +#: cinder/volume/drivers/nexenta/volume.py:211 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "Ignored target group creation error \"%s\" while ensuring export" + +#: cinder/volume/drivers/nexenta/volume.py:220 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "Ignored target group member addition error \"%s\" while ensuring export" + +#: cinder/volume/drivers/nexenta/volume.py:228 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "Ignored LU creation error \"%s\" while ensuring export" + +#: cinder/volume/drivers/nexenta/volume.py:238 +#, python-format +msgid "Ignored LUN mapping 
entry addition error \"%s\" while ensuring export" +msgstr "Ignored LUN mapping entry addition error \"%s\" while ensuring export" + +#: cinder/volume/drivers/nexenta/volume.py:274 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" + +#: cinder/volume/drivers/nexenta/volume.py:282 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" + +#: cinder/volume/drivers/san/hp_lefthand.py:74 +#, python-format +msgid "CLIQ command returned %s" +msgstr "CLIQ command returned %s" + +#: cinder/volume/drivers/san/hp_lefthand.py:80 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "Malformed response to CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" + +#: cinder/volume/drivers/san/hp_lefthand.py:88 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" + +#: cinder/volume/drivers/san/hp_lefthand.py:118 +#, python-format +msgid "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. " +"Result=%(_xml)s" +msgstr "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. " +"Result=%(_xml)s" + +#: cinder/volume/drivers/san/hp_lefthand.py:171 +#, python-format +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" +msgstr "Volume info: %(volume_name)s => %(volume_attributes)s" + +#: cinder/volume/drivers/san/hp_lefthand.py:235 +msgid "local_path not supported" +msgstr "local_path not supported" + +#: cinder/volume/drivers/san/san.py:148 +#, python-format +msgid "Error running SSH command: %s" +msgstr "Error running SSH command: %s" + +#: cinder/volume/drivers/san/san.py:169 +msgid "Specify san_password or san_private_key" +msgstr "Specify san_password or san_private_key" + +#: cinder/volume/drivers/san/san.py:173 +msgid "san_ip must be set" +msgstr "san_ip must be set" + +#: cinder/volume/drivers/san/solaris.py:80 +#, python-format +msgid "Cannot parse list-view output: %s" +msgstr "Cannot parse list-view output: %s" + +#: cinder/volume/drivers/san/solaris.py:174 +#, python-format +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgstr "LUID not found for %(zfs_poolname)s. Output=%(out)s" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:257 +#, python-format +msgid "SSH Command failed after '%(total_attempts)r' attempts: '%(command)s'" +msgstr "SSH Command failed after '%(total_attempts)r' attempts: '%(command)s'" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:261 +#, python-format +msgid "Error running ssh command: %s" +msgstr "Error running ssh command: %s" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:446 +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:104 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:103 +#, python-format +msgid "CPG (%s) doesn't exist on array" +msgstr "CPG (%s) doesn't exist on array" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:493 +#, python-format +msgid "Must specify a valid persona %(valid)s, value '%(persona)s' is invalid." +msgstr "Must specify a valid persona %(valid)s, value '%(persona)s' is invalid." 
+
+#: cinder/volume/drivers/san/hp/hp_3par_common.py:532
+#, python-format
+msgid ""
+"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is "
+"invalid."
+msgstr ""
+"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is "
+"invalid."
+
+#: cinder/volume/drivers/san/hp/hp_3par_common.py:569
+#, python-format
+msgid "Volume (%s) already exists on array"
+msgstr "Volume (%s) already exists on array"
+
+#: cinder/volume/drivers/san/hp/hp_3par_common.py:622
+#, python-format
+msgid "Unexpected state while cloning %s"
+msgstr "Unexpected state while cloning %s"
+
+#: cinder/volume/drivers/san/hp/hp_3par_fc.py:97
+#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:96
+msgid "Login to 3PAR array invalid"
+msgstr "Login to 3PAR array invalid"
+
+#: cinder/volume/drivers/san/hp/hp_3par_fc.py:218
+#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:237
+#, python-format
+msgid "The hostname must be called '%s'"
+msgstr "The hostname must be called '%s'"
+
diff --git a/cinder/locale/es/LC_MESSAGES/cinder.po b/cinder/locale/es/LC_MESSAGES/cinder.po
new file mode 100644
index 0000000000..69c0cb128b
--- /dev/null
+++ b/cinder/locale/es/LC_MESSAGES/cinder.po
@@ -0,0 +1,5614 @@
+# Spanish translation for cinder
+# Copyright (c) 2011 Rosetta Contributors and Canonical Ltd 2011
+# This file is distributed under the same license as the cinder package.
+# FIRST AUTHOR <EMAIL@ADDRESS>, 2011.
+#
+msgid ""
+msgstr ""
+"Project-Id-Version: cinder\n"
+"Report-Msgid-Bugs-To: FULL NAME <EMAIL@ADDRESS>\n"
+"POT-Creation-Date: 2013-05-21 18:10+0000\n"
+"PO-Revision-Date: 2012-03-10 06:08+0000\n"
+"Last-Translator: Oscar Rosario \n"
+"Language-Team: Spanish <es@li.org>\n"
+"Plural-Forms: nplurals=2; plural=(n != 1)\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=utf-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Generated-By: Babel 0.9.6\n"
+
+#: cinder/context.py:61
+#, python-format
+msgid "Arguments dropped when creating context: %s"
+msgstr ""
+
+#: cinder/context.py:92
+#, python-format
+msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r"
+msgstr ""
+
+#: cinder/exception.py:63
+msgid "Unexpected error while running command."
+msgstr "Error inesperado mientras se ejecutaba el comando."
+
+#: cinder/exception.py:66
+#, python-format
+msgid ""
+"%(description)s\n"
+"Command: %(cmd)s\n"
+"Exit code: %(exit_code)s\n"
+"Stdout: %(stdout)r\n"
+"Stderr: %(stderr)r"
+msgstr ""
+"%(description)s\n"
+"Comando: %(cmd)s\n"
+"Código de salida: %(exit_code)s\n"
+"Stdout: %(stdout)r\n"
+"Stderr: %(stderr)r"
+
+#: cinder/exception.py:90
+msgid "DB exception wrapped."
+msgstr ""
+
+#: cinder/exception.py:104
+msgid "An unknown exception occurred."
+msgstr "Una excepción desconocida ha ocurrido."
+
+#: cinder/exception.py:125 cinder/openstack/common/rpc/common.py:86
+msgid "Exception in string format operation"
+msgstr ""
+
+#: cinder/exception.py:138
+msgid "Connection to glance failed"
+msgstr "Conexión con glance fallida"
+
+#: cinder/exception.py:142
+msgid "Not authorized."
+msgstr "No autorizado."
+
+#: cinder/exception.py:147
+msgid "User does not have admin privileges"
+msgstr "El usuario no tiene privilegios de administrador"
+
+#: cinder/exception.py:151
+#, python-format
+msgid "Policy doesn't allow %(action)s to be performed."
+msgstr ""
+
+#: cinder/exception.py:155
+#, fuzzy, python-format
+msgid "Not authorized for image %(image_id)s."
+msgstr "no hay método para el mensaje: %s"
+
+#: cinder/exception.py:159
+msgid "Unacceptable parameters."
+msgstr "Parámetros inaceptables."
+
+#: cinder/exception.py:164
+msgid "Invalid snapshot"
+msgstr "Captura no válida"
+
+#: cinder/exception.py:168
+#, python-format
+msgid "Volume %(volume_id)s is still attached, detach volume first."
+msgstr ""
+
+#: cinder/exception.py:172
+msgid "Failed to load data into json format"
+msgstr "Fallo al ingresar información en formato json"
+
+#: cinder/exception.py:176
+msgid "The request is invalid."
+msgstr "La petición es inválida."
+
+#: cinder/exception.py:180
+#, fuzzy
+msgid "The results are invalid."
+msgstr "La petición es inválida."
+
+#: cinder/exception.py:184
+msgid "Invalid input received"
+msgstr "Entrada inválida recibida"
+
+#: cinder/exception.py:188
+msgid "Invalid volume type"
+msgstr ""
+
+#: cinder/exception.py:192
+msgid "Invalid volume"
+msgstr ""
+
+#: cinder/exception.py:196
+#, python-format
+msgid "Invalid content type %(content_type)s."
+msgstr "Tipo de contenido inválido %(content_type)s."
+
+#: cinder/exception.py:200
+msgid "Invalid Parameter: Unicode is not supported by the current database."
+msgstr ""
+
+#: cinder/exception.py:207
+#, python-format
+msgid "%(err)s"
+msgstr ""
+
+#: cinder/exception.py:211
+msgid "Service is unavailable at this time."
+msgstr "El servicio no está disponible en este momento."
+
+#: cinder/exception.py:215
+#, python-format
+msgid "Image %(image_id)s is unacceptable: %(reason)s"
+msgstr ""
+
+#: cinder/exception.py:219
+msgid "Expected a uuid but received %(uuid)."
+msgstr ""
+
+#: cinder/exception.py:223
+msgid "Resource could not be found."
+msgstr ""
+
+#: cinder/exception.py:229
+#, python-format
+msgid "Volume %(volume_id)s persistence file could not be found."
+msgstr ""
+
+#: cinder/exception.py:233
+#, python-format
+msgid "Volume %(volume_id)s could not be found."
+msgstr ""
+
+#: cinder/exception.py:237
+#, python-format
+msgid "Unable to locate account %(account_name)s on Solidfire device"
+msgstr ""
+
+#: cinder/exception.py:242
+#, python-format
+msgid "Volume not found for instance %(instance_id)s."
+msgstr ""
+
+#: cinder/exception.py:246
+#, python-format
+msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s."
+msgstr ""
+
+#: cinder/exception.py:251 cinder/exception.py:264
+msgid "Invalid metadata"
+msgstr ""
+
+#: cinder/exception.py:255 cinder/exception.py:268
+msgid "Invalid metadata size"
+msgstr ""
+
+#: cinder/exception.py:259
+#, python-format
+msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s."
+msgstr ""
+
+#: cinder/exception.py:272
+#, python-format
+msgid "Volume type %(volume_type_id)s could not be found."
+msgstr ""
+
+#: cinder/exception.py:276
+#, python-format
+msgid "Volume type with name %(volume_type_name)s could not be found."
+msgstr ""
+
+#: cinder/exception.py:281
+#, python-format
+msgid ""
+"Volume Type %(volume_type_id)s has no extra specs with key "
+"%(extra_specs_key)s."
+msgstr ""
+
+#: cinder/exception.py:286
+#, python-format
+msgid "Snapshot %(snapshot_id)s could not be found."
+msgstr ""
+
+#: cinder/exception.py:290
+#, python-format
+msgid "deleting volume %(volume_name)s that has snapshot"
+msgstr ""
+
+#: cinder/exception.py:294
+#, python-format
+msgid "deleting snapshot %(snapshot_name)s that has dependent volumes"
+msgstr ""
+
+#: cinder/exception.py:299
+#, python-format
+msgid "No target id found for volume %(volume_id)s."
+msgstr ""
+
+#: cinder/exception.py:303
+#, fuzzy, python-format
+msgid "Failed to create iscsi target for volume %(volume_id)s."
+msgstr "Fallo al generar metadatos para la ip %s" + +#: cinder/exception.py:307 +#, python-format +msgid "Failed to attach iSCSI target for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:311 +#, python-format +msgid "Failed to remove iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:315 +#, python-format +msgid "No disk at %(location)s" +msgstr "" + +#: cinder/exception.py:319 +#, python-format +msgid "Invalid image href %(image_href)s." +msgstr "" + +#: cinder/exception.py:323 +#, python-format +msgid "Image %(image_id)s could not be found." +msgstr "" + +#: cinder/exception.py:327 +#, python-format +msgid "Service %(service_id)s could not be found." +msgstr "" + +#: cinder/exception.py:331 +#, python-format +msgid "Host %(host)s could not be found." +msgstr "" + +#: cinder/exception.py:335 +#, python-format +msgid "Scheduler Host Filter %(filter_name)s could not be found." +msgstr "" + +#: cinder/exception.py:339 +#, python-format +msgid "Scheduler Host Weigher %(weigher_name)s could not be found." +msgstr "" + +#: cinder/exception.py:343 +#, python-format +msgid "Could not find binary %(binary)s on host %(host)s." +msgstr "" + +#: cinder/exception.py:347 +#, python-format +msgid "Invalid reservation expiration %(expire)s." +msgstr "" + +#: cinder/exception.py:351 +#, python-format +msgid "" +"Change would make usage less than 0 for the following resources: " +"%(unders)s" +msgstr "" + +#: cinder/exception.py:356 +msgid "Quota could not be found" +msgstr "" + +#: cinder/exception.py:360 +#, python-format +msgid "Unknown quota resources %(unknown)s." +msgstr "" + +#: cinder/exception.py:364 +#, python-format +msgid "Quota for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:368 +#, python-format +msgid "Quota class %(class_name)s could not be found." +msgstr "" + +#: cinder/exception.py:372 +#, python-format +msgid "Quota usage for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:376 +#, python-format +msgid "Quota reservation %(uuid)s could not be found." +msgstr "" + +#: cinder/exception.py:380 +#, python-format +msgid "Quota exceeded for resources: %(overs)s" +msgstr "" + +#: cinder/exception.py:384 +#, python-format +msgid "Migration %(migration_id)s could not be found." +msgstr "" + +#: cinder/exception.py:388 +#, python-format +msgid "Migration not found for instance %(instance_id)s with status %(status)s." +msgstr "" + +#: cinder/exception.py:393 +#, python-format +msgid "File %(file_path)s could not be found." +msgstr "" + +#: cinder/exception.py:397 +#, python-format +msgid "Class %(class_name)s could not be found: %(exception)s" +msgstr "" + +#: cinder/exception.py:401 +msgid "Action not allowed." +msgstr "" + +#: cinder/exception.py:410 +#, python-format +msgid "Key pair %(key_name)s already exists." +msgstr "" + +#: cinder/exception.py:414 +#, fuzzy, python-format +msgid "Volume Type %(id)s already exists." +msgstr "el grupo %s ya existe" + +#: cinder/exception.py:418 +msgid "Migration error" +msgstr "" + +#: cinder/exception.py:422 +#, python-format +msgid "Malformed message body: %(reason)s" +msgstr "" + +#: cinder/exception.py:426 +#, python-format +msgid "Could not find config at %(path)s" +msgstr "" + +#: cinder/exception.py:430 +#, python-format +msgid "Could not load paste app '%(name)s' from %(path)s" +msgstr "" + +#: cinder/exception.py:434 +#, python-format +msgid "No valid host was found. 
%(reason)s" +msgstr "" + +#: cinder/exception.py:438 +#, python-format +msgid "Host %(host)s is not up or doesn't exist." +msgstr "" + +#: cinder/exception.py:442 +msgid "Quota exceeded" +msgstr "" + +#: cinder/exception.py:449 +msgid "Requested volume or snapshot exceeds allowed Gigabytes quota" +msgstr "" + +#: cinder/exception.py:454 +msgid "Maximum volume/snapshot size exceeded" +msgstr "" + +#: cinder/exception.py:458 +#, python-format +msgid "Maximum number of volumes allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:462 +#, python-format +msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:466 +#, python-format +msgid "Detected more than one volume with name %(vol_name)s" +msgstr "" + +#: cinder/exception.py:470 +#, python-format +msgid "3PAR Host already exists: %(err)s. %(info)s" +msgstr "" + +#: cinder/exception.py:474 +#, python-format +msgid "Invalid 3PAR Domain: %(err)s" +msgstr "" + +#: cinder/exception.py:478 +#, python-format +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +msgstr "" + +#: cinder/exception.py:483 +msgid "Bad response from SolidFire API" +msgstr "" + +#: cinder/exception.py:487 +#, python-format +msgid "Error in SolidFire API response: data=%(data)s" +msgstr "" + +#: cinder/exception.py:491 +#, python-format +msgid "Unknown or unsupported command %(cmd)s" +msgstr "" + +#: cinder/exception.py:495 +#, python-format +msgid "Malformed response to command %(cmd)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:499 +#, python-format +msgid "Bad HTTP response status %(status)s" +msgstr "" + +#: cinder/exception.py:503 +#, python-format +msgid "Operation failed with status=%(status)s. Full dump: %(data)s" +msgstr "" + +#: cinder/exception.py:507 +#, fuzzy, python-format +msgid "Unable to create server object for initiator %(name)s" +msgstr "" +"No es posible crear el VDI en SR %(sr_ref)s para la instancia " +"%(instance_name)s" + +#: cinder/exception.py:511 +#, python-format +msgid "Unable to find server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:515 +msgid "Unable to find any active VPSA controller" +msgstr "" + +#: cinder/exception.py:519 +#, python-format +msgid "Failed to retrieve attachments for volume %(name)s" +msgstr "" + +#: cinder/exception.py:523 +#, python-format +msgid "Invalid attachment info for volume %(name)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:527 +#, python-format +msgid "Instance %(instance_id)s could not be found." 
+msgstr "" + +#: cinder/exception.py:531 +#, python-format +msgid "Bad or unexpected response from the storage volume backend API: %(data)s" +msgstr "" + +#: cinder/exception.py:536 +#, fuzzy +msgid "Unknown NFS exception" +msgstr "Una excepcion desconocida ha ocurrido" + +#: cinder/exception.py:540 +msgid "No mounted NFS shares found" +msgstr "" + +#: cinder/exception.py:544 cinder/exception.py:556 +#, python-format +msgid "There is no share which can host %(volume_size)sG" +msgstr "" + +#: cinder/exception.py:548 +#, fuzzy +msgid "Unknown Gluster exception" +msgstr "Una excepcion desconocida ha ocurrido" + +#: cinder/exception.py:552 +msgid "No mounted Gluster shares found" +msgstr "" + +#: cinder/exception.py:560 +#, python-format +msgid "" +"Glance metadata cannot be updated, key %(key)s exists for volume id " +"%(volume_id)s" +msgstr "" + +#: cinder/exception.py:565 +msgid "Failed to copy image to volume" +msgstr "" + +#: cinder/exception.py:569 +#, python-format +msgid "Backup %(backup_id)s could not be found." +msgstr "" + +#: cinder/exception.py:573 +#, fuzzy, python-format +msgid "Invalid backup: %(reason)s" +msgstr "backend inválido: %s" + +#: cinder/exception.py:577 +#, fuzzy +msgid "Connection to swift failed" +msgstr "Coneccion con glance fallida" + +#: cinder/flags.py:145 +msgid "Deploy v1 of the Cinder API. " +msgstr "" + +#: cinder/flags.py:148 +msgid "Deploy v2 of the Cinder API. " +msgstr "" + +#: cinder/manager.py:158 +#, python-format +msgid "Skipping %(full_task_name)s, %(ticks_to_skip)s ticks left until next run" +msgstr "" + +#: cinder/manager.py:164 +#, python-format +msgid "Running periodic task %(full_task_name)s" +msgstr "" + +#: cinder/manager.py:171 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "" + +#: cinder/manager.py:216 +msgid "Notifying Schedulers of capabilities ..." 
+msgstr "" + +#: cinder/policy.py:30 +msgid "JSON file representing policy" +msgstr "" + +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" +msgstr "" + +#: cinder/quota.py:704 +#, python-format +msgid "Created reservations %(reservations)s" +msgstr "" + +#: cinder/quota.py:726 +#, python-format +msgid "Failed to commit reservations %(reservations)s" +msgstr "" + +#: cinder/quota.py:747 +#, python-format +msgid "Failed to roll back reservations %(reservations)s" +msgstr "" + +#: cinder/service.py:124 +msgid "SIGTERM received" +msgstr "" + +#: cinder/service.py:174 cinder/openstack/common/service.py:164 +msgid "Parent process has died unexpectedly, exiting" +msgstr "" + +#: cinder/service.py:213 cinder/openstack/common/service.py:200 +msgid "Forking too fast, sleeping" +msgstr "" + +#: cinder/service.py:231 cinder/openstack/common/service.py:121 +#: cinder/openstack/common/service.py:218 +#, python-format +msgid "Caught %s, exiting" +msgstr "" + +#: cinder/service.py:236 cinder/openstack/common/service.py:223 +#, fuzzy +msgid "Unhandled exception" +msgstr "Excepción interna: %s" + +#: cinder/service.py:243 cinder/openstack/common/service.py:230 +#, python-format +msgid "Started child %d" +msgstr "" + +#: cinder/service.py:253 cinder/openstack/common/service.py:240 +#, fuzzy, python-format +msgid "Starting %d workers" +msgstr "configurando la red del host" + +#: cinder/service.py:272 cinder/openstack/common/service.py:257 +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "" + +#: cinder/service.py:275 +#, python-format +msgid "Child %(pid)d exited with status %(code)d" +msgstr "" + +#: cinder/service.py:278 cinder/openstack/common/service.py:265 +#, python-format +msgid "pid %d not in child list" +msgstr "" + +#: cinder/service.py:286 +#, python-format +msgid "_wait_child %d" +msgstr "" + +#: cinder/service.py:302 +#, fuzzy, python-format +msgid "wait wrap.failed %s" +msgstr "No encontrado: %s" + +#: cinder/service.py:310 cinder/openstack/common/service.py:293 +#, python-format +msgid "Caught %s, stopping children" +msgstr "" + +#: cinder/service.py:321 cinder/openstack/common/service.py:304 +#, python-format +msgid "Waiting on %d children to exit" +msgstr "" + +#: cinder/service.py:353 +#, python-format +msgid "Starting %(topic)s node (version %(version_string)s)" +msgstr "" + +#: cinder/service.py:367 cinder/openstack/common/rpc/service.py:47 +#, python-format +msgid "Creating Consumer connection for Service %s" +msgstr "" + +#: cinder/service.py:458 +msgid "Service killed that has no database entry" +msgstr "Se detuvo un servicio sin entrada en la base de datos" + +#: cinder/service.py:495 +msgid "The service database object disappeared, Recreating it." +msgstr "El servicio objeto de base de datos ha desaparecido, recreándolo." + +#: cinder/service.py:510 +msgid "Recovered model server connection!" +msgstr "Recuperada la conexión al servidor de modelos." 
+ +#: cinder/service.py:516 +msgid "model server went away" +msgstr "el servidor de modelos se ha ido" + +#: cinder/service.py:608 +msgid "Full set of FLAGS:" +msgstr "Conjunto completo de opciones (FLAGS):" + +#: cinder/service.py:615 +#, python-format +msgid "%(flag)s : FLAG SET " +msgstr "" + +#: cinder/utils.py:96 +#, python-format +msgid "Fetching %s" +msgstr "Obteniendo %s" + +#: cinder/utils.py:147 cinder/openstack/common/processutils.py:122 +#, python-format +msgid "Got unknown keyword args to utils.execute: %r" +msgstr "" + +#: cinder/utils.py:153 +msgid "" +"The root_helper option (which lets you specify a root wrapper different " +"from cinder-rootwrap, and defaults to using sudo) is now deprecated. You " +"should use the rootwrap_config option instead." +msgstr "" + +#: cinder/utils.py:169 cinder/openstack/common/processutils.py:137 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "Ejecutando cmd (subprocesos): %s" + +#: cinder/utils.py:186 cinder/utils.py:267 +#: cinder/openstack/common/processutils.py:162 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:224 +#, python-format +msgid "Result was %s" +msgstr "El resultado fue %s" + +#: cinder/utils.py:199 cinder/openstack/common/processutils.py:174 +#, python-format +msgid "%r failed. Retrying." +msgstr "" + +#: cinder/utils.py:241 cinder/volume/drivers/san/hp/hp_3par_common.py:197 +#, python-format +msgid "Running cmd (SSH): %s" +msgstr "corriendo cmd (SSH): %s" + +#: cinder/utils.py:243 +msgid "Environment not supported over SSH" +msgstr "" + +#: cinder/utils.py:247 +msgid "process_input not supported over SSH" +msgstr "" + +#: cinder/utils.py:316 cinder/volume/drivers/huawei/huawei_iscsi.py:91 +msgid "Specify a password or private_key" +msgstr "" + +#: cinder/utils.py:332 cinder/volume/drivers/huawei/huawei_iscsi.py:99 +#, fuzzy, python-format +msgid "Error connecting via ssh: %s" +msgstr "Conectando a libvirt: %s" + +#: cinder/utils.py:373 +#, python-format +msgid "debug in callback: %s" +msgstr "Depuración de la devolución de llamada: %s" + +#: cinder/utils.py:532 +#, python-format +msgid "Link Local address is not found.:%s" +msgstr "No se encuentra la dirección del enlace local.:%s" + +#: cinder/utils.py:535 +#, python-format +msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +msgstr "No se pudo obtener enlace de la ip local de %(interface)s :%(ex)s" + +#: cinder/utils.py:569 +#, python-format +msgid "Invalid backend: %s" +msgstr "backend inválido: %s" + +#: cinder/utils.py:580 +#, python-format +msgid "backend %s" +msgstr "backend %s" + +#: cinder/utils.py:630 +msgid "in looping call" +msgstr "" + +#: cinder/utils.py:813 +#, python-format +msgid "Expected object of type: %s" +msgstr "" + +#: cinder/utils.py:903 +#, python-format +msgid "timefunc: '%(name)s' took %(total_time).2f secs" +msgstr "" + +#: cinder/utils.py:1105 +#, python-format +msgid "Could not remove tmpdir: %s" +msgstr "" + +#: cinder/utils.py:1213 +#, python-format +msgid "Unknown byte multiplier: %s" +msgstr "" + +#: cinder/wsgi.py:121 +#, fuzzy, python-format +msgid "Unable to find cert_file : %s" +msgstr "Imposible encontrar SR en VBD %s" + +#: cinder/wsgi.py:124 +#, fuzzy, python-format +msgid "Unable to find ca_file : %s" +msgstr "Imposible encontrar SR en VBD %s" + +#: cinder/wsgi.py:127 +#, fuzzy, python-format +msgid "Unable to find key_file : %s" +msgstr "Imposible encontrar SR en VBD %s" + +#: cinder/wsgi.py:130 +msgid "" +"When running server in SSL mode, you must specify both a cert_file and " +"key_file option value 
in your configuration file" +msgstr "" + +#: cinder/wsgi.py:163 +#, python-format +msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" +msgstr "" + +#: cinder/wsgi.py:207 +#, python-format +msgid "Started %(name)s on %(_host)s:%(_port)s" +msgstr "" + +#: cinder/wsgi.py:226 +msgid "Stopping WSGI server." +msgstr "" + +#: cinder/wsgi.py:240 +msgid "WSGI server has stopped." +msgstr "" + +#: cinder/wsgi.py:309 +msgid "You must implement __call__" +msgstr "" + +#: cinder/api/auth.py:26 +msgid "" +"cinder.api.auth:CinderKeystoneContext is deprecated. Please use " +"cinder.api.middleware.auth:CinderKeystoneContext instead." +msgstr "" + +#: cinder/api/auth.py:34 +msgid "" +"cinder.api.auth:pipeline_factory is deprecated. Please use " +"cinder.api.middleware.auth:pipeline_factory instead." +msgstr "" + +#: cinder/api/common.py:63 cinder/api/common.py:97 cinder/volume/api.py:359 +msgid "limit param must be an integer" +msgstr "" + +#: cinder/api/common.py:66 cinder/api/common.py:101 cinder/volume/api.py:356 +msgid "limit param must be positive" +msgstr "" + +#: cinder/api/common.py:91 +msgid "offset param must be an integer" +msgstr "" + +#: cinder/api/common.py:105 +msgid "offset param must be positive" +msgstr "" + +#: cinder/api/common.py:133 +#, python-format +msgid "marker [%s] not found" +msgstr "" + +#: cinder/api/common.py:160 +#, python-format +msgid "href %s does not contain version" +msgstr "" + +#: cinder/api/extensions.py:184 +msgid "Initializing extension manager." +msgstr "" + +#: cinder/api/extensions.py:199 +#, python-format +msgid "Loaded extension: %s" +msgstr "" + +#: cinder/api/extensions.py:237 +#, python-format +msgid "Ext name: %s" +msgstr "" + +#: cinder/api/extensions.py:238 +#, python-format +msgid "Ext alias: %s" +msgstr "" + +#: cinder/api/extensions.py:239 +#, python-format +msgid "Ext description: %s" +msgstr "" + +#: cinder/api/extensions.py:241 +#, python-format +msgid "Ext namespace: %s" +msgstr "" + +#: cinder/api/extensions.py:242 +#, python-format +msgid "Ext updated: %s" +msgstr "" + +#: cinder/api/extensions.py:244 +#, python-format +msgid "Exception loading extension: %s" +msgstr "" + +#: cinder/api/extensions.py:258 +#, python-format +msgid "Loading extension %s" +msgstr "" + +#: cinder/api/extensions.py:264 +#, python-format +msgid "Calling extension factory %s" +msgstr "" + +#: cinder/api/extensions.py:278 +#, python-format +msgid "osapi_volume_extension is set to deprecated path: %s" +msgstr "" + +#: cinder/api/extensions.py:280 +#, python-format +msgid "" +"Please set your flag or cinder.conf settings for osapi_volume_extension " +"to: %s" +msgstr "" + +#: cinder/api/extensions.py:289 +#, python-format +msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:357 +#, python-format +msgid "Failed to load extension %(classpath)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:381 +#, python-format +msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgstr "" + +#: cinder/api/sizelimit.py:25 +msgid "" +"cinder.api.sizelimit:RequestBodySizeLimiter is deprecated. 
Please use " +"cinder.api.middleware.sizelimit:RequestBodySizeLimiter instead" +msgstr "" + +#: cinder/api/xmlutil.py:268 +msgid "element is not a child" +msgstr "" + +#: cinder/api/xmlutil.py:417 +msgid "root element selecting a list" +msgstr "" + +#: cinder/api/xmlutil.py:742 +#, python-format +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgstr "" + +#: cinder/api/xmlutil.py:861 +msgid "subclasses must implement construct()!" +msgstr "" + +#: cinder/api/contrib/admin_actions.py:80 +#, python-format +msgid "Updating %(resource)s '%(id)s' with '%(update)r'" +msgstr "" + +#: cinder/api/contrib/backups.py:128 +#, python-format +msgid "show called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:140 +#, fuzzy, python-format +msgid "delete called for member %s" +msgstr "Cambio de clave secreta para el usuario %s" + +#: cinder/api/contrib/backups.py:143 +#, python-format +msgid "Delete backup with id: %s" +msgstr "" + +#: cinder/api/contrib/backups.py:185 +#, python-format +msgid "Creating new backup %s" +msgstr "" + +#: cinder/api/contrib/backups.py:195 cinder/api/contrib/backups.py:230 +msgid "Incorrect request body format" +msgstr "" + +#: cinder/api/contrib/backups.py:201 +#, python-format +msgid "Creating backup of volume %(volume_id)s in container %(container)s" +msgstr "" + +#: cinder/api/contrib/backups.py:221 +#, python-format +msgid "Restoring backup %(backup_id)s (%(body)s)" +msgstr "" + +#: cinder/api/contrib/backups.py:234 +#, python-format +msgid "Restoring backup %(backup_id)s to volume %(volume_id)s" +msgstr "" + +#: cinder/api/contrib/extended_snapshot_attributes.py:61 +msgid "Snapshot not found." +msgstr "" + +#: cinder/api/contrib/hosts.py:83 cinder/api/openstack/wsgi.py:158 +msgid "cannot understand XML" +msgstr "" + +#: cinder/api/contrib/hosts.py:133 +#, python-format +msgid "Host '%s' could not be found." +msgstr "" + +#: cinder/api/contrib/hosts.py:162 +#, python-format +msgid "Invalid status: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:165 +#, python-format +msgid "Invalid update setting: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:177 +#, python-format +msgid "Setting host %(host)s to %(state)s." +msgstr "" + +#: cinder/api/contrib/hosts.py:202 +msgid "Describe-resource is admin only functionality" +msgstr "" + +#: cinder/api/contrib/hosts.py:210 +msgid "Host not found" +msgstr "" + +#: cinder/api/contrib/quotas.py:63 +msgid "Quota limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:102 +msgid "Request body empty" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:106 +#: cinder/api/v1/snapshot_metadata.py:77 cinder/api/v1/volume_metadata.py:77 +#: cinder/api/v2/snapshot_metadata.py:77 +msgid "Request body and URI mismatch" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:109 +#: cinder/api/v1/snapshot_metadata.py:81 cinder/api/v1/volume_metadata.py:81 +#: cinder/api/v2/snapshot_metadata.py:81 +msgid "Request body contains too many items" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:159 +msgid "Invalid request body" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:163 +msgid "No image_name was specified in request." 
+msgstr ""
+
+#: cinder/api/middleware/fault.py:45
+#, python-format
+msgid "Caught error: %s"
+msgstr "Error capturado: %s"
+
+#: cinder/api/middleware/fault.py:54 cinder/api/openstack/wsgi.py:886
+#, python-format
+msgid "%(url)s returned with HTTP %(status)d"
+msgstr ""
+
+#: cinder/api/middleware/sizelimit.py:54 cinder/api/middleware/sizelimit.py:63
+#: cinder/api/middleware/sizelimit.py:77
+msgid "Request is too large."
+msgstr ""
+
+#: cinder/api/openstack/__init__.py:74
+msgid "Must specify an ExtensionManager class"
+msgstr ""
+
+#: cinder/api/openstack/__init__.py:85
+#, python-format
+msgid "Extended resource: %s"
+msgstr ""
+
+#: cinder/api/openstack/__init__.py:110
+#, python-format
+msgid ""
+"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such "
+"resource"
+msgstr ""
+
+#: cinder/api/openstack/__init__.py:115
+#, python-format
+msgid "Extension %(ext_name)s extending resource: %(collection)s"
+msgstr ""
+
+#: cinder/api/openstack/__init__.py:128
+msgid ""
+"cinder.api.openstack:FaultWrapper is deprecated. Please use "
+"cinder.api.middleware.fault:FaultWrapper instead."
+msgstr ""
+
+#: cinder/api/openstack/urlmap.py:25
+msgid ""
+"cinder.api.openstack.urlmap:urlmap_factory is deprecated. Please use "
+"cinder.api.urlmap:urlmap_factory instead."
+msgstr ""
+
+#: cinder/api/openstack/wsgi.py:134 cinder/api/openstack/wsgi.py:537
+msgid "cannot understand JSON"
+msgstr ""
+
+#: cinder/api/openstack/wsgi.py:542
+msgid "too many body keys"
+msgstr ""
+
+#: cinder/api/openstack/wsgi.py:581
+#, python-format
+msgid "Exception handling resource: %s"
+msgstr ""
+
+#: cinder/api/openstack/wsgi.py:586
+#, python-format
+msgid "Fault thrown: %s"
+msgstr ""
+
+#: cinder/api/openstack/wsgi.py:589
+#, python-format
+msgid "HTTP exception thrown: %s"
+msgstr ""
+
+#: cinder/api/openstack/wsgi.py:697
+msgid "Unrecognized Content-Type provided in request"
+msgstr ""
+
+#: cinder/api/openstack/wsgi.py:701
+msgid "No Content-Type provided in request"
+msgstr ""
+
+#: cinder/api/openstack/wsgi.py:705
+msgid "Empty body provided in request"
+msgstr ""
+
+#: cinder/api/openstack/wsgi.py:816
+#, python-format
+msgid "There is no such action: %s"
+msgstr ""
+
+#: cinder/api/openstack/wsgi.py:819 cinder/api/openstack/wsgi.py:832
+#: cinder/api/v1/snapshot_metadata.py:55 cinder/api/v1/snapshot_metadata.py:73
+#: cinder/api/v1/snapshot_metadata.py:98 cinder/api/v1/snapshot_metadata.py:123
+#: cinder/api/v1/volume_metadata.py:55 cinder/api/v1/volume_metadata.py:73
+#: cinder/api/v1/volume_metadata.py:98 cinder/api/v1/volume_metadata.py:123
+#: cinder/api/v2/snapshot_metadata.py:55 cinder/api/v2/snapshot_metadata.py:73
+#: cinder/api/v2/snapshot_metadata.py:98 cinder/api/v2/snapshot_metadata.py:123
+msgid "Malformed request body"
+msgstr ""
+
+#: cinder/api/openstack/wsgi.py:829
+msgid "Unsupported Content-Type"
+msgstr ""
+
+#: cinder/api/openstack/wsgi.py:841
+msgid "Malformed request url"
+msgstr ""
+
+#: cinder/api/openstack/wsgi.py:889
+#, python-format
+msgid "%(url)s returned a fault: %(e)s"
+msgstr ""
+
+#: cinder/api/openstack/volume/__init__.py:25
+msgid ""
+"cinder.api.openstack.volume:APIRouter is deprecated. Please use "
+"cinder.api.v1.router:APIRouter instead."
+msgstr ""
+
+#: cinder/api/openstack/volume/versions.py:26
+msgid ""
+"cinder.api.openstack.volume.versions.Versions is deprecated. Please use "
+"cinder.api.versions.Versions instead." 
+msgstr "" + +#: cinder/api/v1/limits.py:144 cinder/api/v2/limits.py:144 +#, python-format +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." +msgstr "" + +#: cinder/api/v1/limits.py:270 cinder/api/v2/limits.py:270 +msgid "This request was rate-limited." +msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:39 cinder/api/v1/snapshot_metadata.py:119 +#: cinder/api/v1/snapshot_metadata.py:158 cinder/api/v2/snapshot_metadata.py:39 +#: cinder/api/v2/snapshot_metadata.py:119 +#: cinder/api/v2/snapshot_metadata.py:158 +#, fuzzy +msgid "snapshot does not exist" +msgstr "instancia %s: creando snapshot" + +#: cinder/api/v1/snapshot_metadata.py:141 +#: cinder/api/v1/snapshot_metadata.py:151 cinder/api/v1/volume_metadata.py:141 +#: cinder/api/v1/volume_metadata.py:151 cinder/api/v2/snapshot_metadata.py:141 +#: cinder/api/v2/snapshot_metadata.py:151 +msgid "Metadata item was not found" +msgstr "" + +#: cinder/api/v1/snapshots.py:123 cinder/api/v2/snapshots.py:123 +#, python-format +msgid "Delete snapshot with id: %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:173 cinder/api/v2/snapshots.py:179 +#, python-format +msgid "Create snapshot from volume %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:177 cinder/api/v2/snapshots.py:188 +#, python-format +msgid "Invalid value '%s' for force. " +msgstr "" + +#: cinder/api/v1/volume_metadata.py:39 cinder/api/v1/volume_metadata.py:119 +#: cinder/api/v1/volume_metadata.py:158 +#, fuzzy +msgid "volume does not exist" +msgstr "el grupo de volumenes %s no existe" + +#: cinder/api/v1/volumes.py:106 +#, python-format +msgid "vol=%s" +msgstr "" + +#: cinder/api/v1/volumes.py:236 cinder/api/v2/volumes.py:153 +#, python-format +msgid "Delete volume with id: %s" +msgstr "" + +#: cinder/api/v1/volumes.py:278 cinder/api/v1/volumes.py:282 +#: cinder/api/v2/volumes.py:209 cinder/api/v2/volumes.py:213 +msgid "Invalid imageRef provided." +msgstr "" + +#: cinder/api/v1/volumes.py:339 cinder/api/v2/volumes.py:272 +#, python-format +msgid "Create volume of %s GB" +msgstr "Crear volumen de %s GB" + +#: cinder/api/v1/volumes.py:418 +#, python-format +msgid "Removing options '%(bad_options)s' from query" +msgstr "" + +#: cinder/api/v2/volumes.py:359 +#, python-format +msgid "Removing options '%s' from query" +msgstr "" + +#: cinder/backup/api.py:53 +#, fuzzy +msgid "Backup status must be available or error" +msgstr "El estado del volumen debe estar disponible" + +#: cinder/backup/api.py:78 +#, fuzzy +msgid "Volume to be backed up must be available" +msgstr "El estado del volumen debe estar disponible" + +#: cinder/backup/api.py:112 +#, fuzzy +msgid "Backup status must be available" +msgstr "El estado del volumen debe estar disponible" + +#: cinder/backup/api.py:117 +msgid "Backup to be restored has invalid size" +msgstr "" + +#: cinder/backup/api.py:126 +#, python-format +msgid "Creating volume of %(size)s GB for restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/api.py:140 +#, python-format +msgid "" +"volume size %(volume_size)d is too small to restore backup of size " +"%(size)d." 
+msgstr "" + +#: cinder/backup/api.py:145 +#, fuzzy +msgid "Volume to be restored to must be available" +msgstr "El estado del volumen debe estar disponible" + +#: cinder/backup/api.py:151 +msgid "Volume to be restored to is smaller than the backup to be restored" +msgstr "" + +#: cinder/backup/api.py:155 +#, python-format +msgid "Overwriting volume %(volume_id)s with restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/manager.py:79 +msgid "Cleaning up incomplete backup operations" +msgstr "" + +#: cinder/backup/manager.py:83 +#, python-format +msgid "Resetting volume %s to available (was backing-up)" +msgstr "" + +#: cinder/backup/manager.py:87 +#, python-format +msgid "Resetting volume %s to error_restoring (was restoring-backup)" +msgstr "" + +#: cinder/backup/manager.py:98 +#, python-format +msgid "Resetting backup %s to error (was creating)" +msgstr "" + +#: cinder/backup/manager.py:104 +#, python-format +msgid "Resetting backup %s to available (was restoring)" +msgstr "" + +#: cinder/backup/manager.py:109 +#, python-format +msgid "Resuming delete on backup: %s" +msgstr "" + +#: cinder/backup/manager.py:119 +#, python-format +msgid "create_backup started, backup: %(backup_id)s for volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:128 +#, python-format +msgid "" +"create_backup aborted, expected volume status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:137 +#, python-format +msgid "" +"create_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:160 +#, python-format +msgid "create_backup finished. backup: %s" +msgstr "" + +#: cinder/backup/manager.py:166 +#, python-format +msgid "" +"restore_backup started, restoring backup: %(backup_id)s to volume: " +"%(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:175 +#, python-format +msgid "" +"restore_backup aborted, expected volume status %(expected_status)s but " +"got %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:183 +#, python-format +msgid "" +"restore_backup aborted, expected backup status %(expected_status)s but " +"got %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:199 +#, python-format +msgid "" +"restore_backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]" +msgstr "" + +#: cinder/backup/manager.py:220 +#, python-format +msgid "" +"restore_backup finished, backup: %(backup_id)s restored to volume: " +"%(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:228 +#, python-format +msgid "delete_backup started, backup: %s" +msgstr "" + +#: cinder/backup/manager.py:234 +#, python-format +msgid "" +"delete_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:244 +#, python-format +msgid "" +"delete_backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]" +msgstr "" + +#: cinder/backup/manager.py:264 +#, python-format +msgid "delete_backup finished, backup %s deleted" +msgstr "" + +#: cinder/backup/services/swift.py:96 +#, python-format +msgid "unsupported compression algorithm: %s" +msgstr "" + +#: cinder/backup/services/swift.py:117 +#, python-format +msgid "_check_container_exists: container: %s" +msgstr "" + +#: 
cinder/backup/services/swift.py:122 +#, fuzzy, python-format +msgid "container %s does not exist" +msgstr "el grupo de volumenes %s no existe" + +#: cinder/backup/services/swift.py:127 +#, python-format +msgid "container %s exists" +msgstr "" + +#: cinder/backup/services/swift.py:133 +#, python-format +msgid "_create_container started, container: %(container)s,backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:148 +#, python-format +msgid "_generate_swift_object_name_prefix: %s" +msgstr "" + +#: cinder/backup/services/swift.py:159 +#, python-format +msgid "generated object list: %s" +msgstr "" + +#: cinder/backup/services/swift.py:169 +#, python-format +msgid "" +"_write_metadata started, container name: %(container)s, metadata " +"filename: %(filename)s" +msgstr "" + +#: cinder/backup/services/swift.py:184 +#, python-format +msgid "" +"error writing metadata file to swift, MD5 of metadata file in swift " +"[%(etag)s] is not the same as MD5 of metadata file sent to swift " +"[%(md5)s]" +msgstr "" + +#: cinder/backup/services/swift.py:188 +msgid "_write_metadata finished" +msgstr "" + +#: cinder/backup/services/swift.py:193 +#, python-format +msgid "" +"_read_metadata started, container name: %(container)s, metadata filename:" +" %(filename)s" +msgstr "" + +#: cinder/backup/services/swift.py:197 +#, python-format +msgid "_read_metadata finished (%s)" +msgstr "" + +#: cinder/backup/services/swift.py:207 +#, fuzzy, python-format +msgid "volume size %d is invalid." +msgstr "La petición es inválida." + +#: cinder/backup/services/swift.py:221 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to swift, volume size: " +"%(volume_size_bytes)d, swift object names prefix %(object_prefix)s, " +"availability zone: %(availability_zone)s" +msgstr "" + +#: cinder/backup/services/swift.py:237 +msgid "reading chunk of data from volume" +msgstr "" + +#: cinder/backup/services/swift.py:244 +#, python-format +msgid "" +"compressed %(data_size_bytes)d bytes of data to %(comp_size_bytes)d bytes" +" using %(algorithm)s" +msgstr "" + +#: cinder/backup/services/swift.py:248 +msgid "not compressing data" +msgstr "" + +#: cinder/backup/services/swift.py:252 +msgid "About to put_object" +msgstr "" + +#: cinder/backup/services/swift.py:257 +#, python-format +msgid "swift MD5 for %(object_name)s: %(etag)s" +msgstr "" + +#: cinder/backup/services/swift.py:260 +#, python-format +msgid "backup MD5 for %(object_name)s: %(md5)s" +msgstr "" + +#: cinder/backup/services/swift.py:262 +#, python-format +msgid "" +"error writing object to swift, MD5 of object in swift %(etag)s is not the" +" same as MD5 of object sent to swift %(md5)s" +msgstr "" + +#: cinder/backup/services/swift.py:268 +msgid "Calling eventlet.sleep(0)" +msgstr "" + +#: cinder/backup/services/swift.py:276 +#, python-format +msgid "backup %s finished." +msgstr "" + +#: cinder/backup/services/swift.py:281 +#, python-format +msgid "v1 swift volume backup restore of %s started" +msgstr "" + +#: cinder/backup/services/swift.py:287 +#, python-format +msgid "metadata_object_names = %s" +msgstr "" + +#: cinder/backup/services/swift.py:293 +msgid "" +"restore_backup aborted, actual swift object list in swift does not match " +"object list stored in metadata" +msgstr "" + +#: cinder/backup/services/swift.py:299 +#, python-format +msgid "" +"restoring object from swift. 
backup: %(backup_id)s, container: " +"%(container)s, swift object name: %(object_name)s, volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:309 +#, python-format +msgid "decompressing data using %s algorithm" +msgstr "" + +#: cinder/backup/services/swift.py:323 +#, python-format +msgid "v1 swift volume backup restore of %s finished" +msgstr "" + +#: cinder/backup/services/swift.py:331 +#, python-format +msgid "" +"starting restore of backup %(object_prefix)s from swift container: " +"%(container)s, to volume %(volume_id)s, backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:339 +#, python-format +msgid "Restoring swift backup version %s" +msgstr "" + +#: cinder/backup/services/swift.py:344 +#, python-format +msgid "No support to restore swift backup version %s" +msgstr "" + +#: cinder/backup/services/swift.py:348 +#, python-format +msgid "restore %(backup_id)s to %(volume_id)s finished." +msgstr "" + +#: cinder/backup/services/swift.py:362 +msgid "swift error while listing objects, continuing with delete" +msgstr "" + +#: cinder/backup/services/swift.py:371 +#, python-format +msgid "swift error while deleting object %s, continuing with delete" +msgstr "" + +#: cinder/backup/services/swift.py:374 +#, python-format +msgid "deleted swift object: %(swift_object_name)s in container: %(container)s" +msgstr "" + +#: cinder/backup/services/swift.py:380 +#, python-format +msgid "delete %s finished" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:152 +#, python-format +msgid "Creating iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:171 cinder/brick/iscsi/iscsi.py:264 +#: cinder/brick/iscsi/iscsi.py:400 cinder/brick/iscsi/iscsi.py:409 +#, python-format +msgid "Failed to create iscsi target for volume id:%(vol_id)s." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:181 +#, python-format +msgid "" +"Failed to create iscsi target for volume id:%(vol_id)s. Please ensure " +"your tgtd config file contains 'include %(volumes_dir)s/*'" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:192 +#, python-format +msgid "Removing iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:209 cinder/brick/iscsi/iscsi.py:426 +#, python-format +msgid "Failed to remove iscsi target for volume id:%(vol_id)s." 
+msgstr ""
+
+#: cinder/brick/iscsi/iscsi.py:218 cinder/brick/iscsi/iscsi.py:434
+msgid "valid iqn needed for show_target"
+msgstr ""
+
+#: cinder/brick/iscsi/iscsi.py:270
+#, python-format
+msgid "Removing iscsi_target for volume: %s"
+msgstr ""
+
+#: cinder/brick/iscsi/iscsi.py:356
+msgid "rtstool is not installed correctly"
+msgstr ""
+
+#: cinder/brick/iscsi/iscsi.py:376
+#, python-format
+msgid "Creating iscsi_target for volume: %s"
+msgstr ""
+
+#: cinder/brick/iscsi/iscsi.py:416
+#, python-format
+msgid "Removing iscsi_target: %s"
+msgstr "Eliminando iscsi_target: %s"
+
+#: cinder/brick/iscsi/iscsi.py:455
+#, python-format
+msgid "Failed to add initiator iqn %s to target"
+msgstr ""
+
+#: cinder/brick/local_dev/lvm.py:35
+#, python-format
+msgid "Unable to find Volume Group: %s"
+msgstr "No se puede encontrar el grupo de volúmenes: %s"
+
+#: cinder/brick/local_dev/lvm.py:41
+#, python-format
+msgid "Failed to create Volume Group: %s"
+msgstr "Fallo al crear el grupo de volúmenes: %s"
+
+#: cinder/brick/local_dev/lvm.py:75
+msgid "Error creating Volume Group"
+msgstr ""
+
+#: cinder/brick/local_dev/lvm.py:76
+#, python-format
+msgid "Cmd :%s"
+msgstr ""
+
+#: cinder/brick/local_dev/lvm.py:77
+#, python-format
+msgid "StdOut :%s"
+msgstr ""
+
+#: cinder/brick/local_dev/lvm.py:78
+#, python-format
+msgid "StdErr :%s"
+msgstr ""
+
+#: cinder/brick/local_dev/lvm.py:82
+#, python-format
+msgid "Unable to locate Volume Group %s"
+msgstr "No se puede localizar el grupo de volúmenes %s"
+
+#: cinder/brick/local_dev/lvm.py:265
+#, python-format
+msgid "Unable to find VG: %s"
+msgstr "No se puede encontrar el VG: %s"
+
+#: cinder/brick/local_dev/lvm.py:284
+msgid ""
+"Requested to setup thin provisioning, however current LVM version does "
+"not support it."
+msgstr ""
+
+#: cinder/brick/local_dev/lvm.py:337
+#, python-format
+msgid "Unable to find LV: %s"
+msgstr "No se puede encontrar el LV: %s"
+
+#: cinder/common/sqlalchemyutils.py:68
+msgid "Id not in sort_keys; is sort_keys unique?"
+msgstr ""
+
+#: cinder/common/sqlalchemyutils.py:116
+msgid "Unknown sort direction, must be 'desc' or 'asc'"
+msgstr ""
+
+#: cinder/db/sqlalchemy/api.py:51
+msgid "Use of empty request context is deprecated"
+msgstr "El uso de un contexto de petición vacío está en desuso"
+
+#: cinder/db/sqlalchemy/api.py:178
+#, python-format
+msgid "Unrecognized read_deleted value '%s'"
+msgstr ""
+
+#: cinder/db/sqlalchemy/api.py:786
+#, python-format
+msgid "Change will make usage less than 0 for the following resources: %(unders)s"
+msgstr ""
+
+#: cinder/db/sqlalchemy/api.py:1797 cinder/db/sqlalchemy/api.py:1823
+#, python-format
+msgid "No backend config with id %(sm_backend_id)s"
+msgstr ""
+
+#: cinder/db/sqlalchemy/api.py:1880
+#, python-format
+msgid "No sm_flavor called %(sm_flavor)s"
+msgstr ""
+
+#: cinder/db/sqlalchemy/api.py:1924
+#, python-format
+msgid "No sm_volume with id %(volume_id)s"
+msgstr ""
+
+#: cinder/db/sqlalchemy/api.py:1987
+#, python-format
+msgid "No backup with id %(backup_id)s"
+msgstr ""
+
+#: cinder/db/sqlalchemy/migration.py:74
+msgid "version should be an integer"
+msgstr ""
+
+#: cinder/db/sqlalchemy/migration.py:101
+msgid "Upgrade DB using Essex release first."
+msgstr ""
+
+#: cinder/db/sqlalchemy/session.py:71
+#, python-format
+msgid "Got mysql server has gone away: %s"
+msgstr ""
+
+#: cinder/db/sqlalchemy/session.py:132
+#, python-format
+msgid "SQL connection failed. %s attempts left." 
+msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:243 +msgid "Exception while creating table." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:272 +msgid "Downgrade from initial Cinder install is unsupported." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:85 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:123 +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:82 +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:47 +#, python-format +msgid "Table |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:135 +msgid "quota_classes table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:142 +msgid "quota_usages table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:149 +msgid "reservations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:62 +msgid "Exception while creating table 'volume_glance_metedata'" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:77 +msgid "volume_glance_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:94 +msgid "backups table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:60 +msgid "snapshot_metadata table not dropped" +msgstr "" + +#: cinder/image/glance.py:151 +#, python-format +msgid "" +"Error contacting glance server '%(host)s:%(port)s' for '%(method)s', " +"%(extra)s." +msgstr "" + +#: cinder/image/image_utils.py:116 +msgid "Snapshot list encountered but no header found!" +msgstr "" + +#: cinder/image/image_utils.py:224 +msgid "'qemu-img info' parsing failed." +msgstr "" + +#: cinder/image/image_utils.py:231 +#, python-format +msgid "fmt=%(fmt)s backed by:%(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:248 +#, python-format +msgid "Converted to raw, but format is now %s" +msgstr "" + +#: cinder/image/image_utils.py:278 +#, python-format +msgid "Converted to %(f1)s, but format is now %(f2)s" +msgstr "" + +#: cinder/openstack/common/exception.py:104 +#, fuzzy +msgid "Uncaught exception" +msgstr "Obtenida excepción %s" + +#: cinder/openstack/common/excutils.py:48 +#, python-format +msgid "Original exception being dropped: %s" +msgstr "" + +#: cinder/openstack/common/lockutils.py:102 +#, python-format +msgid "Could not release the acquired lock `%s`" +msgstr "" + +#: cinder/openstack/common/lockutils.py:188 +#, python-format +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:199 +#, python-format +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:226 +#, python-format +msgid "Got file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:234 +#, python-format +msgid "Released file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." 
+msgstr "" + +#: cinder/openstack/common/log.py:226 +#, fuzzy, python-format +msgid "Deprecated: %s" +msgstr "recibido %s" + +#: cinder/openstack/common/log.py:329 +#, python-format +msgid "Error loading logging config %(log_config)s: %(err_msg)s" +msgstr "" + +#: cinder/openstack/common/log.py:379 +#, python-format +msgid "syslog facility must be one of: %s" +msgstr "" + +#: cinder/openstack/common/log.py:537 +#, python-format +msgid "Fatal call to deprecated config: %(msg)s" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:84 +#, python-format +msgid "task run outlasted interval by %s sec" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:91 +msgid "in fixed duration looping call" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:131 +#, python-format +msgid "Dynamic looping call sleeping for %.02f seconds" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:138 +msgid "in dynamic looping call" +msgstr "" + +#: cinder/openstack/common/policy.py:149 +#, python-format +msgid "" +"Inheritance-based rules are deprecated; use the default brain instead of " +"%s." +msgstr "" + +#: cinder/openstack/common/policy.py:163 +#, python-format +msgid "Failed to understand rule %(match)r" +msgstr "" + +#: cinder/openstack/common/policy.py:173 +#, python-format +msgid "Inheritance-based rules are deprecated; update _check_%s" +msgstr "" + +#: cinder/openstack/common/policy.py:180 +#, python-format +msgid "No handler for matches of kind %s" +msgstr "" + +#: cinder/openstack/common/service.py:112 +#: cinder/openstack/common/service.py:275 +#, fuzzy +msgid "Full set of CONF:" +msgstr "Conjunto completo de opciones (FLAGS):" + +#: cinder/openstack/common/service.py:261 +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "" + +#: cinder/openstack/common/strutils.py:72 +#, python-format +msgid "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:125 +#, python-format +msgid "%s not in valid priorities" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:141 +#, python-format +msgid "" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:171 +#, python-format +msgid "Failed to load notifier %s. These notifications will not be sent." +msgstr "" + +#: cinder/openstack/common/notifier/rabbit_notifier.py:27 +msgid "The rabbit_notifier is now deprecated. Please use rpc_notifier instead." +msgstr "" + +#: cinder/openstack/common/notifier/rpc_notifier.py:45 +#: cinder/openstack/common/notifier/rpc_notifier2.py:51 +#, python-format +msgid "Could not send notification to %(topic)s. Payload=%(message)s" +msgstr "" + +#: cinder/openstack/common/rpc/__init__.py:106 +#, python-format +msgid "" +"A RPC is being made while holding a lock. The locks currently held are " +"%(locks)s. This is probably a bug. Please report it. Include the " +"following: [%(stack)s]." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:75 +msgid "Pool creating new connection" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:200 +#, python-format +msgid "no calling threads waiting for msg_id : %s, message : %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:208 +#, python-format +msgid "" +"Number of call waiters is greater than warning threshhold: %d. There " +"could be a MulticallProxyWaiter leak." 
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:291
+#, python-format
+msgid "unpacked context: %s"
+msgstr "contexto desempaquetado: %s"
+
+#: cinder/openstack/common/rpc/amqp.py:337
+#, python-format
+msgid "UNIQUE_ID is %s."
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:406
+#, python-format
+msgid "received %s"
+msgstr "recibido %s"
+
+#: cinder/openstack/common/rpc/amqp.py:414
+#, python-format
+msgid "no method for message: %s"
+msgstr "no hay método para el mensaje: %s"
+
+#: cinder/openstack/common/rpc/amqp.py:415
+#, python-format
+msgid "No method for message: %s"
+msgstr "No hay método para el mensaje: %s"
+
+#: cinder/openstack/common/rpc/amqp.py:443
+#: cinder/openstack/common/rpc/impl_zmq.py:286
+#, python-format
+msgid "Expected exception during message handling (%s)"
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:451
+#: cinder/openstack/common/rpc/impl_zmq.py:292
+msgid "Exception during message handling"
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:586
+#, python-format
+msgid "Making synchronous call on %s ..."
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:589
+#, python-format
+msgid "MSG_ID is %s"
+msgstr "MSG_ID es %s"
+
+#: cinder/openstack/common/rpc/amqp.py:623
+#, python-format
+msgid "Making asynchronous cast on %s..."
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:632
+msgid "Making asynchronous fanout cast..."
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:660
+#, python-format
+msgid "Sending %(event_type)s on %(topic)s"
+msgstr ""
+
+#: cinder/openstack/common/rpc/common.py:74
+msgid "An unknown RPC related exception occurred."
+msgstr "Ha ocurrido una excepción desconocida relacionada con RPC."
+
+#: cinder/openstack/common/rpc/common.py:104
+#, python-format
+msgid ""
+"Remote error: %(exc_type)s %(value)s\n"
+"%(traceback)s."
+msgstr ""
+
+#: cinder/openstack/common/rpc/common.py:121
+#, python-format
+msgid ""
+"Timeout while waiting on RPC response - topic: \"%(topic)s\", RPC method:"
+" \"%(method)s\" info: \"%(info)s\""
+msgstr ""
+
+#: cinder/openstack/common/rpc/common.py:137
+#: cinder/openstack/common/rpc/common.py:138
+#: cinder/openstack/common/rpc/common.py:139
+msgid ""
+msgstr ""
+
+#: cinder/openstack/common/rpc/common.py:143
+#, python-format
+msgid "Found duplicate message(%(msg_id)s). Skipping it."
+msgstr ""
+
+#: cinder/openstack/common/rpc/common.py:147
+msgid "Invalid reuse of an RPC connection."
+msgstr "Reutilización inválida de una conexión RPC."
+
+#: cinder/openstack/common/rpc/common.py:151
+#, python-format
+msgid "Specified RPC version, %(version)s, not supported by this endpoint."
+msgstr ""
+
+#: cinder/openstack/common/rpc/common.py:156
+#, python-format
+msgid ""
+"Specified RPC envelope version, %(version)s, not supported by this "
+"endpoint."
+msgstr ""
+
+#: cinder/openstack/common/rpc/common.py:280
+#, python-format
+msgid "Failed to sanitize %(item)s. Key error %(err)s"
+msgstr ""
+
+#: cinder/openstack/common/rpc/common.py:302
+#, python-format
+msgid "Returning exception %s to caller"
+msgstr ""
+
+#: cinder/openstack/common/rpc/impl_kombu.py:169
+#: cinder/openstack/common/rpc/impl_qpid.py:133
+msgid "Failed to process message... skipping it." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:480 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:502 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:539 +#, python-format +msgid "" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:555 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:609 +#: cinder/openstack/common/rpc/impl_qpid.py:403 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:627 +#: cinder/openstack/common/rpc/impl_qpid.py:418 +#, python-format +msgid "Timed out waiting for RPC response: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:631 +#: cinder/openstack/common/rpc/impl_qpid.py:422 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:670 +#: cinder/openstack/common/rpc/impl_qpid.py:457 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:351 +#, python-format +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:357 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:370 +msgid "Re-established AMQP queues" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:430 +msgid "Error processing message. Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:97 +msgid "JSON serialization failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:104 +#, python-format +msgid "Deserializing: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:140 +#, python-format +msgid "Connecting to %(addr)s with %(type)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:141 +#, python-format +msgid "-> Subscribed to %(subscribe)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:142 +#, python-format +msgid "-> bind: %(bind)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:150 +msgid "Could not open socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:162 +#, python-format +msgid "Subscribing to %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:204 +msgid "You cannot recv on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:209 +msgid "You cannot send on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:273 +#, fuzzy, python-format +msgid "Running func with context: %s" +msgstr "contenido desempaquetado: %s" + +#: cinder/openstack/common/rpc/impl_zmq.py:311 +msgid "Sending reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:345 +msgid "RPC message did not include method." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:380 +#, fuzzy +msgid "Registering reactor" +msgstr "Des-registrando la imagen %s" + +#: cinder/openstack/common/rpc/impl_zmq.py:392 +msgid "In reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:407 +msgid "Out reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:411 +msgid "Consuming socket" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:453 +#, python-format +msgid "CONSUMER GOT %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:465 +#, python-format +msgid "Creating proxy for topic: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:471 +msgid "Topic contained dangerous characters." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:496 +#, python-format +msgid "ROUTER RELAY-OUT SUCCEEDED %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:505 +msgid "Topic socket file creation failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:510 +#, python-format +msgid "ROUTER RELAY-OUT QUEUED %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:513 +#, python-format +msgid "Local per-topic backlog buffer full for topic %(topic)s. Dropping message." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:532 +#, python-format +msgid "Could not create IPC directory %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:542 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:576 +#, python-format +msgid "CONSUMER RECEIVED DATA: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:578 +#, python-format +msgid "ROUTER RELAY-OUT %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:600 +msgid "ZMQ Envelope version unsupported or unknown." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:628 +msgid "Skipping topic registration. Already registered." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:635 +#, python-format +msgid "Consumer is a zmq.%s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:687 +msgid "Creating payload" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:700 +msgid "Creating queue socket for reply waiter" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:713 +msgid "Sending cast" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:716 +msgid "Cast sent; Waiting reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:719 +#, fuzzy, python-format +msgid "Received message: %s" +msgstr "recibido %s" + +#: cinder/openstack/common/rpc/impl_zmq.py:720 +msgid "Unpacking response" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:729 +msgid "Unsupported or unknown ZMQ envelope returned." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:736 +#, fuzzy +msgid "RPC Message Invalid." +msgstr "La petición es inválida." + +#: cinder/openstack/common/rpc/impl_zmq.py:760 +#, python-format +msgid "%(msg)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:763 +#, python-format +msgid "Sending message(s) to: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:767 +msgid "No matchmaker results. Not casting." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:770 +msgid "No match from matchmaker." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:53 +msgid "Match not found by MatchMaker." 
+msgstr ""
+
+#: cinder/openstack/common/rpc/matchmaker.py:89
+msgid "Matchmaker does not implement registration or heartbeat."
+msgstr "Matchmaker no implementa el registro ni el heartbeat."
+
+#: cinder/openstack/common/rpc/matchmaker.py:239
+#, python-format
+msgid "Matchmaker unregistered: %s, %s"
+msgstr ""
+
+#: cinder/openstack/common/rpc/matchmaker.py:250
+msgid "Register before starting heartbeat."
+msgstr ""
+
+#: cinder/openstack/common/rpc/matchmaker.py:343
+#: cinder/openstack/common/rpc/matchmaker.py:361
+#, python-format
+msgid "No key defining hosts for topic '%s', see ringfile"
+msgstr ""
+
+#: cinder/scheduler/chance.py:51 cinder/scheduler/simple.py:90
+msgid "Is the appropriate service running?"
+msgstr ""
+
+#: cinder/scheduler/chance.py:56
+msgid "Could not find another host"
+msgstr ""
+
+#: cinder/scheduler/driver.py:89
+msgid "Must implement a fallback schedule"
+msgstr "Debe implementar una planificación de respaldo"
+
+#: cinder/scheduler/driver.py:93
+msgid "Must implement schedule_create_volume"
+msgstr "Debe implementar schedule_create_volume"
+
+#: cinder/scheduler/filter_scheduler.py:113
+msgid "Invalid value for 'scheduler_max_attempts', must be >=1"
+msgstr ""
+
+#: cinder/scheduler/filter_scheduler.py:131
+#, python-format
+msgid ""
+"Error scheduling %(volume_id)s from last vol-service: %(last_host)s : "
+"%(exc)s"
+msgstr ""
+
+#: cinder/scheduler/filter_scheduler.py:160
+#, python-format
+msgid "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s"
+msgstr ""
+
+#: cinder/scheduler/filter_scheduler.py:208
+#, python-format
+msgid "Filtered %(hosts)s"
+msgstr ""
+
+#: cinder/scheduler/filter_scheduler.py:214
+#, python-format
+msgid "Choosing %(best_host)s"
+msgstr ""
+
+#: cinder/scheduler/host_manager.py:240
+#, python-format
+msgid "Ignoring %(service_name)s service update from %(host)s"
+msgstr ""
+
+#: cinder/scheduler/host_manager.py:244
+#, python-format
+msgid "Received %(service_name)s service update from %(host)s."
+msgstr ""
+
+#: cinder/scheduler/host_manager.py:266
+msgid "service is down or disabled."
+msgstr ""
+
+#: cinder/scheduler/manager.py:119
+#, python-format
+msgid "Failed to schedule_%(method)s: %(ex)s"
+msgstr ""
+
+#: cinder/scheduler/scheduler_options.py:69
+#, python-format
+msgid "Could not stat scheduler options file %(filename)s: '%(e)s'"
+msgstr ""
+
+#: cinder/scheduler/scheduler_options.py:78
+#, python-format
+msgid "Could not decode scheduler options: '%(e)s'"
+msgstr ""
+
+#: cinder/scheduler/simple.py:79
+msgid "Not enough allocatable volume gigabytes remaining"
+msgstr ""
+
+#: cinder/scheduler/filters/capacity_filter.py:37
+msgid "Free capacity not set: volume node info collection broken."
+msgstr ""
+
+#: cinder/scheduler/filters/capacity_filter.py:51
+#, python-format
+msgid ""
+"Insufficient free space for volume creation (requested / avail): "
+"%(requested)s/%(available)s"
+msgstr ""
+
+#: cinder/scheduler/filters/retry_filter.py:41
+#, python-format
+msgid "Host %(host)s %(pass_msg)s. 
Previously tried hosts: %(hosts)s" +msgstr "" + +#: cinder/tests/fake_driver.py:45 cinder/volume/driver.py:506 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "Falso ISCSI: %s" + +#: cinder/tests/fake_driver.py:93 cinder/tests/fake_driver.py:98 +#, python-format +msgid "LoggingVolumeDriver: %s" +msgstr "" + +#: cinder/tests/fake_utils.py:72 +#, python-format +msgid "Faking execution of cmd (subprocess): %s" +msgstr "" + +#: cinder/tests/fake_utils.py:80 +#, python-format +msgid "Faked command matched %s" +msgstr "" + +#: cinder/tests/fake_utils.py:96 +#, python-format +msgid "Faked command raised an exception %s" +msgstr "" + +#: cinder/tests/fake_utils.py:101 +#, python-format +msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" +msgstr "" + +#: cinder/tests/test_misc.py:59 +#, python-format +msgid "" +"The following migrations are missing a downgrade:\n" +"\t%s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:243 +#, python-format +msgid "unrecognized argument %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1217 +#, python-format +msgid "Run CLI command: %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1220 +#, python-format +msgid "" +"CLI output:\n" +" stdout: %(stdout)s\n" +" stderr: %(stderr)s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1225 +#, python-format +msgid "" +"CLI Exception output:\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/tests/test_volume_types.py:58 +#, python-format +msgid "Given data: %s" +msgstr "" + +#: cinder/tests/test_volume_types.py:59 +#, python-format +msgid "Result data: %s" +msgstr "" + +#: cinder/tests/api/contrib/test_backups.py:638 +#, fuzzy +msgid "Invalid input" +msgstr "Captura no valida" + +#: cinder/tests/integrated/test_login.py:31 +#, fuzzy, python-format +msgid "volume: %s" +msgstr "Desasociar volumen %s" + +#: cinder/tests/integrated/api/client.py:37 +#, python-format +msgid "" +"%(message)s\n" +"Status Code: %(_status)s\n" +"Body: %(_body)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:46 +msgid "Authentication error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:54 +msgid "Authorization error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:62 +msgid "Item not found" +msgstr "" + +#: cinder/tests/integrated/api/client.py:104 +#, python-format +msgid "Doing %(method)s on %(relative_url)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:106 +#, python-format +msgid "Body: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:124 +#, python-format +msgid "%(auth_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:150 +#, python-format +msgid "%(relative_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:160 +msgid "Unexpected status code" +msgstr "" + +#: cinder/tests/integrated/api/client.py:167 +#, python-format +msgid "Decoding JSON: %s" +msgstr "" + +#: cinder/volume/api.py:96 +msgid "May specify only one of snapshot, imageRef or source volume" +msgstr "" + +#: cinder/volume/api.py:103 cinder/volume/api.py:449 +#: cinder/volume/manager.py:554 +msgid "status must be available" +msgstr "" + +#: cinder/volume/api.py:108 +msgid "Volume size cannot be lesser than the Snapshot size" +msgstr "" + +#: cinder/volume/api.py:117 +msgid "Unable to clone volumes that are in an error state" +msgstr "" + +#: cinder/volume/api.py:123 +msgid "Clones currently must be >= original volume size." 
+msgstr ""
+
+#: cinder/volume/api.py:140
+#, python-format
+msgid "Volume size '%s' must be an integer and greater than 0"
+msgstr ""
+
+#: cinder/volume/api.py:150
+msgid "Size of specified image is larger than volume size."
+msgstr ""
+
+#: cinder/volume/api.py:154
+msgid "Image minDisk size is larger than the volume size."
+msgstr ""
+
+#: cinder/volume/api.py:168
+#, python-format
+msgid ""
+"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume "
+"(%(d_consumed)dG of %(d_quota)dG already consumed)"
+msgstr ""
+
+#: cinder/volume/api.py:177
+#, python-format
+msgid ""
+"Quota exceeded for %(s_pid)s, tried to create volume (%(d_consumed)d "
+"volumes already consumed)"
+msgstr ""
+
+#: cinder/volume/api.py:308
+msgid "Failed to update quota for deleting volume"
+msgstr ""
+
+#: cinder/volume/api.py:316
+msgid "Volume status must be available or error"
+msgstr "El estado del volumen debe estar disponible o en error"
+
+#: cinder/volume/api.py:321
+#, python-format
+msgid "Volume still has %d dependent snapshots"
+msgstr ""
+
+#: cinder/volume/api.py:374 cinder/volume/api.py:432
+#: cinder/volume/volume_types.py:65
+#, python-format
+msgid "Searching by: %s"
+msgstr ""
+
+#: cinder/volume/api.py:452
+msgid "already attached"
+msgstr ""
+
+#: cinder/volume/api.py:459
+msgid "already detached"
+msgstr ""
+
+#: cinder/volume/api.py:470
+msgid "Volume status must be available to reserve"
+msgstr "El estado del volumen debe estar disponible para reservar"
+
+#: cinder/volume/api.py:519
+msgid "must be available"
+msgstr ""
+
+#: cinder/volume/api.py:537
+#, python-format
+msgid ""
+"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG snapshot "
+"(%(d_consumed)dG of %(d_quota)dG already consumed)"
+msgstr ""
+
+#: cinder/volume/api.py:546
+#, python-format
+msgid ""
+"Quota exceeded for %(s_pid)s, tried to create snapshot (%(d_consumed)d "
+"snapshots already consumed)"
+msgstr ""
+
+#: cinder/volume/api.py:595
+msgid "Volume Snapshot status must be available or error"
+msgstr "El estado del snapshot del volumen debe estar disponible o en error"
+
+#: cinder/volume/api.py:623
+msgid "Metadata property key blank"
+msgstr ""
+
+#: cinder/volume/api.py:627
+msgid "Metadata property key greater than 255 characters"
+msgstr ""
+
+#: cinder/volume/api.py:631
+msgid "Metadata property value greater than 255 characters"
+msgstr ""
+
+#: cinder/volume/api.py:716
+msgid "Volume status must be available/in-use."
+msgstr "El estado del volumen debe estar disponible/en uso."
+
+#: cinder/volume/api.py:719
+msgid "Volume status is in-use."
+msgstr ""
+
+#: cinder/volume/driver.py:94 cinder/volume/drivers/netapp/nfs.py:253
+#, python-format
+msgid "Recovering from a failed execute. Try number %s"
+msgstr "Recuperándose de una ejecución fallida. Intento número %s"
+
+#: cinder/volume/driver.py:221 cinder/volume/drivers/emc/emc_smis_iscsi.py:119
+msgid "ISCSI provider_location not stored, using discovery"
+msgstr ""
+
+#: cinder/volume/driver.py:269
+#, python-format
+msgid "Could not find iSCSI export for volume %s"
+msgstr "No se pudo encontrar la exportación iSCSI para el volumen %s"
+
+#: cinder/volume/driver.py:273 cinder/volume/drivers/emc/emc_smis_iscsi.py:162
+#, python-format
+msgid "ISCSI Discovery: Found %s"
+msgstr ""
+
+#: cinder/volume/driver.py:361
+#, python-format
+msgid "copy_image_to_volume %s."
+msgstr ""
+
+#: cinder/volume/driver.py:378
+#, python-format
+msgid "copy_volume_to_image %s." 
+msgstr ""
+
+#: cinder/volume/driver.py:437
+#, python-format
+msgid "iSCSI device not found at %s"
+msgstr ""
+
+#: cinder/volume/driver.py:439
+#, python-format
+msgid ""
+"ISCSI volume not yet found at: %(host_device)s. Will rescan & retry. Try"
+" number: %(tries)s"
+msgstr ""
+
+#: cinder/volume/driver.py:451
+#, python-format
+msgid "Found iSCSI node %(host_device)s (after %(tries)s rescans)"
+msgstr ""
+
+#: cinder/volume/driver.py:469 cinder/volume/manager.py:689
+#: cinder/volume/drivers/lvm.py:560 cinder/volume/drivers/lvm.py:676
+#: cinder/volume/drivers/storwize_svc.py:1344
+#: cinder/volume/drivers/emc/emc_smis_common.py:855
+#: cinder/volume/drivers/emc/emc_smis_iscsi.py:241
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1503
+#: cinder/volume/drivers/netapp/iscsi.py:1114
+#: cinder/volume/drivers/netapp/iscsi.py:1484
+#: cinder/volume/drivers/netapp/iscsi.py:2236
+#: cinder/volume/drivers/netapp/iscsi.py:2515
+#: cinder/volume/drivers/nexenta/volume.py:317
+msgid "Updating volume status"
+msgstr ""
+
+#: cinder/volume/driver.py:544
+msgid "Driver must implement initialize_connection"
+msgstr ""
+
+#: cinder/volume/manager.py:122
+#, python-format
+msgid "Driver path %s is deprecated, update your configuration to the new path."
+msgstr ""
+
+#: cinder/volume/manager.py:144
+#, python-format
+msgid "Re-exporting %s volumes"
+msgstr "Exportando de nuevo %s volúmenes"
+
+#: cinder/volume/manager.py:149
+#, python-format
+msgid "volume %s stuck in a downloading state"
+msgstr ""
+
+#: cinder/volume/manager.py:154
+#, python-format
+msgid "volume %s: skipping export"
+msgstr "volumen %s: omitiendo exportación"
+
+#: cinder/volume/manager.py:156
+msgid "Resuming any in progress delete operations"
+msgstr ""
+
+#: cinder/volume/manager.py:159
+#, python-format
+msgid "Resuming delete on volume: %s"
+msgstr ""
+
+#: cinder/volume/manager.py:219
+#, python-format
+msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG"
+msgstr "volumen %(vol_name)s: creando lv de tamaño %(vol_size)sG"
+
+#: cinder/volume/manager.py:228
+#, python-format
+msgid "volume %s: creating from snapshot"
+msgstr "volumen %s: creando desde snapshot"
+
+#: cinder/volume/manager.py:232
+#, python-format
+msgid "volume %s: creating from existing volume"
+msgstr "volumen %s: creando desde un volumen existente"
+
+#: cinder/volume/manager.py:236
+#, python-format
+msgid "volume %s: creating from image"
+msgstr "volumen %s: creando desde imagen"
+
+#: cinder/volume/manager.py:245
+#, python-format
+msgid "volume %s: creating"
+msgstr "volumen %s: creando"
+
+#: cinder/volume/manager.py:277
+#, python-format
+msgid "volume %s: creating export"
+msgstr "volumen %s: creando exportación"
+
+#: cinder/volume/manager.py:286
+#, python-format
+msgid "volume %s: create failed"
+msgstr "volumen %s: fallo al crear"
+
+#: cinder/volume/manager.py:316
+#, python-format
+msgid "volume %s: created successfully"
+msgstr "volumen %s: creado satisfactoriamente"
+
+#: cinder/volume/manager.py:324
+#, python-format
+msgid "Error: %s"
+msgstr "Error: %s"
+
+#: cinder/volume/manager.py:350
+#, python-format
+msgid "volume %s: Error trying to reschedule create"
+msgstr ""
+
+#: cinder/volume/manager.py:368
+msgid "Retry info not present, will not reschedule"
+msgstr ""
+
+#: cinder/volume/manager.py:372
+msgid "No request spec, will not reschedule"
+msgstr ""
+
+#: cinder/volume/manager.py:377
+#, python-format
+msgid "volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d"
+msgstr ""
+
+#: cinder/volume/manager.py:406 
cinder/volume/manager.py:419 +#, python-format +msgid "volume %s: deleting" +msgstr "volumen %s: eliminando" + +#: cinder/volume/manager.py:412 +#, fuzzy +msgid "volume is not local to this node" +msgstr "Volumen no local a este nodo" + +#: cinder/volume/manager.py:417 +#, python-format +msgid "volume %s: removing export" +msgstr "volumen %s: eliminando exportación" + +#: cinder/volume/manager.py:422 +#, python-format +msgid "volume %s: volume is busy" +msgstr "" + +#: cinder/volume/manager.py:441 +msgid "Failed to update usages deleting volume" +msgstr "" + +#: cinder/volume/manager.py:445 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "volumen %s: eliminado satisfactoriamente" + +#: cinder/volume/manager.py:460 +#, python-format +msgid "snapshot %s: creating" +msgstr "" + +#: cinder/volume/manager.py:466 +#, python-format +msgid "snapshot %(snap_name)s: creating" +msgstr "" + +#: cinder/volume/manager.py:484 +#, python-format +msgid "snapshot %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:492 cinder/volume/manager.py:502 +#, python-format +msgid "snapshot %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:505 +#, fuzzy, python-format +msgid "snapshot %s: snapshot is busy" +msgstr "instancia %s: creando snapshot" + +#: cinder/volume/manager.py:530 +msgid "Failed to update usages deleting snapshot" +msgstr "" + +#: cinder/volume/manager.py:533 +#, python-format +msgid "snapshot %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:551 +msgid "being attached by another instance" +msgstr "" + +#: cinder/volume/manager.py:608 +#, python-format +msgid "Downloaded image %(image_id)s to %(volume_id)s successfully" +msgstr "" + +#: cinder/volume/manager.py:626 +#, python-format +msgid "Uploaded volume %(volume_id)s to image (%(image_id)s) successfully" +msgstr "" + +#: cinder/volume/manager.py:702 +msgid "Clear capabilities" +msgstr "" + +#: cinder/volume/manager.py:706 +#, python-format +msgid "Notification {%s} received" +msgstr "" + +#: cinder/volume/volume_types.py:41 +#, python-format +msgid "DB error: %s" +msgstr "" + +#: cinder/volume/volume_types.py:50 cinder/volume/volume_types.py:97 +msgid "id cannot be None" +msgstr "" + +#: cinder/volume/volume_types.py:109 +msgid "name cannot be None" +msgstr "" + +#: cinder/volume/volume_types.py:128 +#, python-format +msgid "" +"Default volume type is not found, please check default_volume_type " +"config: %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:91 +msgid "Running with CoraidDriver for ESM EtherCLoud" +msgstr "" + +#: cinder/volume/drivers/coraid.py:102 +#, python-format +msgid "Update session cookie %(session)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:108 cinder/volume/drivers/coraid.py:183 +#, python-format +msgid "Message : %(message)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:125 +#, python-format +msgid "Error while trying to set group: %(message)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:128 +#, fuzzy, python-format +msgid "Unable to find group: %(group)s" +msgstr "Imposible encontrar SR en VBD %s" + +#: cinder/volume/drivers/coraid.py:161 +msgid "ESM urlOpen error" +msgstr "" + +#: cinder/volume/drivers/coraid.py:166 +msgid "JSON Error" +msgstr "" + +#: cinder/volume/drivers/coraid.py:170 +msgid "Request without URL" +msgstr "" + +#: cinder/volume/drivers/coraid.py:175 +#, python-format +msgid "Configure data : %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:177 +#, fuzzy, python-format +msgid "Configure response : %s" +msgstr "respuesta 
%s" + +#: cinder/volume/drivers/coraid.py:199 +#, python-format +msgid "Unable to retrive volume infos for volume %(volname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:289 +msgid "Cannot login on Coraid ESM" +msgstr "" + +#: cinder/volume/drivers/coraid.py:308 +#, fuzzy, python-format +msgid "Fail to create volume %(volname)s" +msgstr "Imposible encontrar volumen %s" + +#: cinder/volume/drivers/coraid.py:321 +#, fuzzy, python-format +msgid "Failed to delete volume %(volname)s" +msgstr "Imposible encontrar volumen %s" + +#: cinder/volume/drivers/coraid.py:335 +#, fuzzy, python-format +msgid "Failed to Create Snapshot %(snapname)s" +msgstr "Fallo a reinicia la instancia" + +#: cinder/volume/drivers/coraid.py:347 +#, python-format +msgid "Failed to Delete Snapshot %(snapname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:362 +#, python-format +msgid "Failed to Create Volume from Snapshot %(snapname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:383 +#, python-format +msgid "" +"Failed to Initialize Connection. Volume Name: %(volname)s Shelf: " +"%(shelf)s, Lun: %(lun)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:65 +#, python-format +msgid "There's no Gluster config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:70 +#, python-format +msgid "Gluster config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:80 +msgid "mount.glusterfs is not installed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:98 cinder/volume/drivers/nfs.py:168 +#, python-format +msgid "casted to %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:108 cinder/volume/drivers/nfs.py:178 +#, python-format +msgid "Volume %s does not have provider_location specified, skipping" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:168 cinder/volume/drivers/nfs.py:237 +#, python-format +msgid "Exception during mounting %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:247 cinder/volume/drivers/nfs.py:321 +#, fuzzy, python-format +msgid "%s is already mounted" +msgstr "el grupo %s ya existe" + +#: cinder/volume/drivers/lvm.py:82 +#, python-format +msgid "volume group %s doesn't exist" +msgstr "el grupo de volumenes %s no existe" + +#: cinder/volume/drivers/lvm.py:197 +#, python-format +msgid "Size for volume: %s not found, skipping secure delete." +msgstr "" + +#: cinder/volume/drivers/lvm.py:204 +#, python-format +msgid "Performing secure delete on volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:218 +#, python-format +msgid "Error unrecognized volume_clear option: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:238 +#, python-format +msgid "snapshot: %s not found, skipping delete operations" +msgstr "" + +#: cinder/volume/drivers/lvm.py:268 cinder/volume/drivers/lvm.py:656 +#, fuzzy, python-format +msgid "Creating clone of volume: %s" +msgstr "Crear volumen de %s GB" + +#: cinder/volume/drivers/lvm.py:345 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provision for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:364 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:378 +msgid "Detected inconsistency in provider_location id" +msgstr "" + +#: cinder/volume/drivers/lvm.py:430 +#, python-format +msgid "Symbolic link %s not found" +msgstr "" + +#: cinder/volume/drivers/lvm.py:510 cinder/volume/drivers/lvm.py:524 +#, python-format +msgid "Skipping remove_export. 
No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:542 +#, python-format +msgid "" +"Skipping remove_export. No iscsi_target is presently exported for volume:" +" %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:583 +msgid "Error retrieving volume status: " +msgstr "" + +#: cinder/volume/drivers/nfs.py:141 +#, python-format +msgid "There's no NFS config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/nfs.py:146 +#, python-format +msgid "NFS config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/rbd.py:64 +#, python-format +msgid "rbd has no pool %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:87 cinder/volume/drivers/sheepdog.py:134 +msgid "error refreshing volume stats" +msgstr "" + +#: cinder/volume/drivers/rbd.py:220 +msgid "Not stored in rbd" +msgstr "" + +#: cinder/volume/drivers/rbd.py:224 +msgid "Blank components" +msgstr "" + +#: cinder/volume/drivers/rbd.py:227 +#, fuzzy +msgid "Not an rbd snapshot" +msgstr "Captura no valida" + +#: cinder/volume/drivers/rbd.py:242 +#, python-format +msgid "%s is in a different ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:253 +#, fuzzy, python-format +msgid "Unable to read image %s" +msgstr "Imposible encontrar volumen %s" + +#: cinder/volume/drivers/scality.py:63 +msgid "Value required for 'scality_sofs_config'" +msgstr "" + +#: cinder/volume/drivers/scality.py:74 +#, python-format +msgid "Cannot access 'scality_sofs_config': %s" +msgstr "" + +#: cinder/volume/drivers/scality.py:80 +msgid "Cannot execute /sbin/mount.sofs" +msgstr "" + +#: cinder/volume/drivers/scality.py:101 +msgid "Cannot mount Scality SOFS, check syslog for errors" +msgstr "" + +#: cinder/volume/drivers/scality.py:135 +#, python-format +msgid "Cannot find volume dir for Scality SOFS at '%s'" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:47 +#, python-format +msgid "Sheepdog is not working: %s" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:52 +msgid "Sheepdog is not working" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:129 +#, python-format +msgid "Payload for SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:146 +#, python-format +msgid "Call to json.loads() raised an exception: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:152 +#, python-format +msgid "Results of SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:156 +#, python-format +msgid "Clone operation encountered: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:158 +#, python-format +msgid "Waiting for outstanding operation before retrying snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:164 +#, python-format +msgid "Detected xDBVersionMismatch, retry %s of 5" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:171 +#, fuzzy, python-format +msgid "API response: %s" +msgstr "respuesta %s" + +#: cinder/volume/drivers/solidfire.py:191 +#, python-format +msgid "Found solidfire account: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:220 +#, python-format +msgid "solidfire account: %s does not exist, create it..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:281 +#, python-format +msgid "Failed to retrieve volume SolidFire-ID: %s in get_by_account!" 
+msgstr "" + +#: cinder/volume/drivers/solidfire.py:338 +#, fuzzy +msgid "Failed to get model update from clone" +msgstr "Fallo al generar metadatos para la ip %s" + +#: cinder/volume/drivers/solidfire.py:364 +#, python-format +msgid "More than one valid preset was detected, using %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:397 +#, python-format +msgid "Mapped SolidFire volumeID %(sfid)s to cinder ID %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:406 +#, python-format +msgid "Volume %s, not found on SF Cluster." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:409 +#, python-format +msgid "Found %(count)s volumes mapped to id: %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:476 +msgid "Enter SolidFire delete_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:480 +#, python-format +msgid "Account for Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:482 +msgid "This usually means the volume was never succesfully created." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:497 +#, python-format +msgid "Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:500 +msgid "Leaving SolidFire delete_volume" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:504 +msgid "Executing SolidFire ensure_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:509 +msgid "Executing SolidFire create_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:558 +msgid "Updating cluster status info" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:566 +#, fuzzy +msgid "Failed to get updated stats" +msgstr "Fallo al suspender la instancia" + +#: cinder/volume/drivers/storwize_svc.py:173 +#, python-format +msgid "WWPN on node %(node)s: %(wwpn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:179 +msgid "enter: do_setup" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:190 +#, fuzzy, python-format +msgid "pool %s doesn't exist" +msgstr "el grupo de volumenes %s no existe" + +#: cinder/volume/drivers/storwize_svc.py:206 +msgid "Failed to get license information." 
+msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:265 +msgid "do_setup: No configured nodes" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:267 +msgid "leave: do_setup" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:289 +msgid "enter: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:295 +#: cinder/volume/drivers/netapp/iscsi.py:169 +#: cinder/volume/drivers/netapp/iscsi.py:1182 +#: cinder/volume/drivers/netapp/iscsi.py:1535 +#: cinder/volume/drivers/netapp/nfs.py:109 +#: cinder/volume/drivers/netapp/nfs.py:328 +#: cinder/volume/drivers/netapp/nfs.py:379 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:118 +#, python-format +msgid "%s is not set" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:301 +msgid "" +"Password or SSH private key is required for authentication: set either " +"san_password or san_private_key option" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:309 +#, python-format +msgid "" +"Illegal value %d specified for storwize_svc_flashcopy_timeout: valid " +"values are between 0 and 600" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:317 +msgid "leave: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:331 +#, python-format +msgid "ensure_export: Volume %s not found on storage" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:356 +#, python-format +msgid "enter: _get_chap_secret_for_host: host name %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:392 +#, python-format +msgid "" +"leave: _get_chap_secret_for_host: host name %(host_name)s with secret " +"%(chap_secret)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:415 +msgid "_create_host: Cannot clean host name. Host name is not unicode or string" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:483 +#, python-format +msgid "enter: _get_host_from_connector: prefix %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:511 +#, python-format +msgid "leave: _get_host_from_connector: host %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:523 +#, python-format +msgid "enter: _create_host: host %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:538 +msgid "_create_host: No connector ports" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:551 +#, python-format +msgid "leave: _create_host: host %(host)s - %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:574 +#, python-format +msgid "enter: _map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:609 +msgid "" +"storwize_svc_multihostmap_enabled is set to Flase, Not allow multi host " +"mapping" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:619 +#, fuzzy, python-format +msgid "volume %s mapping to multi host" +msgstr "volume %s: saltando exportación" + +#: cinder/volume/drivers/storwize_svc.py:625 +#, python-format +msgid "" +"leave: _map_vol_to_host: LUN %(result_lun)s, volume %(volume_name)s, host" +" %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:635 +#, python-format +msgid "enter: _delete_host: host %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:643 +#, python-format +msgid "leave: _delete_host: host %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:675 +#, python-format +msgid "enter: initialize_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:691 +msgid "_create_host failed to return the host name." 
+msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:702 +#, python-format +msgid "initialize_connection: Failed to get attributes for volume %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:709 +#, python-format +msgid "Did not find expected column name in lsvdisk: %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:711 +#, python-format +msgid "initialize_connection: Missing volume attribute for volume %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:728 +#, python-format +msgid "" +"initialize_connection: No node found in I/O group %(gid)s for volume " +"%(vol)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:736 +#, python-format +msgid "initialize_connection: Did not find a preferred node for volume %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:769 +#, python-format +msgid "" +"initialize_connection: Failed to collect return properties for volume " +"%(vol)s and connector %(conn)s.\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:774 +#, python-format +msgid "" +"leave: initialize_connection:\n" +" volume: %(vol)s\n" +" connector %(conn)s\n" +" properties: %(prop)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:792 +#, python-format +msgid "enter: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:802 +msgid "_get_host_from_connector failed to return the host name for connector" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:817 +#, python-format +msgid "" +"terminate_connection: No mapping of volume %(vol_name)s to host " +"%(host_name)s found" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:825 +#, python-format +msgid "leave: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:883 +msgid "protocol must be specified as ' iSCSI' or ' FC'" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:907 +#, python-format +msgid "enter: _create_vdisk: vdisk %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:945 +#, python-format +msgid "" +"_create_vdisk %(name)s - did not find success message in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:950 +#, python-format +msgid "leave: _create_vdisk: volume %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:962 +#: cinder/volume/drivers/storwize_svc.py:976 +#, python-format +msgid "" +"create FC mapping from %(source)s to %(target)s - did not find success " +"message in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:988 +#: cinder/volume/drivers/storwize_svc.py:998 +#, python-format +msgid "" +"create FC mapping from %(source)s to %(target)s - did not find mapping id" +" in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1012 +#, python-format +msgid "" +"_prepare_fc_map: Failed to prepare FlashCopy from %(source)s to " +"%(target)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1039 +#, python-format +msgid "" +"Unexecpted mapping status %(status)s for mapping %(id)s. Attributes: " +"%(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1050 +#, python-format +msgid "" +"Mapping %(id)s prepare failed to complete within the allotted %(to)d " +"seconds timeout. Terminating." 
+msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1055 +#, python-format +msgid "" +"_prepare_fc_map: Failed to start FlashCopy from %(source)s to %(target)s " +"with exception %(ex)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1062 +#, python-format +msgid "_prepare_fc_map: %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1069 +#, python-format +msgid "" +"_start_fc_map: Failed to start FlashCopy from %(source)s to %(target)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1080 +#, python-format +msgid "" +"enter: _run_flashcopy: execute FlashCopy from source %(source)s to target" +" %(target)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1092 +#, python-format +msgid "leave: _run_flashcopy: FlashCopy started from %(source)s to %(target)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1100 +#, python-format +msgid "enter: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1107 +#, python-format +msgid "_create_copy: Source vdisk %s does not exist" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1119 +#, python-format +msgid "" +"_create_copy: cannot get source vdisk %(src)s capacity from vdisk " +"attributes %(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1129 +#, python-format +msgid "leave: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1134 +#, python-format +msgid "enter: _get_flashcopy_mapping_attributes: mapping %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1156 +#, python-format +msgid "" +"leave: _get_flashcopy_mapping_attributes: mapping %(fc_map_id)s, " +"attributes %(attributes)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1164 +#, python-format +msgid "enter: _is_vdisk_defined: vdisk %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1166 +#, python-format +msgid "leave: _is_vdisk_defined: vdisk %(vol)s with %(str)s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1194 +#, python-format +msgid "enter: _delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1199 +#, python-format +msgid "warning: Tried to delete vdisk %s but it does not exist." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1225 +#, python-format +msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1258 +#, python-format +msgid "leave: _delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1283 +msgid "create_volume_from_snapshot: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1297 +msgid "create_cloned_volume: Source and destination size differ." 
+msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1361 +msgid "_update_volume_status: Could not get system name" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1373 +msgid "Could not get pool data from the storage" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1374 +msgid "_update_volume_status: Could not get storage pool data" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1412 +msgid "" +"Illegal value specified for storwize_svc_vol_rsize: set to either a " +"percentage (0-100) or -1" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1418 +msgid "" +"Illegal value specified for storwize_svc_vol_warning: set to a percentage" +" (0-100)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1425 +msgid "" +"Illegal value specified for storwize_svc_vol_grainsize: set to either 32," +" 64, 128, or 256" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1432 +msgid "System does not support compression" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1437 +msgid "If compression is set to True, rsize must also be set (not equal to -1)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1443 +#, python-format +msgid "" +"Illegal value %(prot)s specified for storwize_svc_connection_protocol: " +"valid values are %(enabled)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1452 +msgid "" +"Multipath is currently only supported for FC connections and not iSCSI. " +"(This is a Nova limitation.)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1464 +#, python-format +msgid "enter: _execute_command_and_parse_attributes: command %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1471 +#, fuzzy, python-format +msgid "" +"CLI Exception output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" +"%(description)s\n" +"Comando: %(cmd)s\n" +"Código de salida: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" + +#: cinder/volume/drivers/storwize_svc.py:1488 +#, python-format +msgid "" +"leave: _execute_command_and_parse_attributes:\n" +"command: %(cmd)s\n" +"attributes: %(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1507 +#, python-format +msgid "" +"_get_hdr_dic: attribute headers and values do not match.\n" +" Headers: %(header)s\n" +" Values: %(row)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1517 +#, python-format +msgid "" +"%(fun)s: Failed with unexpected CLI output.\n" +" Command: %(cmd)s\n" +"stdout: %(out)s\n" +"stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1531 +#, python-format +msgid "" +"%(fun)s: Failed with unexpected CLI output.\n" +" Command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1539 +#, python-format +msgid "Did not find expected column in %(fun)s: %(hdr)s" +msgstr "" + +#: cinder/volume/drivers/windows.py:142 +#, fuzzy, python-format +msgid "Creating folder %s " +msgstr "Borrando usuario %s" + +#: cinder/volume/drivers/windows.py:207 +#: cinder/volume/drivers/nexenta/volume.py:203 +#, python-format +msgid "Ignored target creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/windows.py:213 +#, fuzzy, python-format +msgid "Disk not found: %s" +msgstr "No se encuentra la dirección del enlace local.:%s" + +#: cinder/volume/drivers/zadara.py:218 +#, python-format +msgid "Sending %(method)s to %(url)s. Body \"%(body)s\"" +msgstr "" + +#: cinder/volume/drivers/zadara.py:240 +#, python-format +msgid "Operation completed. 
%(data)s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:350 +#, python-format +msgid "Volume %(name)s could not be found. It might be already deleted" +msgstr "" + +#: cinder/volume/drivers/zadara.py:447 +#, python-format +msgid "Attach properties: %(properties)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:42 +msgid "" +"Module PyWBEM not installed. Install PyWBEM using the python-pywbem " +"package." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:78 +msgid "Entering create_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:82 +#, fuzzy, python-format +msgid "Create Volume: %(volume)s Size: %(size)lu" +msgstr "volume %(vol_name)s: creando lv del tamaño %(vol_size)sG" + +#: cinder/volume/drivers/emc/emc_smis_common.py:90 +#, python-format +msgid "Create Volume: %(volume)s Storage type: %(storage_type)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:97 +#, python-format +msgid "" +"Create Volume: %(volume)s Pool: %(pool)s Storage System: " +"%(storage_system)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:106 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Storage Configuration Service not " +"found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:114 +#, python-format +msgid "" +"Create Volume: %(name)s Method: CreateOrModifyElementFromStoragePool " +"ConfigServicie: %(service)s ElementName: %(name)s InPool: %(pool)s " +"ElementType: 5 Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:129 +#, python-format +msgid "Create Volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:136 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:143 +#, python-format +msgid "Leaving create_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:151 +msgid "Entering create_volume_from_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:156 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:166 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Snapshot Instance: %(snapshotinstance)s Storage " +"System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:176 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Create Volume from Snapshot is NOT supported on VMAX." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:187 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Cannot find Replication Service to create volume from " +"snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:196 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Method: CreateElementReplica ReplicationService: " +"%(service)s ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:217 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s " +"Snapshot:%(snapshotname)s. 
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:229
+#, python-format
+msgid ""
+"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: "
+"%(snapshotname)s. Successfully clone volume from snapshot. Finding the "
+"clone relationship."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:240
+#, python-format
+msgid ""
+"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: "
+"%(snapshotname)s. Remove the clone relationship. Method: "
+"ModifyReplicaSynchronization ReplicationService: %(service)s Operation: "
+"8 Synchronization: %(sync_name)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:256
+#, python-format
+msgid ""
+"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: "
+"%(snapshotname)s Return code: %(rc)lu"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:265
+#, python-format
+msgid ""
+"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: "
+"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:277
+#, python-format
+msgid ""
+"Leaving create_volume_from_snapshot: Volume: %(volumename)s Snapshot: "
+"%(snapshotname)s Return code: %(rc)lu."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:286
+msgid "Entering create_cloned_volume."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:291
+#, python-format
+msgid ""
+"Create a Clone from Volume: Volume: %(volumename)s Source Volume: "
+"%(srcname)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:301
+#, python-format
+msgid ""
+"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s"
+" Source Instance: %(src_instance)s Storage System: %(storage_system)s."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:311
+#, python-format
+msgid ""
+"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: "
+"%(srcname)s. Cannot find Replication Service to create cloned volume."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:320
+#, python-format
+msgid ""
+"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s"
+" Method: CreateElementReplica ReplicationService: %(service)s "
+"ElementName: %(elementname)s SyncType: 8 SourceElement: "
+"%(sourceelement)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:341
+#, python-format
+msgid ""
+"Error Create Cloned Volume: Volume: %(volumename)s Source "
+"Volume: %(srcname)s. Return code: %(rc)lu. Error: %(error)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:353
+#, python-format
+msgid ""
+"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s."
+" Successfully cloned volume from source volume. Finding the clone "
+"relationship."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:364
+#, python-format
+msgid ""
+"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s."
+" Remove the clone relationship. Method: ModifyReplicaSynchronization "
+"ReplicationService: %(service)s Operation: 8 Synchronization: "
+"%(sync_name)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:380
+#, python-format
+msgid ""
+"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s"
+" Return code: %(rc)lu"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:389
+#, python-format
+msgid ""
+"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: "
+"%(srcname)s. Return code: %(rc)lu. Error: %(error)s"
Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:401 +#, python-format +msgid "" +"Leaving create_cloned_volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:410 +msgid "Entering delete_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:412 +#, python-format +msgid "Delete Volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:419 +#, python-format +msgid "Volume %(name)s not found on the array. No volume to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:429 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Storage Configuration Service not " +"found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:437 +#, python-format +msgid "Delete Volume: %(name)s DeviceID: %(deviceid)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:441 +#, python-format +msgid "" +"Delete Volume: %(name)s Method: EMCReturnToStoragePool ConfigServic: " +"%(service)s TheElement: %(vol_instance)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:454 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:463 +#, python-format +msgid "Leaving delete_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:470 +msgid "Entering create_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:474 +#, python-format +msgid "Create snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:486 +#, python-format +msgid "Device ID: %(deviceid)s: Storage System: %(storagesystem)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:493 +#: cinder/volume/drivers/emc/emc_smis_common.py:495 +#: cinder/volume/drivers/emc/emc_smis_common.py:565 +#, python-format +msgid "Cannot find Replication Service to create snapshot for volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:500 +#, python-format +msgid "" +"Create Snapshot: Method: CreateElementReplica: Target: %(snapshot)s " +"Source: %(volume)s Replication Service: %(service)s ElementName: " +"%(elementname)s Sync Type: 7 SourceElement: %(sourceelement)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:516 +#, python-format +msgid "" +"Create Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:525 +#, python-format +msgid "Error Create Snapshot: (snapshot)s Volume: %(volume)s Error: %(errordesc)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:533 +#, python-format +msgid "" +"Leaving create_snapshot: Snapshot: %(snapshot)s Volume: %(volume)s " +"Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:539 +msgid "Entering delete_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:543 +#, python-format +msgid "Delete Snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:549 +#, python-format +msgid "" +"Delete Snapshot: %(snapshot)s: volume: %(volume)s. Finding " +"StorageSychronization_SV_SV." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:557 +#, python-format +msgid "" +"Snapshot: %(snapshot)s: volume: %(volume)s not found on the array. No " +"snapshot to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:572 +#, python-format +msgid "" +"Delete Snapshot: Target: %(snapshot)s Source: %(volume)s. Method: " +"ModifyReplicaSynchronization: Replication Service: %(service)s " +"Operation: 19 Synchronization: %(sync_name)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:588 +#, python-format +msgid "" +"Delete Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:597 +#, python-format +msgid "" +"Error Delete Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:609 +#, python-format +msgid "" +"Leaving delete_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:619 +#, fuzzy, python-format +msgid "Create export: %(volume)s" +msgstr "Exportando de nuevo los volumenes %s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:624 +#, python-format +msgid "create_export: Volume: %(volume)s Device ID: %(device_id)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:646 +#, python-format +msgid "" +"ExposePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(lun_name)s" +" InitiatorPortIDs: %(initiator)s DeviceAccesses: 2" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:661 +#, python-format +msgid "ExposePaths parameter LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:672 +#, fuzzy, python-format +msgid "Error mapping volume %s." +msgstr "Desasociar volumen %s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:676 +#, fuzzy, python-format +msgid "ExposePaths for volume %s completed successfully." +msgstr "volumen %s: eliminado satisfactoriamente" + +#: cinder/volume/drivers/emc/emc_smis_common.py:692 +#, python-format +msgid "" +"HidePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(device_id)s " +"LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:705 +#, python-format +msgid "Error unmapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:709 +#, fuzzy, python-format +msgid "HidePaths for volume %s completed successfully." +msgstr "volumen %s: eliminado satisfactoriamente" + +#: cinder/volume/drivers/emc/emc_smis_common.py:722 +#, python-format +msgid "" +"AddMembers: ConfigServicie: %(service)s MaskingGroup: %(masking_group)s" +" Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:735 +#, python-format +msgid "Error mapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:740 +#, fuzzy, python-format +msgid "AddMembers for volume %s completed successfully." +msgstr "volumen %s: eliminado satisfactoriamente" + +#: cinder/volume/drivers/emc/emc_smis_common.py:753 +#, python-format +msgid "" +"RemoveMembers: ConfigServicie: %(service)s MaskingGroup: " +"%(masking_group)s Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:766 +#, python-format +msgid "Error unmapping volume %(vol)s. 
%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:771 +#, fuzzy, python-format +msgid "RemoveMembers for volume %s completed successfully." +msgstr "volumen %s: eliminado satisfactoriamente" + +#: cinder/volume/drivers/emc/emc_smis_common.py:777 +#, python-format +msgid "Map volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:786 +#: cinder/volume/drivers/emc/emc_smis_common.py:816 +#, python-format +msgid "Cannot find Controller Configuration Service for storage system %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:800 +#, python-format +msgid "Unmap volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:806 +#, python-format +msgid "Volume %s is not mapped. No volume to unmap." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:830 +#, python-format +msgid "Initialize connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:836 +#, python-format +msgid "Volume %s is already mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:848 +#, python-format +msgid "Terminate connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:880 +#, python-format +msgid "Found Storage Type: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:883 +msgid "Storage type not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:899 +#, python-format +msgid "Found Masking View: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:902 +msgid "Masking View not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:924 +msgid "Ecom user not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:944 +#, python-format +msgid "Ecom IP: %(ecomIp)s Port: %(ecomPort)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:947 +msgid "Ecom server not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:954 +#, fuzzy +msgid "Cannot connect to ECOM server" +msgstr "Reconectado a la cola" + +#: cinder/volume/drivers/emc/emc_smis_common.py:966 +#, python-format +msgid "Found Replication Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:979 +#, python-format +msgid "Found Storage Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:992 +#, python-format +msgid "Found Controller Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1005 +#, python-format +msgid "Found Storage Hardware ID Management Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1049 +#, python-format +msgid "Pool %(storage_type)s is not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1055 +#, python-format +msgid "Storage system not found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1061 +#, python-format +msgid "Pool: %(pool)s SystemName: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1077 +#, python-format +msgid "Pool name: %(poolname)s System name: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1109 +#, python-format +msgid "Volume %(volumename)s not found on the array." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1112 +#, python-format +msgid "Volume name: %(volumename)s Volume instance: %(vol_instance)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1125 +#, python-format +msgid "Source: %(volumename)s Target: %(snapshotname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1148 +#, python-format +msgid "" +"Source: %(volumename)s Target: %(snapshotname)s. Storage Synchronized " +"not found. " +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1153 +#, python-format +msgid "" +"Storage system: %(storage_system)s Storage Synchronized instance: " +"%(sync)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1179 +#, python-format +msgid "Error finding %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1183 +#, python-format +msgid "Found %(name)s: %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1243 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage_system)s " +"and initiator %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1282 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage volume %(vol)s and initiator" +" %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1295 +#, python-format +msgid "" +"Volume %(name)s not found on the array. Cannot determine if there are " +"volumes mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1307 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage)s and " +"%(connector)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1319 +#, python-format +msgid "" +"Found %(numVolumesMapped)d volumes on storage system %(storage)s mapped " +"to %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1354 +#, python-format +msgid "Available device number on %(storage)s: %(device)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1397 +#, python-format +msgid "Device number not found for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1402 +#, python-format +msgid "Found device number %(device)d for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1412 +#, python-format +msgid "Device info: %(data)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1434 +#, python-format +msgid "Masking view: %(view)s DeviceMaskingGroup: %(masking)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1456 +#, python-format +msgid "Found Storage Processor System: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1484 +#, python-format +msgid "" +"iSCSIProtocolEndpoint for storage system %(storage_system)s and SP %(sp)s" +" is %(endpoint)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1513 +msgid "Error finding Storage Hardware ID Service." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1519 +#, python-format +msgid "" +"EMCGetTargetEndpoints: Service: %(service)s Storage HardwareIDs: " +"%(hardwareids)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1531 +msgid "Error finding Target WWNs." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1541 +#, python-format +msgid "Add target WWN: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1543 +#, python-format +msgid "Target WWNs: %s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1559 +#, python-format +msgid "Storage Hardware IDs for %(wwpns)s is %(foundInstances)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:158 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:167 +#, fuzzy, python-format +msgid "Cannot find device number for volume %s" +msgstr "Imposible encontrar SR en VBD %s" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:197 +#, fuzzy, python-format +msgid "Found iSCSI endpoint: %s" +msgstr "No encontrado: %s" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:204 +#, python-format +msgid "ISCSI endpoint not found for SP %(sp)s on storage system %(storage)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:221 +#, python-format +msgid "ISCSI properties: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:113 +msgid "read timed out" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:138 +msgid "do_setup." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:144 +msgid "check_for_setup_error." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:149 +msgid "check_for_setup_error: Can not get device type." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:153 +#, python-format +msgid "check_for_setup_error: Device type is:%(type)s, version is:%(version)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:160 +msgid "" +"check_for_setup_error: Product version not right. Please make sure the " +"product version is V1." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:171 +msgid "_get_device_type: Storage Pool must be configured." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:180 +#, python-format +msgid "create_volume:volume name: %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:194 +#, python-format +msgid "delete_volume: volume name: %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:201 +#, python-format +msgid "delete_volume:No need to delete volume. Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:210 +#, python-format +msgid "create_export: volume name:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:214 +#, python-format +msgid "create_export:Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:234 +#, python-format +msgid "initialize_connection: volume name: %(volume)s. initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:249 +#, python-format +msgid "" +"initialize_connection:Failed to find target ip for " +"initiator:%(initiatorname)s, please check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:259 +#, python-format +msgid "initialize_connection:Failed to find target iSCSI iqn. Target IP:%(ip)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:297 +#, python-format +msgid "" +"initialize_connection:host name: %(host)s, initiator name: %(ini)s, " +"hostport name: %(port)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:307 +#, python-format +msgid "" +"initialize_connection:Failed to find the given volume. volume " +"name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:359 +#, python-format +msgid "terminate_connection:volume name: %(volume)s, initiator name: %(ini)s." 
+msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:368 +#, python-format +msgid "terminate_connection:Host does not exist. Host name:%(host)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:377 +#, python-format +msgid "terminate_connection:volume does not exist. volume name:%(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:396 +#, python-format +msgid "" +"terminate_connection:No map between host and volume. Host " +"name:%(hostname)s, volume name:%(volumename)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:414 +#, python-format +msgid "" +"terminate_connection:No initiator is added to the host. Host " +"name:%(hostname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:427 +#, python-format +msgid "create_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:434 +msgid "create_snapshot:Device does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:440 +msgid "create_snapshot:Resource pool needs 1GB valid size at least." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:447 +#, python-format +msgid "create_snapshot:Volume does not exist. Volume name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:456 +#, python-format +msgid "create_snapshot:Snapshot does not exist. Snapshot name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:468 +#, python-format +msgid "delete_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:475 +msgid "delete_snapshot:Device does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:484 +#, python-format +msgid "delete_snapshot:Snapshot does not exist. snapshot name:%(snap)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:497 +#, python-format +msgid "" +"create_volume_from_snapshot:snapshot name:%(snapshot)s, volume " +"name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:505 +#, python-format +msgid "" +"create_volume_from_snapshot:Device does not support create volume from " +"snapshot. Volume name:%(volume)s, snapshot name:%(snapshot)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:516 +#, python-format +msgid "" +"create_volume_from_snapshot:Snapshot does not exist. Snapshot " +"name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:567 +msgid "Config file is wrong. Controler IP, UserName and UserPassword must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:573 +#, python-format +msgid "_check_conf_file: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:584 +#, python-format +msgid "_read_xml:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:612 +#, python-format +msgid "Write login information to xml error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:616 +#, python-format +msgid "_get_login_info error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:638 +#, python-format +msgid "" +"Config file is wrong. LUNType must be \"Thin\" or \"Thick\". " +"LUNType:%(type)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:665 +msgid "_get_lun_set_info:Use default prefetch type. Prefetch type:Intelligent." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:676 +msgid "" +"_get_lun_set_info:No available pools! 
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:686
+#, python-format
+msgid "_get_lun_set_info:%s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:736
+msgid ""
+"_get_maximum_pool:maxpoolid is None. Please check config file and make "
+"sure the \"Name\" in \"StoragePool\" is right."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:754
+#, python-format
+msgid "_get_iscsi_info:%s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:764
+#, python-format
+msgid "CLI command:%s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:831
+#, python-format
+msgid "_execute_cli:%s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:838
+#, python-format
+msgid ""
+"_name_translate:Name in cinder: %(old)s, new name in storage system: "
+"%(new)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:872
+#, python-format
+msgid ""
+"_create_hostgroup:Failed to Create hostgroup. Hostgroup name: %(name)s. "
+"out:%(out)s."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:901
+#, python-format
+msgid ""
+"_add_host:Failed to add host to hostgroup. host name:%(host)s hostgroup "
+"id:%(hostgroup)s out:%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:927
+#, python-format
+msgid ""
+"_add_initiator:Failed to add initiator. initiator name:%(name)s "
+"out:%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:941
+#, python-format
+msgid ""
+"_delete_initiator:ERROR:Failed to delete initiator. initiator "
+"name:%(name)s out:%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:991
+#, python-format
+msgid ""
+"_add_hostport:Failed to add hostport. port name:%(port)s port "
+"information:%(info)s host id:%(host)s out:%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1009
+#, python-format
+msgid "_delete_hostport:Failed to delete host port. port id:%(portid)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1016
+#, python-format
+msgid "_get_tgt_iqn:iSCSI IP is %s."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1049
+#, python-format
+msgid "_get_tgt_iqn:iSCSI target iqn is:%s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1086
+#, python-format
+msgid ""
+"_map_lun:Failed to add hostmap. hostid:%(host)s lunid:%(lun)s "
+"hostlunid:%(hostlunid)s out:%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1122
+#, python-format
+msgid ""
+"_delete_map:There are IOs accessing the system. Retry to delete host map."
+" map id:%(mapid)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1128
+#, python-format
+msgid "_delete_map:Failed to delete host map. mapid:%(mapid)s out:%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1142
+#, python-format
+msgid "_delete_host: Failed to delete host. host id:%(hostid)s out:%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1223
+#, python-format
+msgid ""
+"_active_snapshot:Failed to activate snapshot. snapshot id:%(name)s. "
+"out:%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1237
+#, python-format
+msgid ""
+"_disable_snapshot:Failed to disable snapshot. snapshot id:%(id)s. "
+"out:%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1251
+#, python-format
+msgid ""
+"_delete_snapshot:Failed to delete snapshot. snapshot id:%(id)s. "
+"out:%(out)s"
" +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1308 +#, python-format +msgid "_create_volume:Failed to Create volume. volume name:%(name)s. out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1321 +#, python-format +msgid "_delete_volume:Failed to delete volume. Volume name:%(name)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1338 +#, python-format +msgid "" +"_create_luncopy:Failed to Create LUNcopy. LUNcopy name:%(name)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1352 +#, python-format +msgid "" +"_start_luncopy:Failed to start LUNcopy. LUNcopy id:%(luncopyid)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1382 +#, python-format +msgid "" +"_wait_for_luncopy:LUNcopy status is not normal. LUNcopy " +"name:%(luncopyname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1415 +#, python-format +msgid "" +"_delete_luncopy:Failed to delete LUNcopy. LUNcopy id:%(luncopyid)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1430 +#, python-format +msgid "" +"_create_snapshot:Failed to Create snapshot. Snapshot name:%(name)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1468 +#, python-format +msgid "" +"_change_lun_controller:Failed to change lun owning controller. lun " +"id:%(lunid)s. new controller:%(controller)s. out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1484 +msgid "_is_resource_pool_enough:Resource pool for snapshot not be added." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:123 +#, python-format +msgid "API %(name)s failed: %(reason)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:134 +#: cinder/volume/drivers/netapp/iscsi.py:1167 +#, python-format +msgid "Using WSDL: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:144 +#, python-format +msgid "Using DFM server: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:149 +#, fuzzy, python-format +msgid "Using storage service: %s" +msgstr "Ejecutando instancias: %s" + +#: cinder/volume/drivers/netapp/iscsi.py:154 +#, python-format +msgid "Using storage service prefix: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:160 +#, fuzzy, python-format +msgid "Using vfiler: %s" +msgstr "No encontrado: %s" + +#: cinder/volume/drivers/netapp/iscsi.py:173 +msgid "Either netapp_storage_service or netapp_storage_service_prefix must be set" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:204 +#, fuzzy +msgid "Connected to DFM server" +msgstr "Reconectado a la cola" + +#: cinder/volume/drivers/netapp/iscsi.py:282 +#, python-format +msgid "Discovered %(dataset_count)s datasets and %(lun_count)s LUNs" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:317 +#, fuzzy, python-format +msgid "Job failed: %s" +msgstr "No encontrado: %s" + +#: cinder/volume/drivers/netapp/iscsi.py:346 +msgid "" +"Attempt to use volume_type without specifying " +"netapp_storage_service_prefix flag." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:350 +msgid "" +"You must set the netapp_storage_service flag in order to create volumes " +"with no volume_type." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:420 +msgid "Failed to provision dataset member" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:435 +msgid "No LUN was created by the provision job" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:463 +#: cinder/volume/drivers/netapp/iscsi.py:1250 +#, python-format +msgid "No entry in LUN table for volume %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:481 +msgid "Failed to remove and delete dataset LUN member" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:497 +msgid "Failed to remove and delete dataset Qtree member" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:538 +#, python-format +msgid "No entry in LUN table for volume %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:558 +#, fuzzy, python-format +msgid "Failed to get LUN details for LUN ID %s" +msgstr "Fallo al generar metadatos para la ip %s" + +#: cinder/volume/drivers/netapp/iscsi.py:575 +#, fuzzy, python-format +msgid "Failed to get host details for host ID %s" +msgstr "Fallo al generar metadatos para la ip %s" + +#: cinder/volume/drivers/netapp/iscsi.py:812 +#: cinder/volume/drivers/netapp/iscsi.py:859 +#, fuzzy, python-format +msgid "No LUN ID for volume %s" +msgstr "Imposible encontrar volumen %s" + +#: cinder/volume/drivers/netapp/iscsi.py:821 +#, fuzzy, python-format +msgid "Failed to get target portal for filer: %s" +msgstr "Fallo al generar metadatos para la ip %s" + +#: cinder/volume/drivers/netapp/iscsi.py:826 +#, fuzzy, python-format +msgid "Failed to get target IQN for filer: %s" +msgstr "Fallo al generar metadatos para la ip %s" + +#: cinder/volume/drivers/netapp/iscsi.py:930 +msgid "" +"Failed to get LUN list. Is the DFM host time-synchronized with Cinder " +"host?" 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1042 +#: cinder/volume/drivers/netapp/iscsi.py:1381 +#: cinder/volume/drivers/netapp/iscsi.py:1699 +#: cinder/volume/drivers/netapp/nfs.py:74 +#, python-format +msgid "" +"Cannot create volume of size %(vol_size)s from snapshot of size " +"%(snap_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1054 +#, python-format +msgid "" +"Cannot create volume of type %(new_type)s from snapshot of type " +"%(old_type)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1075 +#: cinder/volume/drivers/netapp/nfs.py:273 +#, python-format +msgid "" +"Cannot create clone of size %(vol_size)s from volume of size " +"%(src_vol_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1086 +#, python-format +msgid "Cannot create clone of type %(new_type)s from volume of type %(old_type)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1143 +#, python-format +msgid "No metadata property %(prop)s defined for the LUN %(name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1216 +#: cinder/volume/drivers/netapp/iscsi.py:1561 +msgid "Success getting LUN list from server" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1238 +#: cinder/volume/drivers/netapp/iscsi.py:1576 +#, python-format +msgid "Created LUN with name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1254 +#: cinder/volume/drivers/netapp/iscsi.py:1369 +#: cinder/volume/drivers/netapp/iscsi.py:1593 +#, python-format +msgid "Destroyed LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1291 +#, python-format +msgid "Mapped LUN %(handle)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1298 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(handle)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1303 +#: cinder/volume/drivers/netapp/iscsi.py:1637 +#, fuzzy, python-format +msgid "Failed to get LUN target details for the LUN %s" +msgstr "Fallo al generar metadatos para la ip %s" + +#: cinder/volume/drivers/netapp/iscsi.py:1307 +#: cinder/volume/drivers/netapp/iscsi.py:1648 +#, fuzzy, python-format +msgid "Failed to get target portal for the LUN %s" +msgstr "Fallo al generar metadatos para la ip %s" + +#: cinder/volume/drivers/netapp/iscsi.py:1311 +#: cinder/volume/drivers/netapp/iscsi.py:1651 +#, fuzzy, python-format +msgid "Failed to get target IQN for the LUN %s" +msgstr "Fallo al generar metadatos para la ip %s" + +#: cinder/volume/drivers/netapp/iscsi.py:1344 +#, python-format +msgid "Unmapped LUN %(handle)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1365 +#, python-format +msgid "No entry in LUN table for snapshot %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1409 +#: cinder/volume/drivers/netapp/iscsi.py:1907 +msgid "Object is not a NetApp LUN." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1419 +#: cinder/volume/drivers/netapp/iscsi.py:2188 +#, python-format +msgid "Cloned LUN with new name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1439 +#, fuzzy, python-format +msgid "Could not find handle for LUN named %s" +msgstr "Imposible encontrar SR en VBD %s" + +#: cinder/volume/drivers/netapp/iscsi.py:1458 +#: cinder/volume/drivers/netapp/iscsi.py:1936 +#, python-format +msgid "" +"Cannot clone volume of size %(vol_size)s from src volume of size " +"%(src_vol_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1518 +#, fuzzy, python-format +msgid "Using NetApp filer: %s" +msgstr "Ejecutando instancias: %s" + +#: cinder/volume/drivers/netapp/iscsi.py:1585 +#, python-format +msgid "No entry in LUN table for volume/snapshot %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1628 +#, python-format +msgid "Mapped LUN %(name)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1632 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(name)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1688 +#, fuzzy, python-format +msgid "Snapshot %s deletion successful" +msgstr "volumen %s: eliminado satisfactoriamente" + +#: cinder/volume/drivers/netapp/iscsi.py:1717 +#, python-format +msgid "Unmapped LUN %(name)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1735 +#, fuzzy, python-format +msgid "Failed to get vol with required size for volume: %s" +msgstr "Fallo al generar metadatos para la ip %s" + +#: cinder/volume/drivers/netapp/iscsi.py:1811 +#, python-format +msgid "Error mapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1829 +#, python-format +msgid "Error unmapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1923 +#, python-format +msgid "Could not find attribute for LUN named %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2057 +#, python-format +msgid "No iscsi service found for vserver %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2192 +#, python-format +msgid "No clonned lun named %s found on the filer" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2377 +#, python-format +msgid "Error finding luns for volume %(vol)s. Verify volume exists." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2481 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s completed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2484 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s failed" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:312 +#, python-format +msgid "" +"Cloning with params ip %(host_ip)s, exp_path\n" +" %(export_path)s, vol %(volume_name)s,\n" +" clone_name %(clone_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:461 +#, python-format +msgid "No interface found on cluster for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:487 +#, python-format +msgid "" +"No volume on cluster with vserver\n" +" %(vserver)s and junction path " +"%(junction)s\n" +" " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:493 +#, python-format +msgid "" +"Cloning with params volume %(volume)s,src %(src_path)s,\n" +" dest %(dest_path)s, vserver %(vserver)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:547 +#, python-format +msgid "No storage path found for export path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:555 +#, python-format +msgid "Cloning with src %(src_path)s, dest %(dest_path)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/__init__.py:27 +msgid "Nexenta SA returned the error" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:64 +#, python-format +msgid "Sending JSON data: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:69 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:75 +msgid "No headers in server response" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:76 +msgid "Bad response from server" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:79 +#, fuzzy, python-format +msgid "Got response: %s" +msgstr "respuesta %s" + +#: cinder/volume/drivers/nexenta/volume.py:99 +#, fuzzy, python-format +msgid "Volume %s does not exist in Nexenta SA" +msgstr "el grupo de volumenes %s no existe" + +#: cinder/volume/drivers/nexenta/volume.py:211 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:220 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:228 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:238 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:274 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:282 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:74 +#, python-format +msgid "CLIQ command returned %s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:80 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:88 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. 
Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:118 +#, python-format +msgid "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. " +"Result=%(_xml)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:171 +#, python-format +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:235 +msgid "local_path not supported" +msgstr "" + +#: cinder/volume/drivers/san/san.py:148 +#, fuzzy, python-format +msgid "Error running SSH command: %s" +msgstr "Error inesperado mientras el comando se ejecutaba" + +#: cinder/volume/drivers/san/san.py:169 +msgid "Specify san_password or san_private_key" +msgstr "" + +#: cinder/volume/drivers/san/san.py:173 +msgid "san_ip must be set" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:80 +#, python-format +msgid "Cannot parse list-view output: %s" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:174 +#, python-format +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:257 +#, python-format +msgid "SSH Command failed after '%(total_attempts)r' attempts: '%(command)s'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:261 +#, fuzzy, python-format +msgid "Error running ssh command: %s" +msgstr "Error inesperado mientras el comando se ejecutaba" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:446 +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:104 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:103 +#, python-format +msgid "CPG (%s) doesn't exist on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:493 +#, python-format +msgid "Must specify a valid persona %(valid)s, value '%(persona)s' is invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:532 +#, python-format +msgid "" +"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " +"invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:569 +#, fuzzy, python-format +msgid "Volume (%s) already exists on array" +msgstr "el grupo %s ya existe" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:622 +#, fuzzy, python-format +msgid "Unexpected state while cloning %s" +msgstr "Error inesperado mientras el comando se ejecutaba" + +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:97 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:96 +msgid "Login to 3PAR array invalid" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:218 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:237 +#, python-format +msgid "The hostname must be called '%s'" +msgstr "" + diff --git a/cinder/locale/fi_FI/LC_MESSAGES/cinder.po b/cinder/locale/fi_FI/LC_MESSAGES/cinder.po new file mode 100644 index 0000000000..87cea00182 --- /dev/null +++ b/cinder/locale/fi_FI/LC_MESSAGES/cinder.po @@ -0,0 +1,5574 @@ +# Finnish (Finland) translations for cinder. +# Copyright (C) 2013 ORGANIZATION +# This file is distributed under the same license as the cinder project. 
+# +# Translators: +msgid "" +msgstr "" +"Project-Id-Version: Cinder\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2013-05-21 18:10+0000\n" +"PO-Revision-Date: 2013-05-08 11:44+0000\n" +"Last-Translator: FULL NAME \n" +"Language-Team: Finnish (Finland) " +"(http://www.transifex.com/projects/p/openstack/language/fi_FI/)\n" +"Plural-Forms: nplurals=2; plural=(n != 1)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 0.9.6\n" + +#: cinder/context.py:61 +#, python-format +msgid "Arguments dropped when creating context: %s" +msgstr "" + +#: cinder/context.py:92 +#, python-format +msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" +msgstr "" + +#: cinder/exception.py:63 +msgid "Unexpected error while running command." +msgstr "" + +#: cinder/exception.py:66 +#, python-format +msgid "" +"%(description)s\n" +"Command: %(cmd)s\n" +"Exit code: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" +msgstr "" + +#: cinder/exception.py:90 +msgid "DB exception wrapped." +msgstr "" + +#: cinder/exception.py:104 +msgid "An unknown exception occurred." +msgstr "" + +#: cinder/exception.py:125 cinder/openstack/common/rpc/common.py:86 +msgid "Exception in string format operation" +msgstr "" + +#: cinder/exception.py:138 +msgid "Connection to glance failed" +msgstr "" + +#: cinder/exception.py:142 +msgid "Not authorized." +msgstr "" + +#: cinder/exception.py:147 +msgid "User does not have admin privileges" +msgstr "" + +#: cinder/exception.py:151 +#, python-format +msgid "Policy doesn't allow %(action)s to be performed." +msgstr "" + +#: cinder/exception.py:155 +#, python-format +msgid "Not authorized for image %(image_id)s." +msgstr "" + +#: cinder/exception.py:159 +msgid "Unacceptable parameters." +msgstr "" + +#: cinder/exception.py:164 +msgid "Invalid snapshot" +msgstr "" + +#: cinder/exception.py:168 +#, python-format +msgid "Volume %(volume_id)s is still attached, detach volume first." +msgstr "" + +#: cinder/exception.py:172 +msgid "Failed to load data into json format" +msgstr "" + +#: cinder/exception.py:176 +msgid "The request is invalid." +msgstr "" + +#: cinder/exception.py:180 +msgid "The results are invalid." +msgstr "" + +#: cinder/exception.py:184 +msgid "Invalid input received" +msgstr "" + +#: cinder/exception.py:188 +msgid "Invalid volume type" +msgstr "" + +#: cinder/exception.py:192 +msgid "Invalid volume" +msgstr "" + +#: cinder/exception.py:196 +#, python-format +msgid "Invalid content type %(content_type)s." +msgstr "" + +#: cinder/exception.py:200 +msgid "Invalid Parameter: Unicode is not supported by the current database." +msgstr "" + +#: cinder/exception.py:207 +#, python-format +msgid "%(err)s" +msgstr "" + +#: cinder/exception.py:211 +msgid "Service is unavailable at this time." +msgstr "" + +#: cinder/exception.py:215 +#, python-format +msgid "Image %(image_id)s is unacceptable: %(reason)s" +msgstr "" + +#: cinder/exception.py:219 +msgid "Expected a uuid but received %(uuid)." +msgstr "" + +#: cinder/exception.py:223 +msgid "Resource could not be found." +msgstr "" + +#: cinder/exception.py:229 +#, python-format +msgid "Volume %(volume_id)s persistence file could not be found." +msgstr "" + +#: cinder/exception.py:233 +#, python-format +msgid "Volume %(volume_id)s could not be found." 
+msgstr "" + +#: cinder/exception.py:237 +#, python-format +msgid "Unable to locate account %(account_name)s on Solidfire device" +msgstr "" + +#: cinder/exception.py:242 +#, python-format +msgid "Volume not found for instance %(instance_id)s." +msgstr "" + +#: cinder/exception.py:246 +#, python-format +msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:251 cinder/exception.py:264 +msgid "Invalid metadata" +msgstr "" + +#: cinder/exception.py:255 cinder/exception.py:268 +msgid "Invalid metadata size" +msgstr "" + +#: cinder/exception.py:259 +#, python-format +msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:272 +#, python-format +msgid "Volume type %(volume_type_id)s could not be found." +msgstr "" + +#: cinder/exception.py:276 +#, python-format +msgid "Volume type with name %(volume_type_name)s could not be found." +msgstr "" + +#: cinder/exception.py:281 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s has no extra specs with key " +"%(extra_specs_key)s." +msgstr "" + +#: cinder/exception.py:286 +#, python-format +msgid "Snapshot %(snapshot_id)s could not be found." +msgstr "" + +#: cinder/exception.py:290 +#, python-format +msgid "deleting volume %(volume_name)s that has snapshot" +msgstr "" + +#: cinder/exception.py:294 +#, python-format +msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" +msgstr "" + +#: cinder/exception.py:299 +#, python-format +msgid "No target id found for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:303 +#, python-format +msgid "Failed to create iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:307 +#, python-format +msgid "Failed to attach iSCSI target for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:311 +#, python-format +msgid "Failed to remove iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:315 +#, python-format +msgid "No disk at %(location)s" +msgstr "" + +#: cinder/exception.py:319 +#, python-format +msgid "Invalid image href %(image_href)s." +msgstr "" + +#: cinder/exception.py:323 +#, python-format +msgid "Image %(image_id)s could not be found." +msgstr "" + +#: cinder/exception.py:327 +#, python-format +msgid "Service %(service_id)s could not be found." +msgstr "" + +#: cinder/exception.py:331 +#, python-format +msgid "Host %(host)s could not be found." +msgstr "" + +#: cinder/exception.py:335 +#, python-format +msgid "Scheduler Host Filter %(filter_name)s could not be found." +msgstr "" + +#: cinder/exception.py:339 +#, python-format +msgid "Scheduler Host Weigher %(weigher_name)s could not be found." +msgstr "" + +#: cinder/exception.py:343 +#, python-format +msgid "Could not find binary %(binary)s on host %(host)s." +msgstr "" + +#: cinder/exception.py:347 +#, python-format +msgid "Invalid reservation expiration %(expire)s." +msgstr "" + +#: cinder/exception.py:351 +#, python-format +msgid "" +"Change would make usage less than 0 for the following resources: " +"%(unders)s" +msgstr "" + +#: cinder/exception.py:356 +msgid "Quota could not be found" +msgstr "" + +#: cinder/exception.py:360 +#, python-format +msgid "Unknown quota resources %(unknown)s." +msgstr "" + +#: cinder/exception.py:364 +#, python-format +msgid "Quota for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:368 +#, python-format +msgid "Quota class %(class_name)s could not be found." 
+msgstr "" + +#: cinder/exception.py:372 +#, python-format +msgid "Quota usage for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:376 +#, python-format +msgid "Quota reservation %(uuid)s could not be found." +msgstr "" + +#: cinder/exception.py:380 +#, python-format +msgid "Quota exceeded for resources: %(overs)s" +msgstr "" + +#: cinder/exception.py:384 +#, python-format +msgid "Migration %(migration_id)s could not be found." +msgstr "" + +#: cinder/exception.py:388 +#, python-format +msgid "Migration not found for instance %(instance_id)s with status %(status)s." +msgstr "" + +#: cinder/exception.py:393 +#, python-format +msgid "File %(file_path)s could not be found." +msgstr "" + +#: cinder/exception.py:397 +#, python-format +msgid "Class %(class_name)s could not be found: %(exception)s" +msgstr "" + +#: cinder/exception.py:401 +msgid "Action not allowed." +msgstr "" + +#: cinder/exception.py:410 +#, python-format +msgid "Key pair %(key_name)s already exists." +msgstr "" + +#: cinder/exception.py:414 +#, python-format +msgid "Volume Type %(id)s already exists." +msgstr "" + +#: cinder/exception.py:418 +msgid "Migration error" +msgstr "" + +#: cinder/exception.py:422 +#, python-format +msgid "Malformed message body: %(reason)s" +msgstr "" + +#: cinder/exception.py:426 +#, python-format +msgid "Could not find config at %(path)s" +msgstr "" + +#: cinder/exception.py:430 +#, python-format +msgid "Could not load paste app '%(name)s' from %(path)s" +msgstr "" + +#: cinder/exception.py:434 +#, python-format +msgid "No valid host was found. %(reason)s" +msgstr "" + +#: cinder/exception.py:438 +#, python-format +msgid "Host %(host)s is not up or doesn't exist." +msgstr "" + +#: cinder/exception.py:442 +msgid "Quota exceeded" +msgstr "" + +#: cinder/exception.py:449 +msgid "Requested volume or snapshot exceeds allowed Gigabytes quota" +msgstr "" + +#: cinder/exception.py:454 +msgid "Maximum volume/snapshot size exceeded" +msgstr "" + +#: cinder/exception.py:458 +#, python-format +msgid "Maximum number of volumes allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:462 +#, python-format +msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:466 +#, python-format +msgid "Detected more than one volume with name %(vol_name)s" +msgstr "" + +#: cinder/exception.py:470 +#, python-format +msgid "3PAR Host already exists: %(err)s. %(info)s" +msgstr "" + +#: cinder/exception.py:474 +#, python-format +msgid "Invalid 3PAR Domain: %(err)s" +msgstr "" + +#: cinder/exception.py:478 +#, python-format +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +msgstr "" + +#: cinder/exception.py:483 +msgid "Bad response from SolidFire API" +msgstr "" + +#: cinder/exception.py:487 +#, python-format +msgid "Error in SolidFire API response: data=%(data)s" +msgstr "" + +#: cinder/exception.py:491 +#, python-format +msgid "Unknown or unsupported command %(cmd)s" +msgstr "" + +#: cinder/exception.py:495 +#, python-format +msgid "Malformed response to command %(cmd)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:499 +#, python-format +msgid "Bad HTTP response status %(status)s" +msgstr "" + +#: cinder/exception.py:503 +#, python-format +msgid "Operation failed with status=%(status)s. 
Full dump: %(data)s" +msgstr "" + +#: cinder/exception.py:507 +#, python-format +msgid "Unable to create server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:511 +#, python-format +msgid "Unable to find server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:515 +msgid "Unable to find any active VPSA controller" +msgstr "" + +#: cinder/exception.py:519 +#, python-format +msgid "Failed to retrieve attachments for volume %(name)s" +msgstr "" + +#: cinder/exception.py:523 +#, python-format +msgid "Invalid attachment info for volume %(name)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:527 +#, python-format +msgid "Instance %(instance_id)s could not be found." +msgstr "" + +#: cinder/exception.py:531 +#, python-format +msgid "Bad or unexpected response from the storage volume backend API: %(data)s" +msgstr "" + +#: cinder/exception.py:536 +msgid "Unknown NFS exception" +msgstr "" + +#: cinder/exception.py:540 +msgid "No mounted NFS shares found" +msgstr "" + +#: cinder/exception.py:544 cinder/exception.py:556 +#, python-format +msgid "There is no share which can host %(volume_size)sG" +msgstr "" + +#: cinder/exception.py:548 +msgid "Unknown Gluster exception" +msgstr "" + +#: cinder/exception.py:552 +msgid "No mounted Gluster shares found" +msgstr "" + +#: cinder/exception.py:560 +#, python-format +msgid "" +"Glance metadata cannot be updated, key %(key)s exists for volume id " +"%(volume_id)s" +msgstr "" + +#: cinder/exception.py:565 +msgid "Failed to copy image to volume" +msgstr "" + +#: cinder/exception.py:569 +#, python-format +msgid "Backup %(backup_id)s could not be found." +msgstr "" + +#: cinder/exception.py:573 +#, python-format +msgid "Invalid backup: %(reason)s" +msgstr "" + +#: cinder/exception.py:577 +msgid "Connection to swift failed" +msgstr "" + +#: cinder/flags.py:145 +msgid "Deploy v1 of the Cinder API. " +msgstr "" + +#: cinder/flags.py:148 +msgid "Deploy v2 of the Cinder API. " +msgstr "" + +#: cinder/manager.py:158 +#, python-format +msgid "Skipping %(full_task_name)s, %(ticks_to_skip)s ticks left until next run" +msgstr "" + +#: cinder/manager.py:164 +#, python-format +msgid "Running periodic task %(full_task_name)s" +msgstr "" + +#: cinder/manager.py:171 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "" + +#: cinder/manager.py:216 +msgid "Notifying Schedulers of capabilities ..." 
+msgstr "" + +#: cinder/policy.py:30 +msgid "JSON file representing policy" +msgstr "" + +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" +msgstr "" + +#: cinder/quota.py:704 +#, python-format +msgid "Created reservations %(reservations)s" +msgstr "" + +#: cinder/quota.py:726 +#, python-format +msgid "Failed to commit reservations %(reservations)s" +msgstr "" + +#: cinder/quota.py:747 +#, python-format +msgid "Failed to roll back reservations %(reservations)s" +msgstr "" + +#: cinder/service.py:124 +msgid "SIGTERM received" +msgstr "" + +#: cinder/service.py:174 cinder/openstack/common/service.py:164 +msgid "Parent process has died unexpectedly, exiting" +msgstr "" + +#: cinder/service.py:213 cinder/openstack/common/service.py:200 +msgid "Forking too fast, sleeping" +msgstr "" + +#: cinder/service.py:231 cinder/openstack/common/service.py:121 +#: cinder/openstack/common/service.py:218 +#, python-format +msgid "Caught %s, exiting" +msgstr "" + +#: cinder/service.py:236 cinder/openstack/common/service.py:223 +msgid "Unhandled exception" +msgstr "" + +#: cinder/service.py:243 cinder/openstack/common/service.py:230 +#, python-format +msgid "Started child %d" +msgstr "" + +#: cinder/service.py:253 cinder/openstack/common/service.py:240 +#, python-format +msgid "Starting %d workers" +msgstr "" + +#: cinder/service.py:272 cinder/openstack/common/service.py:257 +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "" + +#: cinder/service.py:275 +#, python-format +msgid "Child %(pid)d exited with status %(code)d" +msgstr "" + +#: cinder/service.py:278 cinder/openstack/common/service.py:265 +#, python-format +msgid "pid %d not in child list" +msgstr "" + +#: cinder/service.py:286 +#, python-format +msgid "_wait_child %d" +msgstr "" + +#: cinder/service.py:302 +#, python-format +msgid "wait wrap.failed %s" +msgstr "" + +#: cinder/service.py:310 cinder/openstack/common/service.py:293 +#, python-format +msgid "Caught %s, stopping children" +msgstr "" + +#: cinder/service.py:321 cinder/openstack/common/service.py:304 +#, python-format +msgid "Waiting on %d children to exit" +msgstr "" + +#: cinder/service.py:353 +#, python-format +msgid "Starting %(topic)s node (version %(version_string)s)" +msgstr "" + +#: cinder/service.py:367 cinder/openstack/common/rpc/service.py:47 +#, python-format +msgid "Creating Consumer connection for Service %s" +msgstr "" + +#: cinder/service.py:458 +msgid "Service killed that has no database entry" +msgstr "" + +#: cinder/service.py:495 +msgid "The service database object disappeared, Recreating it." +msgstr "" + +#: cinder/service.py:510 +msgid "Recovered model server connection!" +msgstr "" + +#: cinder/service.py:516 +msgid "model server went away" +msgstr "" + +#: cinder/service.py:608 +msgid "Full set of FLAGS:" +msgstr "" + +#: cinder/service.py:615 +#, python-format +msgid "%(flag)s : FLAG SET " +msgstr "" + +#: cinder/utils.py:96 +#, python-format +msgid "Fetching %s" +msgstr "" + +#: cinder/utils.py:147 cinder/openstack/common/processutils.py:122 +#, python-format +msgid "Got unknown keyword args to utils.execute: %r" +msgstr "" + +#: cinder/utils.py:153 +msgid "" +"The root_helper option (which lets you specify a root wrapper different " +"from cinder-rootwrap, and defaults to using sudo) is now deprecated. You " +"should use the rootwrap_config option instead." 
+msgstr "" + +#: cinder/utils.py:169 cinder/openstack/common/processutils.py:137 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "" + +#: cinder/utils.py:186 cinder/utils.py:267 +#: cinder/openstack/common/processutils.py:162 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:224 +#, python-format +msgid "Result was %s" +msgstr "" + +#: cinder/utils.py:199 cinder/openstack/common/processutils.py:174 +#, python-format +msgid "%r failed. Retrying." +msgstr "" + +#: cinder/utils.py:241 cinder/volume/drivers/san/hp/hp_3par_common.py:197 +#, python-format +msgid "Running cmd (SSH): %s" +msgstr "" + +#: cinder/utils.py:243 +msgid "Environment not supported over SSH" +msgstr "" + +#: cinder/utils.py:247 +msgid "process_input not supported over SSH" +msgstr "" + +#: cinder/utils.py:316 cinder/volume/drivers/huawei/huawei_iscsi.py:91 +msgid "Specify a password or private_key" +msgstr "" + +#: cinder/utils.py:332 cinder/volume/drivers/huawei/huawei_iscsi.py:99 +#, python-format +msgid "Error connecting via ssh: %s" +msgstr "" + +#: cinder/utils.py:373 +#, python-format +msgid "debug in callback: %s" +msgstr "" + +#: cinder/utils.py:532 +#, python-format +msgid "Link Local address is not found.:%s" +msgstr "" + +#: cinder/utils.py:535 +#, python-format +msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +msgstr "" + +#: cinder/utils.py:569 +#, python-format +msgid "Invalid backend: %s" +msgstr "" + +#: cinder/utils.py:580 +#, python-format +msgid "backend %s" +msgstr "" + +#: cinder/utils.py:630 +msgid "in looping call" +msgstr "" + +#: cinder/utils.py:813 +#, python-format +msgid "Expected object of type: %s" +msgstr "" + +#: cinder/utils.py:903 +#, python-format +msgid "timefunc: '%(name)s' took %(total_time).2f secs" +msgstr "" + +#: cinder/utils.py:1105 +#, python-format +msgid "Could not remove tmpdir: %s" +msgstr "" + +#: cinder/utils.py:1213 +#, python-format +msgid "Unknown byte multiplier: %s" +msgstr "" + +#: cinder/wsgi.py:121 +#, python-format +msgid "Unable to find cert_file : %s" +msgstr "" + +#: cinder/wsgi.py:124 +#, python-format +msgid "Unable to find ca_file : %s" +msgstr "" + +#: cinder/wsgi.py:127 +#, python-format +msgid "Unable to find key_file : %s" +msgstr "" + +#: cinder/wsgi.py:130 +msgid "" +"When running server in SSL mode, you must specify both a cert_file and " +"key_file option value in your configuration file" +msgstr "" + +#: cinder/wsgi.py:163 +#, python-format +msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" +msgstr "" + +#: cinder/wsgi.py:207 +#, python-format +msgid "Started %(name)s on %(_host)s:%(_port)s" +msgstr "" + +#: cinder/wsgi.py:226 +msgid "Stopping WSGI server." +msgstr "" + +#: cinder/wsgi.py:240 +msgid "WSGI server has stopped." +msgstr "" + +#: cinder/wsgi.py:309 +msgid "You must implement __call__" +msgstr "" + +#: cinder/api/auth.py:26 +msgid "" +"cinder.api.auth:CinderKeystoneContext is deprecated. Please use " +"cinder.api.middleware.auth:CinderKeystoneContext instead." +msgstr "" + +#: cinder/api/auth.py:34 +msgid "" +"cinder.api.auth:pipeline_factory is deprecated. Please use " +"cinder.api.middleware.auth:pipeline_factory instead." 
+msgstr "" + +#: cinder/api/common.py:63 cinder/api/common.py:97 cinder/volume/api.py:359 +msgid "limit param must be an integer" +msgstr "" + +#: cinder/api/common.py:66 cinder/api/common.py:101 cinder/volume/api.py:356 +msgid "limit param must be positive" +msgstr "" + +#: cinder/api/common.py:91 +msgid "offset param must be an integer" +msgstr "" + +#: cinder/api/common.py:105 +msgid "offset param must be positive" +msgstr "" + +#: cinder/api/common.py:133 +#, python-format +msgid "marker [%s] not found" +msgstr "" + +#: cinder/api/common.py:160 +#, python-format +msgid "href %s does not contain version" +msgstr "" + +#: cinder/api/extensions.py:184 +msgid "Initializing extension manager." +msgstr "" + +#: cinder/api/extensions.py:199 +#, python-format +msgid "Loaded extension: %s" +msgstr "" + +#: cinder/api/extensions.py:237 +#, python-format +msgid "Ext name: %s" +msgstr "" + +#: cinder/api/extensions.py:238 +#, python-format +msgid "Ext alias: %s" +msgstr "" + +#: cinder/api/extensions.py:239 +#, python-format +msgid "Ext description: %s" +msgstr "" + +#: cinder/api/extensions.py:241 +#, python-format +msgid "Ext namespace: %s" +msgstr "" + +#: cinder/api/extensions.py:242 +#, python-format +msgid "Ext updated: %s" +msgstr "" + +#: cinder/api/extensions.py:244 +#, python-format +msgid "Exception loading extension: %s" +msgstr "" + +#: cinder/api/extensions.py:258 +#, python-format +msgid "Loading extension %s" +msgstr "" + +#: cinder/api/extensions.py:264 +#, python-format +msgid "Calling extension factory %s" +msgstr "" + +#: cinder/api/extensions.py:278 +#, python-format +msgid "osapi_volume_extension is set to deprecated path: %s" +msgstr "" + +#: cinder/api/extensions.py:280 +#, python-format +msgid "" +"Please set your flag or cinder.conf settings for osapi_volume_extension " +"to: %s" +msgstr "" + +#: cinder/api/extensions.py:289 +#, python-format +msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:357 +#, python-format +msgid "Failed to load extension %(classpath)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:381 +#, python-format +msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgstr "" + +#: cinder/api/sizelimit.py:25 +msgid "" +"cinder.api.sizelimit:RequestBodySizeLimiter is deprecated. Please use " +"cinder.api.middleware.sizelimit:RequestBodySizeLimiter instead" +msgstr "" + +#: cinder/api/xmlutil.py:268 +msgid "element is not a child" +msgstr "" + +#: cinder/api/xmlutil.py:417 +msgid "root element selecting a list" +msgstr "" + +#: cinder/api/xmlutil.py:742 +#, python-format +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgstr "" + +#: cinder/api/xmlutil.py:861 +msgid "subclasses must implement construct()!" 
+msgstr "" + +#: cinder/api/contrib/admin_actions.py:80 +#, python-format +msgid "Updating %(resource)s '%(id)s' with '%(update)r'" +msgstr "" + +#: cinder/api/contrib/backups.py:128 +#, python-format +msgid "show called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:140 +#, python-format +msgid "delete called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:143 +#, python-format +msgid "Delete backup with id: %s" +msgstr "" + +#: cinder/api/contrib/backups.py:185 +#, python-format +msgid "Creating new backup %s" +msgstr "" + +#: cinder/api/contrib/backups.py:195 cinder/api/contrib/backups.py:230 +msgid "Incorrect request body format" +msgstr "" + +#: cinder/api/contrib/backups.py:201 +#, python-format +msgid "Creating backup of volume %(volume_id)s in container %(container)s" +msgstr "" + +#: cinder/api/contrib/backups.py:221 +#, python-format +msgid "Restoring backup %(backup_id)s (%(body)s)" +msgstr "" + +#: cinder/api/contrib/backups.py:234 +#, python-format +msgid "Restoring backup %(backup_id)s to volume %(volume_id)s" +msgstr "" + +#: cinder/api/contrib/extended_snapshot_attributes.py:61 +msgid "Snapshot not found." +msgstr "" + +#: cinder/api/contrib/hosts.py:83 cinder/api/openstack/wsgi.py:158 +msgid "cannot understand XML" +msgstr "" + +#: cinder/api/contrib/hosts.py:133 +#, python-format +msgid "Host '%s' could not be found." +msgstr "" + +#: cinder/api/contrib/hosts.py:162 +#, python-format +msgid "Invalid status: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:165 +#, python-format +msgid "Invalid update setting: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:177 +#, python-format +msgid "Setting host %(host)s to %(state)s." +msgstr "" + +#: cinder/api/contrib/hosts.py:202 +msgid "Describe-resource is admin only functionality" +msgstr "" + +#: cinder/api/contrib/hosts.py:210 +msgid "Host not found" +msgstr "" + +#: cinder/api/contrib/quotas.py:63 +msgid "Quota limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:102 +msgid "Request body empty" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:106 +#: cinder/api/v1/snapshot_metadata.py:77 cinder/api/v1/volume_metadata.py:77 +#: cinder/api/v2/snapshot_metadata.py:77 +msgid "Request body and URI mismatch" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:109 +#: cinder/api/v1/snapshot_metadata.py:81 cinder/api/v1/volume_metadata.py:81 +#: cinder/api/v2/snapshot_metadata.py:81 +msgid "Request body contains too many items" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:159 +msgid "Invalid request body" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:163 +msgid "No image_name was specified in request." +msgstr "" + +#: cinder/api/middleware/fault.py:45 +#, python-format +msgid "Caught error: %s" +msgstr "" + +#: cinder/api/middleware/fault.py:54 cinder/api/openstack/wsgi.py:886 +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "" + +#: cinder/api/middleware/sizelimit.py:54 cinder/api/middleware/sizelimit.py:63 +#: cinder/api/middleware/sizelimit.py:77 +msgid "Request is too large." 
+msgstr "" + +#: cinder/api/openstack/__init__.py:74 +msgid "Must specify an ExtensionManager class" +msgstr "" + +#: cinder/api/openstack/__init__.py:85 +#, python-format +msgid "Extended resource: %s" +msgstr "" + +#: cinder/api/openstack/__init__.py:110 +#, python-format +msgid "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" +msgstr "" + +#: cinder/api/openstack/__init__.py:115 +#, python-format +msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgstr "" + +#: cinder/api/openstack/__init__.py:128 +msgid "" +"cinder.api.openstack:FaultWrapper is deprecated. Please use " +"cinder.api.middleware.fault:FaultWrapper instead." +msgstr "" + +#: cinder/api/openstack/urlmap.py:25 +msgid "" +"cinder.api.openstack.urlmap:urlmap_factory is deprecated. Please use " +"cinder.api.urlmap:urlmap_factory instead." +msgstr "" + +#: cinder/api/openstack/wsgi.py:134 cinder/api/openstack/wsgi.py:537 +msgid "cannot understand JSON" +msgstr "" + +#: cinder/api/openstack/wsgi.py:542 +msgid "too many body keys" +msgstr "" + +#: cinder/api/openstack/wsgi.py:581 +#, python-format +msgid "Exception handling resource: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:586 +#, python-format +msgid "Fault thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:589 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:697 +msgid "Unrecognized Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:701 +msgid "No Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:705 +msgid "Empty body provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:816 +#, python-format +msgid "There is no such action: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:819 cinder/api/openstack/wsgi.py:832 +#: cinder/api/v1/snapshot_metadata.py:55 cinder/api/v1/snapshot_metadata.py:73 +#: cinder/api/v1/snapshot_metadata.py:98 cinder/api/v1/snapshot_metadata.py:123 +#: cinder/api/v1/volume_metadata.py:55 cinder/api/v1/volume_metadata.py:73 +#: cinder/api/v1/volume_metadata.py:98 cinder/api/v1/volume_metadata.py:123 +#: cinder/api/v2/snapshot_metadata.py:55 cinder/api/v2/snapshot_metadata.py:73 +#: cinder/api/v2/snapshot_metadata.py:98 cinder/api/v2/snapshot_metadata.py:123 +msgid "Malformed request body" +msgstr "" + +#: cinder/api/openstack/wsgi.py:829 +msgid "Unsupported Content-Type" +msgstr "" + +#: cinder/api/openstack/wsgi.py:841 +msgid "Malformed request url" +msgstr "" + +#: cinder/api/openstack/wsgi.py:889 +#, python-format +msgid "%(url)s returned a fault: %(e)s" +msgstr "" + +#: cinder/api/openstack/volume/__init__.py:25 +msgid "" +"cinder.api.openstack.volume:APIRouter is deprecated. Please use " +"cinder.api.v1.router:APIRouter instead." +msgstr "" + +#: cinder/api/openstack/volume/versions.py:26 +msgid "" +"cinder.api.openstack.volume.versions.Versions is deprecated. Please use " +"cinder.api.versions.Versions instead." +msgstr "" + +#: cinder/api/v1/limits.py:144 cinder/api/v2/limits.py:144 +#, python-format +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." +msgstr "" + +#: cinder/api/v1/limits.py:270 cinder/api/v2/limits.py:270 +msgid "This request was rate-limited." 
+msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:39 cinder/api/v1/snapshot_metadata.py:119 +#: cinder/api/v1/snapshot_metadata.py:158 cinder/api/v2/snapshot_metadata.py:39 +#: cinder/api/v2/snapshot_metadata.py:119 +#: cinder/api/v2/snapshot_metadata.py:158 +msgid "snapshot does not exist" +msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:141 +#: cinder/api/v1/snapshot_metadata.py:151 cinder/api/v1/volume_metadata.py:141 +#: cinder/api/v1/volume_metadata.py:151 cinder/api/v2/snapshot_metadata.py:141 +#: cinder/api/v2/snapshot_metadata.py:151 +msgid "Metadata item was not found" +msgstr "" + +#: cinder/api/v1/snapshots.py:123 cinder/api/v2/snapshots.py:123 +#, python-format +msgid "Delete snapshot with id: %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:173 cinder/api/v2/snapshots.py:179 +#, python-format +msgid "Create snapshot from volume %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:177 cinder/api/v2/snapshots.py:188 +#, python-format +msgid "Invalid value '%s' for force. " +msgstr "" + +#: cinder/api/v1/volume_metadata.py:39 cinder/api/v1/volume_metadata.py:119 +#: cinder/api/v1/volume_metadata.py:158 +msgid "volume does not exist" +msgstr "" + +#: cinder/api/v1/volumes.py:106 +#, python-format +msgid "vol=%s" +msgstr "" + +#: cinder/api/v1/volumes.py:236 cinder/api/v2/volumes.py:153 +#, python-format +msgid "Delete volume with id: %s" +msgstr "" + +#: cinder/api/v1/volumes.py:278 cinder/api/v1/volumes.py:282 +#: cinder/api/v2/volumes.py:209 cinder/api/v2/volumes.py:213 +msgid "Invalid imageRef provided." +msgstr "" + +#: cinder/api/v1/volumes.py:339 cinder/api/v2/volumes.py:272 +#, python-format +msgid "Create volume of %s GB" +msgstr "" + +#: cinder/api/v1/volumes.py:418 +#, python-format +msgid "Removing options '%(bad_options)s' from query" +msgstr "" + +#: cinder/api/v2/volumes.py:359 +#, python-format +msgid "Removing options '%s' from query" +msgstr "" + +#: cinder/backup/api.py:53 +msgid "Backup status must be available or error" +msgstr "" + +#: cinder/backup/api.py:78 +msgid "Volume to be backed up must be available" +msgstr "" + +#: cinder/backup/api.py:112 +msgid "Backup status must be available" +msgstr "" + +#: cinder/backup/api.py:117 +msgid "Backup to be restored has invalid size" +msgstr "" + +#: cinder/backup/api.py:126 +#, python-format +msgid "Creating volume of %(size)s GB for restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/api.py:140 +#, python-format +msgid "" +"volume size %(volume_size)d is too small to restore backup of size " +"%(size)d." 
+msgstr "" + +#: cinder/backup/api.py:145 +msgid "Volume to be restored to must be available" +msgstr "" + +#: cinder/backup/api.py:151 +msgid "Volume to be restored to is smaller than the backup to be restored" +msgstr "" + +#: cinder/backup/api.py:155 +#, python-format +msgid "Overwriting volume %(volume_id)s with restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/manager.py:79 +msgid "Cleaning up incomplete backup operations" +msgstr "" + +#: cinder/backup/manager.py:83 +#, python-format +msgid "Resetting volume %s to available (was backing-up)" +msgstr "" + +#: cinder/backup/manager.py:87 +#, python-format +msgid "Resetting volume %s to error_restoring (was restoring-backup)" +msgstr "" + +#: cinder/backup/manager.py:98 +#, python-format +msgid "Resetting backup %s to error (was creating)" +msgstr "" + +#: cinder/backup/manager.py:104 +#, python-format +msgid "Resetting backup %s to available (was restoring)" +msgstr "" + +#: cinder/backup/manager.py:109 +#, python-format +msgid "Resuming delete on backup: %s" +msgstr "" + +#: cinder/backup/manager.py:119 +#, python-format +msgid "create_backup started, backup: %(backup_id)s for volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:128 +#, python-format +msgid "" +"create_backup aborted, expected volume status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:137 +#, python-format +msgid "" +"create_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:160 +#, python-format +msgid "create_backup finished. backup: %s" +msgstr "" + +#: cinder/backup/manager.py:166 +#, python-format +msgid "" +"restore_backup started, restoring backup: %(backup_id)s to volume: " +"%(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:175 +#, python-format +msgid "" +"restore_backup aborted, expected volume status %(expected_status)s but " +"got %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:183 +#, python-format +msgid "" +"restore_backup aborted, expected backup status %(expected_status)s but " +"got %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:199 +#, python-format +msgid "" +"restore_backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]" +msgstr "" + +#: cinder/backup/manager.py:220 +#, python-format +msgid "" +"restore_backup finished, backup: %(backup_id)s restored to volume: " +"%(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:228 +#, python-format +msgid "delete_backup started, backup: %s" +msgstr "" + +#: cinder/backup/manager.py:234 +#, python-format +msgid "" +"delete_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:244 +#, python-format +msgid "" +"delete_backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]" +msgstr "" + +#: cinder/backup/manager.py:264 +#, python-format +msgid "delete_backup finished, backup %s deleted" +msgstr "" + +#: cinder/backup/services/swift.py:96 +#, python-format +msgid "unsupported compression algorithm: %s" +msgstr "" + +#: cinder/backup/services/swift.py:117 +#, python-format +msgid "_check_container_exists: container: %s" +msgstr "" + +#: cinder/backup/services/swift.py:122 +#, python-format +msgid "container %s does 
not exist" +msgstr "" + +#: cinder/backup/services/swift.py:127 +#, python-format +msgid "container %s exists" +msgstr "" + +#: cinder/backup/services/swift.py:133 +#, python-format +msgid "_create_container started, container: %(container)s,backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:148 +#, python-format +msgid "_generate_swift_object_name_prefix: %s" +msgstr "" + +#: cinder/backup/services/swift.py:159 +#, python-format +msgid "generated object list: %s" +msgstr "" + +#: cinder/backup/services/swift.py:169 +#, python-format +msgid "" +"_write_metadata started, container name: %(container)s, metadata " +"filename: %(filename)s" +msgstr "" + +#: cinder/backup/services/swift.py:184 +#, python-format +msgid "" +"error writing metadata file to swift, MD5 of metadata file in swift " +"[%(etag)s] is not the same as MD5 of metadata file sent to swift " +"[%(md5)s]" +msgstr "" + +#: cinder/backup/services/swift.py:188 +msgid "_write_metadata finished" +msgstr "" + +#: cinder/backup/services/swift.py:193 +#, python-format +msgid "" +"_read_metadata started, container name: %(container)s, metadata filename:" +" %(filename)s" +msgstr "" + +#: cinder/backup/services/swift.py:197 +#, python-format +msgid "_read_metadata finished (%s)" +msgstr "" + +#: cinder/backup/services/swift.py:207 +#, python-format +msgid "volume size %d is invalid." +msgstr "" + +#: cinder/backup/services/swift.py:221 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to swift, volume size: " +"%(volume_size_bytes)d, swift object names prefix %(object_prefix)s, " +"availability zone: %(availability_zone)s" +msgstr "" + +#: cinder/backup/services/swift.py:237 +msgid "reading chunk of data from volume" +msgstr "" + +#: cinder/backup/services/swift.py:244 +#, python-format +msgid "" +"compressed %(data_size_bytes)d bytes of data to %(comp_size_bytes)d bytes" +" using %(algorithm)s" +msgstr "" + +#: cinder/backup/services/swift.py:248 +msgid "not compressing data" +msgstr "" + +#: cinder/backup/services/swift.py:252 +msgid "About to put_object" +msgstr "" + +#: cinder/backup/services/swift.py:257 +#, python-format +msgid "swift MD5 for %(object_name)s: %(etag)s" +msgstr "" + +#: cinder/backup/services/swift.py:260 +#, python-format +msgid "backup MD5 for %(object_name)s: %(md5)s" +msgstr "" + +#: cinder/backup/services/swift.py:262 +#, python-format +msgid "" +"error writing object to swift, MD5 of object in swift %(etag)s is not the" +" same as MD5 of object sent to swift %(md5)s" +msgstr "" + +#: cinder/backup/services/swift.py:268 +msgid "Calling eventlet.sleep(0)" +msgstr "" + +#: cinder/backup/services/swift.py:276 +#, python-format +msgid "backup %s finished." +msgstr "" + +#: cinder/backup/services/swift.py:281 +#, python-format +msgid "v1 swift volume backup restore of %s started" +msgstr "" + +#: cinder/backup/services/swift.py:287 +#, python-format +msgid "metadata_object_names = %s" +msgstr "" + +#: cinder/backup/services/swift.py:293 +msgid "" +"restore_backup aborted, actual swift object list in swift does not match " +"object list stored in metadata" +msgstr "" + +#: cinder/backup/services/swift.py:299 +#, python-format +msgid "" +"restoring object from swift. 
backup: %(backup_id)s, container: " +"%(container)s, swift object name: %(object_name)s, volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:309 +#, python-format +msgid "decompressing data using %s algorithm" +msgstr "" + +#: cinder/backup/services/swift.py:323 +#, python-format +msgid "v1 swift volume backup restore of %s finished" +msgstr "" + +#: cinder/backup/services/swift.py:331 +#, python-format +msgid "" +"starting restore of backup %(object_prefix)s from swift container: " +"%(container)s, to volume %(volume_id)s, backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:339 +#, python-format +msgid "Restoring swift backup version %s" +msgstr "" + +#: cinder/backup/services/swift.py:344 +#, python-format +msgid "No support to restore swift backup version %s" +msgstr "" + +#: cinder/backup/services/swift.py:348 +#, python-format +msgid "restore %(backup_id)s to %(volume_id)s finished." +msgstr "" + +#: cinder/backup/services/swift.py:362 +msgid "swift error while listing objects, continuing with delete" +msgstr "" + +#: cinder/backup/services/swift.py:371 +#, python-format +msgid "swift error while deleting object %s, continuing with delete" +msgstr "" + +#: cinder/backup/services/swift.py:374 +#, python-format +msgid "deleted swift object: %(swift_object_name)s in container: %(container)s" +msgstr "" + +#: cinder/backup/services/swift.py:380 +#, python-format +msgid "delete %s finished" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:152 +#, python-format +msgid "Creating iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:171 cinder/brick/iscsi/iscsi.py:264 +#: cinder/brick/iscsi/iscsi.py:400 cinder/brick/iscsi/iscsi.py:409 +#, python-format +msgid "Failed to create iscsi target for volume id:%(vol_id)s." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:181 +#, python-format +msgid "" +"Failed to create iscsi target for volume id:%(vol_id)s. Please ensure " +"your tgtd config file contains 'include %(volumes_dir)s/*'" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:192 +#, python-format +msgid "Removing iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:209 cinder/brick/iscsi/iscsi.py:426 +#, python-format +msgid "Failed to remove iscsi target for volume id:%(vol_id)s." 
+msgstr "" + +#: cinder/brick/iscsi/iscsi.py:218 cinder/brick/iscsi/iscsi.py:434 +msgid "valid iqn needed for show_target" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:270 +#, python-format +msgid "Removing iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:356 +msgid "rtstool is not installed correctly" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:376 +#, python-format +msgid "Creating iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:416 +#, python-format +msgid "Removing iscsi_target: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:455 +#, python-format +msgid "Failed to add initiator iqn %s to target" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:35 +#, python-format +msgid "Unable to find Volume Group: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:41 +#, python-format +msgid "Failed to create Volume Group: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:75 +msgid "Error creating Volume Group" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:76 +#, python-format +msgid "Cmd :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:77 +#, python-format +msgid "StdOut :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:78 +#, python-format +msgid "StdErr :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:82 +#, python-format +msgid "Unable to locate Volume Group %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:265 +#, python-format +msgid "Unable to find VG: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:284 +msgid "" +"Requested to setup thin provisioning, however current LVM version does " +"not support it." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:337 +#, python-format +msgid "Unable to find LV: %s" +msgstr "" + +#: cinder/common/sqlalchemyutils.py:68 +msgid "Id not in sort_keys; is sort_keys unique?" +msgstr "" + +#: cinder/common/sqlalchemyutils.py:116 +msgid "Unknown sort direction, must be 'desc' or 'asc'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:51 +msgid "Use of empty request context is deprecated" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:178 +#, python-format +msgid "Unrecognized read_deleted value '%s'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:786 +#, python-format +msgid "Change will make usage less than 0 for the following resources: %(unders)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1797 cinder/db/sqlalchemy/api.py:1823 +#, python-format +msgid "No backend config with id %(sm_backend_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1880 +#, python-format +msgid "No sm_flavor called %(sm_flavor)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1924 +#, python-format +msgid "No sm_volume with id %(volume_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1987 +#, python-format +msgid "No backup with id %(backup_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:74 +msgid "version should be an integer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:101 +msgid "Upgrade DB using Essex release first." +msgstr "" + +#: cinder/db/sqlalchemy/session.py:71 +#, python-format +msgid "Got mysql server has gone away: %s" +msgstr "" + +#: cinder/db/sqlalchemy/session.py:132 +#, python-format +msgid "SQL connection failed. %s attempts left." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:243 +msgid "Exception while creating table." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:272 +msgid "Downgrade from initial Cinder install is unsupported." 
+msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:85 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:123 +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:82 +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:47 +#, python-format +msgid "Table |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:135 +msgid "quota_classes table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:142 +msgid "quota_usages table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:149 +msgid "reservations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:62 +msgid "Exception while creating table 'volume_glance_metedata'" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:77 +msgid "volume_glance_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:94 +msgid "backups table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:60 +msgid "snapshot_metadata table not dropped" +msgstr "" + +#: cinder/image/glance.py:151 +#, python-format +msgid "" +"Error contacting glance server '%(host)s:%(port)s' for '%(method)s', " +"%(extra)s." +msgstr "" + +#: cinder/image/image_utils.py:116 +msgid "Snapshot list encountered but no header found!" +msgstr "" + +#: cinder/image/image_utils.py:224 +msgid "'qemu-img info' parsing failed." +msgstr "" + +#: cinder/image/image_utils.py:231 +#, python-format +msgid "fmt=%(fmt)s backed by:%(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:248 +#, python-format +msgid "Converted to raw, but format is now %s" +msgstr "" + +#: cinder/image/image_utils.py:278 +#, python-format +msgid "Converted to %(f1)s, but format is now %(f2)s" +msgstr "" + +#: cinder/openstack/common/exception.py:104 +msgid "Uncaught exception" +msgstr "" + +#: cinder/openstack/common/excutils.py:48 +#, python-format +msgid "Original exception being dropped: %s" +msgstr "" + +#: cinder/openstack/common/lockutils.py:102 +#, python-format +msgid "Could not release the acquired lock `%s`" +msgstr "" + +#: cinder/openstack/common/lockutils.py:188 +#, python-format +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:199 +#, python-format +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:226 +#, python-format +msgid "Got file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:234 +#, python-format +msgid "Released file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." 
+msgstr "" + +#: cinder/openstack/common/log.py:226 +#, python-format +msgid "Deprecated: %s" +msgstr "" + +#: cinder/openstack/common/log.py:329 +#, python-format +msgid "Error loading logging config %(log_config)s: %(err_msg)s" +msgstr "" + +#: cinder/openstack/common/log.py:379 +#, python-format +msgid "syslog facility must be one of: %s" +msgstr "" + +#: cinder/openstack/common/log.py:537 +#, python-format +msgid "Fatal call to deprecated config: %(msg)s" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:84 +#, python-format +msgid "task run outlasted interval by %s sec" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:91 +msgid "in fixed duration looping call" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:131 +#, python-format +msgid "Dynamic looping call sleeping for %.02f seconds" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:138 +msgid "in dynamic looping call" +msgstr "" + +#: cinder/openstack/common/policy.py:149 +#, python-format +msgid "" +"Inheritance-based rules are deprecated; use the default brain instead of " +"%s." +msgstr "" + +#: cinder/openstack/common/policy.py:163 +#, python-format +msgid "Failed to understand rule %(match)r" +msgstr "" + +#: cinder/openstack/common/policy.py:173 +#, python-format +msgid "Inheritance-based rules are deprecated; update _check_%s" +msgstr "" + +#: cinder/openstack/common/policy.py:180 +#, python-format +msgid "No handler for matches of kind %s" +msgstr "" + +#: cinder/openstack/common/service.py:112 +#: cinder/openstack/common/service.py:275 +msgid "Full set of CONF:" +msgstr "" + +#: cinder/openstack/common/service.py:261 +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "" + +#: cinder/openstack/common/strutils.py:72 +#, python-format +msgid "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:125 +#, python-format +msgid "%s not in valid priorities" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:141 +#, python-format +msgid "" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:171 +#, python-format +msgid "Failed to load notifier %s. These notifications will not be sent." +msgstr "" + +#: cinder/openstack/common/notifier/rabbit_notifier.py:27 +msgid "The rabbit_notifier is now deprecated. Please use rpc_notifier instead." +msgstr "" + +#: cinder/openstack/common/notifier/rpc_notifier.py:45 +#: cinder/openstack/common/notifier/rpc_notifier2.py:51 +#, python-format +msgid "Could not send notification to %(topic)s. Payload=%(message)s" +msgstr "" + +#: cinder/openstack/common/rpc/__init__.py:106 +#, python-format +msgid "" +"A RPC is being made while holding a lock. The locks currently held are " +"%(locks)s. This is probably a bug. Please report it. Include the " +"following: [%(stack)s]." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:75 +msgid "Pool creating new connection" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:200 +#, python-format +msgid "no calling threads waiting for msg_id : %s, message : %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:208 +#, python-format +msgid "" +"Number of call waiters is greater than warning threshhold: %d. There " +"could be a MulticallProxyWaiter leak." 
+msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:291 +#, python-format +msgid "unpacked context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:337 +#, python-format +msgid "UNIQUE_ID is %s." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:406 +#, python-format +msgid "received %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:414 +#, python-format +msgid "no method for message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:415 +#, python-format +msgid "No method for message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:443 +#: cinder/openstack/common/rpc/impl_zmq.py:286 +#, python-format +msgid "Expected exception during message handling (%s)" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:451 +#: cinder/openstack/common/rpc/impl_zmq.py:292 +msgid "Exception during message handling" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:586 +#, python-format +msgid "Making synchronous call on %s ..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:589 +#, python-format +msgid "MSG_ID is %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:623 +#, python-format +msgid "Making asynchronous cast on %s..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:632 +msgid "Making asynchronous fanout cast..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:660 +#, python-format +msgid "Sending %(event_type)s on %(topic)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:74 +msgid "An unknown RPC related exception occurred." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:104 +#, python-format +msgid "" +"Remote error: %(exc_type)s %(value)s\n" +"%(traceback)s." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:121 +#, python-format +msgid "" +"Timeout while waiting on RPC response - topic: \"%(topic)s\", RPC method:" +" \"%(method)s\" info: \"%(info)s\"" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:137 +#: cinder/openstack/common/rpc/common.py:138 +#: cinder/openstack/common/rpc/common.py:139 +msgid "" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:143 +#, python-format +msgid "Found duplicate message(%(msg_id)s). Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:147 +msgid "Invalid reuse of an RPC connection." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:151 +#, python-format +msgid "Specified RPC version, %(version)s, not supported by this endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:156 +#, python-format +msgid "" +"Specified RPC envelope version, %(version)s, not supported by this " +"endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:280 +#, python-format +msgid "Failed to sanitize %(item)s. Key error %(err)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:302 +#, python-format +msgid "Returning exception %s to caller" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:169 +#: cinder/openstack/common/rpc/impl_qpid.py:133 +msgid "Failed to process message... skipping it." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:480 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:502 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:539 +#, python-format +msgid "" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:555 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:609 +#: cinder/openstack/common/rpc/impl_qpid.py:403 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:627 +#: cinder/openstack/common/rpc/impl_qpid.py:418 +#, python-format +msgid "Timed out waiting for RPC response: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:631 +#: cinder/openstack/common/rpc/impl_qpid.py:422 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:670 +#: cinder/openstack/common/rpc/impl_qpid.py:457 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:351 +#, python-format +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:357 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:370 +msgid "Re-established AMQP queues" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:430 +msgid "Error processing message. Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:97 +msgid "JSON serialization failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:104 +#, python-format +msgid "Deserializing: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:140 +#, python-format +msgid "Connecting to %(addr)s with %(type)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:141 +#, python-format +msgid "-> Subscribed to %(subscribe)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:142 +#, python-format +msgid "-> bind: %(bind)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:150 +msgid "Could not open socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:162 +#, python-format +msgid "Subscribing to %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:204 +msgid "You cannot recv on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:209 +msgid "You cannot send on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:273 +#, python-format +msgid "Running func with context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:311 +msgid "Sending reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:345 +msgid "RPC message did not include method." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:380 +msgid "Registering reactor" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:392 +msgid "In reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:407 +msgid "Out reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:411 +msgid "Consuming socket" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:453 +#, python-format +msgid "CONSUMER GOT %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:465 +#, python-format +msgid "Creating proxy for topic: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:471 +msgid "Topic contained dangerous characters." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:496 +#, python-format +msgid "ROUTER RELAY-OUT SUCCEEDED %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:505 +msgid "Topic socket file creation failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:510 +#, python-format +msgid "ROUTER RELAY-OUT QUEUED %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:513 +#, python-format +msgid "Local per-topic backlog buffer full for topic %(topic)s. Dropping message." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:532 +#, python-format +msgid "Could not create IPC directory %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:542 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:576 +#, python-format +msgid "CONSUMER RECEIVED DATA: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:578 +#, python-format +msgid "ROUTER RELAY-OUT %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:600 +msgid "ZMQ Envelope version unsupported or unknown." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:628 +msgid "Skipping topic registration. Already registered." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:635 +#, python-format +msgid "Consumer is a zmq.%s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:687 +msgid "Creating payload" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:700 +msgid "Creating queue socket for reply waiter" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:713 +msgid "Sending cast" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:716 +msgid "Cast sent; Waiting reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:719 +#, python-format +msgid "Received message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:720 +msgid "Unpacking response" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:729 +msgid "Unsupported or unknown ZMQ envelope returned." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:736 +msgid "RPC Message Invalid." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:760 +#, python-format +msgid "%(msg)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:763 +#, python-format +msgid "Sending message(s) to: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:767 +msgid "No matchmaker results. Not casting." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:770 +msgid "No match from matchmaker." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:53 +msgid "Match not found by MatchMaker." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:89 +msgid "Matchmaker does not implement registration or heartbeat." 
+msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:239 +#, python-format +msgid "Matchmaker unregistered: %s, %s" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:250 +msgid "Register before starting heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:343 +#: cinder/openstack/common/rpc/matchmaker.py:361 +#, python-format +msgid "No key defining hosts for topic '%s', see ringfile" +msgstr "" + +#: cinder/scheduler/chance.py:51 cinder/scheduler/simple.py:90 +msgid "Is the appropriate service running?" +msgstr "" + +#: cinder/scheduler/chance.py:56 +msgid "Could not find another host" +msgstr "" + +#: cinder/scheduler/driver.py:89 +msgid "Must implement a fallback schedule" +msgstr "" + +#: cinder/scheduler/driver.py:93 +msgid "Must implement schedule_create_volume" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:113 +msgid "Invalid value for 'scheduler_max_attempts', must be >=1" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:131 +#, python-format +msgid "" +"Error scheduling %(volume_id)s from last vol-service: %(last_host)s : " +"%(exc)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:160 +#, python-format +msgid "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:208 +#, python-format +msgid "Filtered %(hosts)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:214 +#, python-format +msgid "Choosing %(best_host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:240 +#, python-format +msgid "Ignoring %(service_name)s service update from %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:244 +#, python-format +msgid "Received %(service_name)s service update from %(host)s." +msgstr "" + +#: cinder/scheduler/host_manager.py:266 +msgid "service is down or disabled." +msgstr "" + +#: cinder/scheduler/manager.py:119 +#, python-format +msgid "Failed to schedule_%(method)s: %(ex)s" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:69 +#, python-format +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:78 +#, python-format +msgid "Could not decode scheduler options: '%(e)s'" +msgstr "" + +#: cinder/scheduler/simple.py:79 +msgid "Not enough allocatable volume gigabytes remaining" +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:37 +msgid "Free capacity not set: volume node info collection broken." +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:51 +#, python-format +msgid "" +"Insufficient free space for volume creation (requested / avail): " +"%(requested)s/%(available)s" +msgstr "" + +#: cinder/scheduler/filters/retry_filter.py:41 +#, python-format +msgid "Host %(host)s %(pass_msg)s. 
Previously tried hosts: %(hosts)s"
+msgstr ""
+
+#: cinder/tests/fake_driver.py:45 cinder/volume/driver.py:506
+#, python-format
+msgid "FAKE ISCSI: %s"
+msgstr ""
+
+#: cinder/tests/fake_driver.py:93 cinder/tests/fake_driver.py:98
+#, python-format
+msgid "LoggingVolumeDriver: %s"
+msgstr ""
+
+#: cinder/tests/fake_utils.py:72
+#, python-format
+msgid "Faking execution of cmd (subprocess): %s"
+msgstr ""
+
+#: cinder/tests/fake_utils.py:80
+#, python-format
+msgid "Faked command matched %s"
+msgstr ""
+
+#: cinder/tests/fake_utils.py:96
+#, python-format
+msgid "Faked command raised an exception %s"
+msgstr ""
+
+#: cinder/tests/fake_utils.py:101
+#, python-format
+msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'"
+msgstr ""
+
+#: cinder/tests/test_misc.py:59
+#, python-format
+msgid ""
+"The following migrations are missing a downgrade:\n"
+"\t%s"
+msgstr ""
+
+#: cinder/tests/test_storwize_svc.py:243
+#, python-format
+msgid "unrecognized argument %s"
+msgstr ""
+
+#: cinder/tests/test_storwize_svc.py:1217
+#, python-format
+msgid "Run CLI command: %s"
+msgstr ""
+
+#: cinder/tests/test_storwize_svc.py:1220
+#, python-format
+msgid ""
+"CLI output:\n"
+" stdout: %(stdout)s\n"
+" stderr: %(stderr)s"
+msgstr ""
+
+#: cinder/tests/test_storwize_svc.py:1225
+#, python-format
+msgid ""
+"CLI Exception output:\n"
+" stdout: %(out)s\n"
+" stderr: %(err)s"
+msgstr ""
+
+#: cinder/tests/test_volume_types.py:58
+#, python-format
+msgid "Given data: %s"
+msgstr ""
+
+#: cinder/tests/test_volume_types.py:59
+#, python-format
+msgid "Result data: %s"
+msgstr ""
+
+#: cinder/tests/api/contrib/test_backups.py:638
+msgid "Invalid input"
+msgstr ""
+
+#: cinder/tests/integrated/test_login.py:31
+#, python-format
+msgid "volume: %s"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:37
+#, python-format
+msgid ""
+"%(message)s\n"
+"Status Code: %(_status)s\n"
+"Body: %(_body)s"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:46
+msgid "Authentication error"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:54
+msgid "Authorization error"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:62
+msgid "Item not found"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:104
+#, python-format
+msgid "Doing %(method)s on %(relative_url)s"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:106
+#, python-format
+msgid "Body: %s"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:124
+#, python-format
+msgid "%(auth_uri)s => code %(http_status)s"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:150
+#, python-format
+msgid "%(relative_uri)s => code %(http_status)s"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:160
+msgid "Unexpected status code"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:167
+#, python-format
+msgid "Decoding JSON: %s"
+msgstr ""
+
+#: cinder/volume/api.py:96
+msgid "May specify only one of snapshot, imageRef or source volume"
+msgstr ""
+
+#: cinder/volume/api.py:103 cinder/volume/api.py:449
+#: cinder/volume/manager.py:554
+msgid "status must be available"
+msgstr ""
+
+#: cinder/volume/api.py:108
+msgid "Volume size cannot be less than the snapshot size"
+msgstr ""
+
+#: cinder/volume/api.py:117
+msgid "Unable to clone volumes that are in an error state"
+msgstr ""
+
+#: cinder/volume/api.py:123
+msgid "Clones currently must be >= original volume size."
+msgstr "" + +#: cinder/volume/api.py:140 +#, python-format +msgid "Volume size '%s' must be an integer and greater than 0" +msgstr "" + +#: cinder/volume/api.py:150 +msgid "Size of specified image is larger than volume size." +msgstr "" + +#: cinder/volume/api.py:154 +msgid "Image minDisk size is larger than the volume size." +msgstr "" + +#: cinder/volume/api.py:168 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/api.py:177 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create volume (%(d_consumed)d " +"volumes already consumed)" +msgstr "" + +#: cinder/volume/api.py:308 +msgid "Failed to update quota for deleting volume" +msgstr "" + +#: cinder/volume/api.py:316 +msgid "Volume status must be available or error" +msgstr "" + +#: cinder/volume/api.py:321 +#, python-format +msgid "Volume still has %d dependent snapshots" +msgstr "" + +#: cinder/volume/api.py:374 cinder/volume/api.py:432 +#: cinder/volume/volume_types.py:65 +#, python-format +msgid "Searching by: %s" +msgstr "" + +#: cinder/volume/api.py:452 +msgid "already attached" +msgstr "" + +#: cinder/volume/api.py:459 +msgid "already detached" +msgstr "" + +#: cinder/volume/api.py:470 +msgid "Volume status must be available to reserve" +msgstr "" + +#: cinder/volume/api.py:519 +msgid "must be available" +msgstr "" + +#: cinder/volume/api.py:537 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG snapshot " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/api.py:546 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create snapshot (%(d_consumed)d " +"snapshots already consumed)" +msgstr "" + +#: cinder/volume/api.py:595 +msgid "Volume Snapshot status must be available or error" +msgstr "" + +#: cinder/volume/api.py:623 +msgid "Metadata property key blank" +msgstr "" + +#: cinder/volume/api.py:627 +msgid "Metadata property key greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:631 +msgid "Metadata property value greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:716 +msgid "Volume status must be available/in-use." +msgstr "" + +#: cinder/volume/api.py:719 +msgid "Volume status is in-use." +msgstr "" + +#: cinder/volume/driver.py:94 cinder/volume/drivers/netapp/nfs.py:253 +#, python-format +msgid "Recovering from a failed execute. Try number %s" +msgstr "" + +#: cinder/volume/driver.py:221 cinder/volume/drivers/emc/emc_smis_iscsi.py:119 +msgid "ISCSI provider_location not stored, using discovery" +msgstr "" + +#: cinder/volume/driver.py:269 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/driver.py:273 cinder/volume/drivers/emc/emc_smis_iscsi.py:162 +#, python-format +msgid "ISCSI Discovery: Found %s" +msgstr "" + +#: cinder/volume/driver.py:361 +#, python-format +msgid "copy_image_to_volume %s." +msgstr "" + +#: cinder/volume/driver.py:378 +#, python-format +msgid "copy_volume_to_image %s." +msgstr "" + +#: cinder/volume/driver.py:437 +#, python-format +msgid "iSCSI device not found at %s" +msgstr "" + +#: cinder/volume/driver.py:439 +#, python-format +msgid "" +"ISCSI volume not yet found at: %(host_device)s. Will rescan & retry. 
Try" +" number: %(tries)s" +msgstr "" + +#: cinder/volume/driver.py:451 +#, python-format +msgid "Found iSCSI node %(host_device)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/volume/driver.py:469 cinder/volume/manager.py:689 +#: cinder/volume/drivers/lvm.py:560 cinder/volume/drivers/lvm.py:676 +#: cinder/volume/drivers/storwize_svc.py:1344 +#: cinder/volume/drivers/emc/emc_smis_common.py:855 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:241 +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1503 +#: cinder/volume/drivers/netapp/iscsi.py:1114 +#: cinder/volume/drivers/netapp/iscsi.py:1484 +#: cinder/volume/drivers/netapp/iscsi.py:2236 +#: cinder/volume/drivers/netapp/iscsi.py:2515 +#: cinder/volume/drivers/nexenta/volume.py:317 +msgid "Updating volume status" +msgstr "" + +#: cinder/volume/driver.py:544 +msgid "Driver must implement initialize_connection" +msgstr "" + +#: cinder/volume/manager.py:122 +#, python-format +msgid "Driver path %s is deprecated, update your configuration to the new path." +msgstr "" + +#: cinder/volume/manager.py:144 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "" + +#: cinder/volume/manager.py:149 +#, python-format +msgid "volume %s stuck in a downloading state" +msgstr "" + +#: cinder/volume/manager.py:154 +#, python-format +msgid "volume %s: skipping export" +msgstr "" + +#: cinder/volume/manager.py:156 +msgid "Resuming any in progress delete operations" +msgstr "" + +#: cinder/volume/manager.py:159 +#, python-format +msgid "Resuming delete on volume: %s" +msgstr "" + +#: cinder/volume/manager.py:219 +#, python-format +msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +msgstr "" + +#: cinder/volume/manager.py:228 +#, python-format +msgid "volume %s: creating from snapshot" +msgstr "" + +#: cinder/volume/manager.py:232 +#, python-format +msgid "volume %s: creating from existing volume" +msgstr "" + +#: cinder/volume/manager.py:236 +#, python-format +msgid "volume %s: creating from image" +msgstr "" + +#: cinder/volume/manager.py:245 +#, python-format +msgid "volume %s: creating" +msgstr "" + +#: cinder/volume/manager.py:277 +#, python-format +msgid "volume %s: creating export" +msgstr "" + +#: cinder/volume/manager.py:286 +#, python-format +msgid "volume %s: create failed" +msgstr "" + +#: cinder/volume/manager.py:316 +#, python-format +msgid "volume %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:324 +#, python-format +msgid "Error: %s" +msgstr "" + +#: cinder/volume/manager.py:350 +#, python-format +msgid "volume %s: Error trying to reschedule create" +msgstr "" + +#: cinder/volume/manager.py:368 +msgid "Retry info not present, will not reschedule" +msgstr "" + +#: cinder/volume/manager.py:372 +msgid "No request spec, will not reschedule" +msgstr "" + +#: cinder/volume/manager.py:377 +#, python-format +msgid "volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d" +msgstr "" + +#: cinder/volume/manager.py:406 cinder/volume/manager.py:419 +#, python-format +msgid "volume %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:412 +msgid "volume is not local to this node" +msgstr "" + +#: cinder/volume/manager.py:417 +#, python-format +msgid "volume %s: removing export" +msgstr "" + +#: cinder/volume/manager.py:422 +#, python-format +msgid "volume %s: volume is busy" +msgstr "" + +#: cinder/volume/manager.py:441 +msgid "Failed to update usages deleting volume" +msgstr "" + +#: cinder/volume/manager.py:445 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "" + +#: 
cinder/volume/manager.py:460
+#, python-format
+msgid "snapshot %s: creating"
+msgstr ""
+
+#: cinder/volume/manager.py:466
+#, python-format
+msgid "snapshot %(snap_name)s: creating"
+msgstr ""
+
+#: cinder/volume/manager.py:484
+#, python-format
+msgid "snapshot %s: created successfully"
+msgstr ""
+
+#: cinder/volume/manager.py:492 cinder/volume/manager.py:502
+#, python-format
+msgid "snapshot %s: deleting"
+msgstr ""
+
+#: cinder/volume/manager.py:505
+#, python-format
+msgid "snapshot %s: snapshot is busy"
+msgstr ""
+
+#: cinder/volume/manager.py:530
+msgid "Failed to update usages deleting snapshot"
+msgstr ""
+
+#: cinder/volume/manager.py:533
+#, python-format
+msgid "snapshot %s: deleted successfully"
+msgstr ""
+
+#: cinder/volume/manager.py:551
+msgid "being attached by another instance"
+msgstr ""
+
+#: cinder/volume/manager.py:608
+#, python-format
+msgid "Downloaded image %(image_id)s to %(volume_id)s successfully"
+msgstr ""
+
+#: cinder/volume/manager.py:626
+#, python-format
+msgid "Uploaded volume %(volume_id)s to image (%(image_id)s) successfully"
+msgstr ""
+
+#: cinder/volume/manager.py:702
+msgid "Clear capabilities"
+msgstr ""
+
+#: cinder/volume/manager.py:706
+#, python-format
+msgid "Notification {%s} received"
+msgstr ""
+
+#: cinder/volume/volume_types.py:41
+#, python-format
+msgid "DB error: %s"
+msgstr ""
+
+#: cinder/volume/volume_types.py:50 cinder/volume/volume_types.py:97
+msgid "id cannot be None"
+msgstr ""
+
+#: cinder/volume/volume_types.py:109
+msgid "name cannot be None"
+msgstr ""
+
+#: cinder/volume/volume_types.py:128
+#, python-format
+msgid ""
+"Default volume type is not found, please check default_volume_type "
+"config: %s"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:91
+msgid "Running with CoraidDriver for ESM EtherCloud"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:102
+#, python-format
+msgid "Update session cookie %(session)s"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:108 cinder/volume/drivers/coraid.py:183
+#, python-format
+msgid "Message: %(message)s"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:125
+#, python-format
+msgid "Error while trying to set group: %(message)s"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:128
+#, python-format
+msgid "Unable to find group: %(group)s"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:161
+msgid "ESM urlOpen error"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:166
+msgid "JSON Error"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:170
+msgid "Request without URL"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:175
+#, python-format
+msgid "Configure data: %s"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:177
+#, python-format
+msgid "Configure response: %s"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:199
+#, python-format
+msgid "Unable to retrieve volume info for volume %(volname)s"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:289
+msgid "Cannot log in to Coraid ESM"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:308
+#, python-format
+msgid "Failed to create volume %(volname)s"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:321
+#, python-format
+msgid "Failed to delete volume %(volname)s"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:335
+#, python-format
+msgid "Failed to Create Snapshot %(snapname)s"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:347
+#, python-format
+msgid "Failed to Delete Snapshot %(snapname)s"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:362
+#, python-format
+msgid "Failed to Create Volume from Snapshot 
%(snapname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:383 +#, python-format +msgid "" +"Failed to Initialize Connection. Volume Name: %(volname)s Shelf: " +"%(shelf)s, Lun: %(lun)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:65 +#, python-format +msgid "There's no Gluster config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:70 +#, python-format +msgid "Gluster config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:80 +msgid "mount.glusterfs is not installed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:98 cinder/volume/drivers/nfs.py:168 +#, python-format +msgid "casted to %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:108 cinder/volume/drivers/nfs.py:178 +#, python-format +msgid "Volume %s does not have provider_location specified, skipping" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:168 cinder/volume/drivers/nfs.py:237 +#, python-format +msgid "Exception during mounting %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:247 cinder/volume/drivers/nfs.py:321 +#, python-format +msgid "%s is already mounted" +msgstr "" + +#: cinder/volume/drivers/lvm.py:82 +#, python-format +msgid "volume group %s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/lvm.py:197 +#, python-format +msgid "Size for volume: %s not found, skipping secure delete." +msgstr "" + +#: cinder/volume/drivers/lvm.py:204 +#, python-format +msgid "Performing secure delete on volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:218 +#, python-format +msgid "Error unrecognized volume_clear option: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:238 +#, python-format +msgid "snapshot: %s not found, skipping delete operations" +msgstr "" + +#: cinder/volume/drivers/lvm.py:268 cinder/volume/drivers/lvm.py:656 +#, python-format +msgid "Creating clone of volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:345 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provision for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:364 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:378 +msgid "Detected inconsistency in provider_location id" +msgstr "" + +#: cinder/volume/drivers/lvm.py:430 +#, python-format +msgid "Symbolic link %s not found" +msgstr "" + +#: cinder/volume/drivers/lvm.py:510 cinder/volume/drivers/lvm.py:524 +#, python-format +msgid "Skipping remove_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:542 +#, python-format +msgid "" +"Skipping remove_export. 
No iscsi_target is presently exported for volume:"
+" %s"
+msgstr ""
+
+#: cinder/volume/drivers/lvm.py:583
+msgid "Error retrieving volume status: "
+msgstr ""
+
+#: cinder/volume/drivers/nfs.py:141
+#, python-format
+msgid "There's no NFS config file configured (%s)"
+msgstr ""
+
+#: cinder/volume/drivers/nfs.py:146
+#, python-format
+msgid "NFS config file at %(config)s doesn't exist"
+msgstr ""
+
+#: cinder/volume/drivers/rbd.py:64
+#, python-format
+msgid "rbd has no pool %s"
+msgstr ""
+
+#: cinder/volume/drivers/rbd.py:87 cinder/volume/drivers/sheepdog.py:134
+msgid "error refreshing volume stats"
+msgstr ""
+
+#: cinder/volume/drivers/rbd.py:220
+msgid "Not stored in rbd"
+msgstr ""
+
+#: cinder/volume/drivers/rbd.py:224
+msgid "Blank components"
+msgstr ""
+
+#: cinder/volume/drivers/rbd.py:227
+msgid "Not an rbd snapshot"
+msgstr ""
+
+#: cinder/volume/drivers/rbd.py:242
+#, python-format
+msgid "%s is in a different ceph cluster"
+msgstr ""
+
+#: cinder/volume/drivers/rbd.py:253
+#, python-format
+msgid "Unable to read image %s"
+msgstr ""
+
+#: cinder/volume/drivers/scality.py:63
+msgid "Value required for 'scality_sofs_config'"
+msgstr ""
+
+#: cinder/volume/drivers/scality.py:74
+#, python-format
+msgid "Cannot access 'scality_sofs_config': %s"
+msgstr ""
+
+#: cinder/volume/drivers/scality.py:80
+msgid "Cannot execute /sbin/mount.sofs"
+msgstr ""
+
+#: cinder/volume/drivers/scality.py:101
+msgid "Cannot mount Scality SOFS, check syslog for errors"
+msgstr ""
+
+#: cinder/volume/drivers/scality.py:135
+#, python-format
+msgid "Cannot find volume dir for Scality SOFS at '%s'"
+msgstr ""
+
+#: cinder/volume/drivers/sheepdog.py:47
+#, python-format
+msgid "Sheepdog is not working: %s"
+msgstr ""
+
+#: cinder/volume/drivers/sheepdog.py:52
+msgid "Sheepdog is not working"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:129
+#, python-format
+msgid "Payload for SolidFire API call: %s"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:146
+#, python-format
+msgid "Call to json.loads() raised an exception: %s"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:152
+#, python-format
+msgid "Results of SolidFire API call: %s"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:156
+#, python-format
+msgid "Clone operation encountered: %s"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:158
+#, python-format
+msgid "Waiting for outstanding operation before retrying snapshot: %s"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:164
+#, python-format
+msgid "Detected xDBVersionMismatch, retry %s of 5"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:171
+#, python-format
+msgid "API response: %s"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:191
+#, python-format
+msgid "Found solidfire account: %s"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:220
+#, python-format
+msgid "solidfire account: %s does not exist, creating it..."
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:281
+#, python-format
+msgid "Failed to retrieve volume SolidFire-ID: %s in get_by_account!"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:338
+msgid "Failed to get model update from clone"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:364
+#, python-format
+msgid "More than one valid preset was detected, using %s"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:397
+#, python-format
+msgid "Mapped SolidFire volumeID %(sfid)s to cinder ID %(uuid)s."
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:406
+#, python-format
+msgid "Volume %s not found on SF Cluster."
+msgstr "" + +#: cinder/volume/drivers/solidfire.py:409 +#, python-format +msgid "Found %(count)s volumes mapped to id: %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:476 +msgid "Enter SolidFire delete_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:480 +#, python-format +msgid "Account for Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:482 +msgid "This usually means the volume was never succesfully created." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:497 +#, python-format +msgid "Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:500 +msgid "Leaving SolidFire delete_volume" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:504 +msgid "Executing SolidFire ensure_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:509 +msgid "Executing SolidFire create_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:558 +msgid "Updating cluster status info" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:566 +msgid "Failed to get updated stats" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:173 +#, python-format +msgid "WWPN on node %(node)s: %(wwpn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:179 +msgid "enter: do_setup" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:190 +#, python-format +msgid "pool %s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:206 +msgid "Failed to get license information." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:265 +msgid "do_setup: No configured nodes" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:267 +msgid "leave: do_setup" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:289 +msgid "enter: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:295 +#: cinder/volume/drivers/netapp/iscsi.py:169 +#: cinder/volume/drivers/netapp/iscsi.py:1182 +#: cinder/volume/drivers/netapp/iscsi.py:1535 +#: cinder/volume/drivers/netapp/nfs.py:109 +#: cinder/volume/drivers/netapp/nfs.py:328 +#: cinder/volume/drivers/netapp/nfs.py:379 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:118 +#, python-format +msgid "%s is not set" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:301 +msgid "" +"Password or SSH private key is required for authentication: set either " +"san_password or san_private_key option" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:309 +#, python-format +msgid "" +"Illegal value %d specified for storwize_svc_flashcopy_timeout: valid " +"values are between 0 and 600" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:317 +msgid "leave: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:331 +#, python-format +msgid "ensure_export: Volume %s not found on storage" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:356 +#, python-format +msgid "enter: _get_chap_secret_for_host: host name %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:392 +#, python-format +msgid "" +"leave: _get_chap_secret_for_host: host name %(host_name)s with secret " +"%(chap_secret)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:415 +msgid "_create_host: Cannot clean host name. 
Host name is not unicode or string"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:483
+#, python-format
+msgid "enter: _get_host_from_connector: prefix %s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:511
+#, python-format
+msgid "leave: _get_host_from_connector: host %s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:523
+#, python-format
+msgid "enter: _create_host: host %s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:538
+msgid "_create_host: No connector ports"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:551
+#, python-format
+msgid "leave: _create_host: host %(host)s - %(host_name)s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:574
+#, python-format
+msgid "enter: _map_vol_to_host: volume %(volume_name)s to host %(host_name)s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:609
+msgid ""
+"storwize_svc_multihostmap_enabled is set to False, multi host mapping is "
+"not allowed"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:619
+#, python-format
+msgid "volume %s is mapped to multiple hosts"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:625
+#, python-format
+msgid ""
+"leave: _map_vol_to_host: LUN %(result_lun)s, volume %(volume_name)s, host"
+" %(host_name)s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:635
+#, python-format
+msgid "enter: _delete_host: host %s "
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:643
+#, python-format
+msgid "leave: _delete_host: host %s "
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:675
+#, python-format
+msgid "enter: initialize_connection: volume %(vol)s with connector %(conn)s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:691
+msgid "_create_host failed to return the host name."
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:702
+#, python-format
+msgid "initialize_connection: Failed to get attributes for volume %s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:709
+#, python-format
+msgid "Did not find expected column name in lsvdisk: %s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:711
+#, python-format
+msgid "initialize_connection: Missing volume attribute for volume %s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:728
+#, python-format
+msgid ""
+"initialize_connection: No node found in I/O group %(gid)s for volume "
+"%(vol)s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:736
+#, python-format
+msgid "initialize_connection: Did not find a preferred node for volume %s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:769
+#, python-format
+msgid ""
+"initialize_connection: Failed to collect return properties for volume "
+"%(vol)s and connector %(conn)s.\n"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:774
+#, python-format
+msgid ""
+"leave: initialize_connection:\n"
+" volume: %(vol)s\n"
+" connector %(conn)s\n"
+" properties: %(prop)s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:792
+#, python-format
+msgid "enter: terminate_connection: volume %(vol)s with connector %(conn)s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:802
+msgid "_get_host_from_connector failed to return the host name for connector"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:817
+#, python-format
+msgid ""
+"terminate_connection: No mapping of volume %(vol_name)s to host "
+"%(host_name)s found"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:825
+#, python-format
+msgid "leave: terminate_connection: volume %(vol)s with connector %(conn)s"
+msgstr ""
+
+#: 
cinder/volume/drivers/storwize_svc.py:883
+msgid "protocol must be specified as 'iSCSI' or 'FC'"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:907
+#, python-format
+msgid "enter: _create_vdisk: vdisk %s "
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:945
+#, python-format
+msgid ""
+"_create_vdisk %(name)s - did not find success message in CLI output.\n"
+" stdout: %(out)s\n"
+" stderr: %(err)s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:950
+#, python-format
+msgid "leave: _create_vdisk: volume %s "
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:962
+#: cinder/volume/drivers/storwize_svc.py:976
+#, python-format
+msgid ""
+"create FC mapping from %(source)s to %(target)s - did not find success "
+"message in CLI output.\n"
+" stdout: %(out)s\n"
+" stderr: %(err)s\n"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:988
+#: cinder/volume/drivers/storwize_svc.py:998
+#, python-format
+msgid ""
+"create FC mapping from %(source)s to %(target)s - did not find mapping id"
+" in CLI output.\n"
+" stdout: %(out)s\n"
+" stderr: %(err)s\n"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:1012
+#, python-format
+msgid ""
+"_prepare_fc_map: Failed to prepare FlashCopy from %(source)s to "
+"%(target)s.\n"
+"stdout: %(out)s\n"
+" stderr: %(err)s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:1039
+#, python-format
+msgid ""
+"Unexpected mapping status %(status)s for mapping %(id)s. Attributes: "
+"%(attr)s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:1050
+#, python-format
+msgid ""
+"Mapping %(id)s prepare failed to complete within the allotted %(to)d "
+"seconds timeout. Terminating."
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:1055
+#, python-format
+msgid ""
+"_prepare_fc_map: Failed to start FlashCopy from %(source)s to %(target)s "
+"with exception %(ex)s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:1062
+#, python-format
+msgid "_prepare_fc_map: %s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:1069
+#, python-format
+msgid ""
+"_start_fc_map: Failed to start FlashCopy from %(source)s to %(target)s.\n"
+"stdout: %(out)s\n"
+" stderr: %(err)s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:1080
+#, python-format
+msgid ""
+"enter: _run_flashcopy: execute FlashCopy from source %(source)s to target"
+" %(target)s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:1092
+#, python-format
+msgid "leave: _run_flashcopy: FlashCopy started from %(source)s to %(target)s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:1100
+#, python-format
+msgid "enter: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:1107
+#, python-format
+msgid "_create_copy: Source vdisk %s does not exist"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:1119
+#, python-format
+msgid ""
+"_create_copy: cannot get source vdisk %(src)s capacity from vdisk "
+"attributes %(attr)s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:1129
+#, python-format
+msgid "leave: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:1134
+#, python-format
+msgid "enter: _get_flashcopy_mapping_attributes: mapping %s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:1156
+#, python-format
+msgid ""
+"leave: _get_flashcopy_mapping_attributes: mapping %(fc_map_id)s, "
+"attributes %(attributes)s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:1164
+#, python-format
+msgid 
"enter: _is_vdisk_defined: vdisk %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1166 +#, python-format +msgid "leave: _is_vdisk_defined: vdisk %(vol)s with %(str)s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1194 +#, python-format +msgid "enter: _delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1199 +#, python-format +msgid "warning: Tried to delete vdisk %s but it does not exist." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1225 +#, python-format +msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1258 +#, python-format +msgid "leave: _delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1283 +msgid "create_volume_from_snapshot: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1297 +msgid "create_cloned_volume: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1361 +msgid "_update_volume_status: Could not get system name" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1373 +msgid "Could not get pool data from the storage" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1374 +msgid "_update_volume_status: Could not get storage pool data" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1412 +msgid "" +"Illegal value specified for storwize_svc_vol_rsize: set to either a " +"percentage (0-100) or -1" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1418 +msgid "" +"Illegal value specified for storwize_svc_vol_warning: set to a percentage" +" (0-100)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1425 +msgid "" +"Illegal value specified for storwize_svc_vol_grainsize: set to either 32," +" 64, 128, or 256" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1432 +msgid "System does not support compression" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1437 +msgid "If compression is set to True, rsize must also be set (not equal to -1)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1443 +#, python-format +msgid "" +"Illegal value %(prot)s specified for storwize_svc_connection_protocol: " +"valid values are %(enabled)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1452 +msgid "" +"Multipath is currently only supported for FC connections and not iSCSI. 
" +"(This is a Nova limitation.)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1464 +#, python-format +msgid "enter: _execute_command_and_parse_attributes: command %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1471 +#, python-format +msgid "" +"CLI Exception output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1488 +#, python-format +msgid "" +"leave: _execute_command_and_parse_attributes:\n" +"command: %(cmd)s\n" +"attributes: %(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1507 +#, python-format +msgid "" +"_get_hdr_dic: attribute headers and values do not match.\n" +" Headers: %(header)s\n" +" Values: %(row)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1517 +#, python-format +msgid "" +"%(fun)s: Failed with unexpected CLI output.\n" +" Command: %(cmd)s\n" +"stdout: %(out)s\n" +"stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1531 +#, python-format +msgid "" +"%(fun)s: Failed with unexpected CLI output.\n" +" Command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1539 +#, python-format +msgid "Did not find expected column in %(fun)s: %(hdr)s" +msgstr "" + +#: cinder/volume/drivers/windows.py:142 +#, python-format +msgid "Creating folder %s " +msgstr "" + +#: cinder/volume/drivers/windows.py:207 +#: cinder/volume/drivers/nexenta/volume.py:203 +#, python-format +msgid "Ignored target creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/windows.py:213 +#, python-format +msgid "Disk not found: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:218 +#, python-format +msgid "Sending %(method)s to %(url)s. Body \"%(body)s\"" +msgstr "" + +#: cinder/volume/drivers/zadara.py:240 +#, python-format +msgid "Operation completed. %(data)s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:350 +#, python-format +msgid "Volume %(name)s could not be found. It might be already deleted" +msgstr "" + +#: cinder/volume/drivers/zadara.py:447 +#, python-format +msgid "Attach properties: %(properties)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:42 +msgid "" +"Module PyWBEM not installed. Install PyWBEM using the python-pywbem " +"package." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:78 +msgid "Entering create_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:82 +#, python-format +msgid "Create Volume: %(volume)s Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:90 +#, python-format +msgid "Create Volume: %(volume)s Storage type: %(storage_type)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:97 +#, python-format +msgid "" +"Create Volume: %(volume)s Pool: %(pool)s Storage System: " +"%(storage_system)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:106 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Storage Configuration Service not " +"found for pool %(storage_type)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:114 +#, python-format +msgid "" +"Create Volume: %(name)s Method: CreateOrModifyElementFromStoragePool " +"ConfigServicie: %(service)s ElementName: %(name)s InPool: %(pool)s " +"ElementType: 5 Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:129 +#, python-format +msgid "Create Volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:136 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:143 +#, python-format +msgid "Leaving create_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:151 +msgid "Entering create_volume_from_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:156 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:166 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Snapshot Instance: %(snapshotinstance)s Storage " +"System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:176 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Create Volume from Snapshot is NOT supported on VMAX." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:187 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Cannot find Replication Service to create volume from " +"snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:196 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Method: CreateElementReplica ReplicationService: " +"%(service)s ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:217 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s " +"Snapshot:%(snapshotname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:229 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Successfully clone volume from snapshot. Finding the " +"clone relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:240 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Remove the clone relationship. Method: " +"ModifyReplicaSynchronization ReplicationService: %(service)s Operation: " +"8 Synchronization: %(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:256 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:265 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. 
Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:277 +#, python-format +msgid "" +"Leaving create_volume_from_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:286 +msgid "Entering create_cloned_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:291 +#, python-format +msgid "" +"Create a Clone from Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:301 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Source Instance: %(src_instance)s Storage System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:311 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Cannot find Replication Service to create cloned volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:320 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Method: CreateElementReplica ReplicationService: %(service)s " +"ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:341 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source " +"Volume:%(srcname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:353 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Successfully cloned volume from source volume. Finding the clone " +"relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:364 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Remove the clone relationship. Method: ModifyReplicaSynchronization " +"ReplicationService: %(service)s Operation: 8 Synchronization: " +"%(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:380 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:389 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:401 +#, python-format +msgid "" +"Leaving create_cloned_volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:410 +msgid "Entering delete_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:412 +#, python-format +msgid "Delete Volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:419 +#, python-format +msgid "Volume %(name)s not found on the array. No volume to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:429 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Storage Configuration Service not " +"found." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:437 +#, python-format +msgid "Delete Volume: %(name)s DeviceID: %(deviceid)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:441 +#, python-format +msgid "" +"Delete Volume: %(name)s Method: EMCReturnToStoragePool ConfigServic: " +"%(service)s TheElement: %(vol_instance)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:454 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:463 +#, python-format +msgid "Leaving delete_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:470 +msgid "Entering create_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:474 +#, python-format +msgid "Create snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:486 +#, python-format +msgid "Device ID: %(deviceid)s: Storage System: %(storagesystem)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:493 +#: cinder/volume/drivers/emc/emc_smis_common.py:495 +#: cinder/volume/drivers/emc/emc_smis_common.py:565 +#, python-format +msgid "Cannot find Replication Service to create snapshot for volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:500 +#, python-format +msgid "" +"Create Snapshot: Method: CreateElementReplica: Target: %(snapshot)s " +"Source: %(volume)s Replication Service: %(service)s ElementName: " +"%(elementname)s Sync Type: 7 SourceElement: %(sourceelement)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:516 +#, python-format +msgid "" +"Create Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:525 +#, python-format +msgid "Error Create Snapshot: (snapshot)s Volume: %(volume)s Error: %(errordesc)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:533 +#, python-format +msgid "" +"Leaving create_snapshot: Snapshot: %(snapshot)s Volume: %(volume)s " +"Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:539 +msgid "Entering delete_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:543 +#, python-format +msgid "Delete Snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:549 +#, python-format +msgid "" +"Delete Snapshot: %(snapshot)s: volume: %(volume)s. Finding " +"StorageSychronization_SV_SV." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:557 +#, python-format +msgid "" +"Snapshot: %(snapshot)s: volume: %(volume)s not found on the array. No " +"snapshot to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:572 +#, python-format +msgid "" +"Delete Snapshot: Target: %(snapshot)s Source: %(volume)s. Method: " +"ModifyReplicaSynchronization: Replication Service: %(service)s " +"Operation: 19 Synchronization: %(sync_name)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:588 +#, python-format +msgid "" +"Delete Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:597 +#, python-format +msgid "" +"Error Delete Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. 
Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:609 +#, python-format +msgid "" +"Leaving delete_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:619 +#, python-format +msgid "Create export: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:624 +#, python-format +msgid "create_export: Volume: %(volume)s Device ID: %(device_id)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:646 +#, python-format +msgid "" +"ExposePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(lun_name)s" +" InitiatorPortIDs: %(initiator)s DeviceAccesses: 2" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:661 +#, python-format +msgid "ExposePaths parameter LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:672 +#, python-format +msgid "Error mapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:676 +#, python-format +msgid "ExposePaths for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:692 +#, python-format +msgid "" +"HidePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(device_id)s " +"LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:705 +#, python-format +msgid "Error unmapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:709 +#, python-format +msgid "HidePaths for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:722 +#, python-format +msgid "" +"AddMembers: ConfigServicie: %(service)s MaskingGroup: %(masking_group)s" +" Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:735 +#, python-format +msgid "Error mapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:740 +#, python-format +msgid "AddMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:753 +#, python-format +msgid "" +"RemoveMembers: ConfigServicie: %(service)s MaskingGroup: " +"%(masking_group)s Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:766 +#, python-format +msgid "Error unmapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:771 +#, python-format +msgid "RemoveMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:777 +#, python-format +msgid "Map volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:786 +#: cinder/volume/drivers/emc/emc_smis_common.py:816 +#, python-format +msgid "Cannot find Controller Configuration Service for storage system %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:800 +#, python-format +msgid "Unmap volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:806 +#, python-format +msgid "Volume %s is not mapped. No volume to unmap." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:830 +#, python-format +msgid "Initialize connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:836 +#, python-format +msgid "Volume %s is already mapped." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:848 +#, python-format +msgid "Terminate connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:880 +#, python-format +msgid "Found Storage Type: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:883 +msgid "Storage type not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:899 +#, python-format +msgid "Found Masking View: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:902 +msgid "Masking View not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:924 +msgid "Ecom user not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:944 +#, python-format +msgid "Ecom IP: %(ecomIp)s Port: %(ecomPort)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:947 +msgid "Ecom server not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:954 +msgid "Cannot connect to ECOM server" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:966 +#, python-format +msgid "Found Replication Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:979 +#, python-format +msgid "Found Storage Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:992 +#, python-format +msgid "Found Controller Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1005 +#, python-format +msgid "Found Storage Hardware ID Management Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1049 +#, python-format +msgid "Pool %(storage_type)s is not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1055 +#, python-format +msgid "Storage system not found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1061 +#, python-format +msgid "Pool: %(pool)s SystemName: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1077 +#, python-format +msgid "Pool name: %(poolname)s System name: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1109 +#, python-format +msgid "Volume %(volumename)s not found on the array." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1112 +#, python-format +msgid "Volume name: %(volumename)s Volume instance: %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1125 +#, python-format +msgid "Source: %(volumename)s Target: %(snapshotname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1148 +#, python-format +msgid "" +"Source: %(volumename)s Target: %(snapshotname)s. Storage Synchronized " +"not found. " +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1153 +#, python-format +msgid "" +"Storage system: %(storage_system)s Storage Synchronized instance: " +"%(sync)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1179 +#, python-format +msgid "Error finding %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1183 +#, python-format +msgid "Found %(name)s: %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1243 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage_system)s " +"and initiator %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1282 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage volume %(vol)s and initiator" +" %(initiator)s is %(ctrl)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1295 +#, python-format +msgid "" +"Volume %(name)s not found on the array. Cannot determine if there are " +"volumes mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1307 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage)s and " +"%(connector)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1319 +#, python-format +msgid "" +"Found %(numVolumesMapped)d volumes on storage system %(storage)s mapped " +"to %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1354 +#, python-format +msgid "Available device number on %(storage)s: %(device)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1397 +#, python-format +msgid "Device number not found for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1402 +#, python-format +msgid "Found device number %(device)d for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1412 +#, python-format +msgid "Device info: %(data)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1434 +#, python-format +msgid "Masking view: %(view)s DeviceMaskingGroup: %(masking)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1456 +#, python-format +msgid "Found Storage Processor System: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1484 +#, python-format +msgid "" +"iSCSIProtocolEndpoint for storage system %(storage_system)s and SP %(sp)s" +" is %(endpoint)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1513 +msgid "Error finding Storage Hardware ID Service." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1519 +#, python-format +msgid "" +"EMCGetTargetEndpoints: Service: %(service)s Storage HardwareIDs: " +"%(hardwareids)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1531 +msgid "Error finding Target WWNs." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1541 +#, python-format +msgid "Add target WWN: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1543 +#, python-format +msgid "Target WWNs: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1559 +#, python-format +msgid "Storage Hardware IDs for %(wwpns)s is %(foundInstances)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:158 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:167 +#, python-format +msgid "Cannot find device number for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:197 +#, python-format +msgid "Found iSCSI endpoint: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:204 +#, python-format +msgid "ISCSI endpoint not found for SP %(sp)s on storage system %(storage)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:221 +#, python-format +msgid "ISCSI properties: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:113 +msgid "read timed out" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:138 +msgid "do_setup." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:144 +msgid "check_for_setup_error." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:149 +msgid "check_for_setup_error: Can not get device type." 
+msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:153 +#, python-format +msgid "check_for_setup_error: Device type is:%(type)s, version is:%(version)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:160 +msgid "" +"check_for_setup_error: Product version not right. Please make sure the " +"product version is V1." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:171 +msgid "_get_device_type: Storage Pool must be configured." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:180 +#, python-format +msgid "create_volume:volume name: %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:194 +#, python-format +msgid "delete_volume: volume name: %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:201 +#, python-format +msgid "delete_volume:No need to delete volume. Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:210 +#, python-format +msgid "create_export: volume name:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:214 +#, python-format +msgid "create_export:Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:234 +#, python-format +msgid "initialize_connection: volume name: %(volume)s. initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:249 +#, python-format +msgid "" +"initialize_connection:Failed to find target ip for " +"initiator:%(initiatorname)s, please check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:259 +#, python-format +msgid "initialize_connection:Failed to find target iSCSI iqn. Target IP:%(ip)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:297 +#, python-format +msgid "" +"initialize_connection:host name: %(host)s, initiator name: %(ini)s, " +"hostport name: %(port)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:307 +#, python-format +msgid "" +"initialize_connection:Failed to find the given volume. volume " +"name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:359 +#, python-format +msgid "terminate_connection:volume name: %(volume)s, initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:368 +#, python-format +msgid "terminate_connection:Host does not exist. Host name:%(host)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:377 +#, python-format +msgid "terminate_connection:volume does not exist. volume name:%(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:396 +#, python-format +msgid "" +"terminate_connection:No map between host and volume. Host " +"name:%(hostname)s, volume name:%(volumename)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:414 +#, python-format +msgid "" +"terminate_connection:No initiator is added to the host. Host " +"name:%(hostname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:427 +#, python-format +msgid "create_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:434 +msgid "create_snapshot:Device does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:440 +msgid "create_snapshot:Resource pool needs 1GB valid size at least." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:447 +#, python-format +msgid "create_snapshot:Volume does not exist. 
Volume name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:456 +#, python-format +msgid "create_snapshot:Snapshot does not exist. Snapshot name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:468 +#, python-format +msgid "delete_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:475 +msgid "delete_snapshot:Device does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:484 +#, python-format +msgid "delete_snapshot:Snapshot does not exist. snapshot name:%(snap)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:497 +#, python-format +msgid "" +"create_volume_from_snapshot:snapshot name:%(snapshot)s, volume " +"name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:505 +#, python-format +msgid "" +"create_volume_from_snapshot:Device does not support create volume from " +"snapshot. Volume name:%(volume)s, snapshot name:%(snapshot)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:516 +#, python-format +msgid "" +"create_volume_from_snapshot:Snapshot does not exist. Snapshot " +"name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:567 +msgid "Config file is wrong. Controler IP, UserName and UserPassword must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:573 +#, python-format +msgid "_check_conf_file: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:584 +#, python-format +msgid "_read_xml:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:612 +#, python-format +msgid "Write login information to xml error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:616 +#, python-format +msgid "_get_login_info error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:638 +#, python-format +msgid "" +"Config file is wrong. LUNType must be \"Thin\" or \"Thick\". " +"LUNType:%(type)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:665 +msgid "_get_lun_set_info:Use default prefetch type. Prefetch type:Intelligent." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:676 +msgid "" +"_get_lun_set_info:No available pools! Please check whether storage pool " +"is created." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:686 +#, python-format +msgid "_get_lun_set_info:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:736 +msgid "" +"_get_maximum_pool:maxpoolid is None. Please check config file and make " +"sure the \"Name\" in \"StoragePool\" is right." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:754 +#, python-format +msgid "_get_iscsi_info:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:764 +#, python-format +msgid "CLI command:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:831 +#, python-format +msgid "_execute_cli:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:838 +#, python-format +msgid "" +"_name_translate:Name in cinder: %(old)s, new name in storage system: " +"%(new)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:872 +#, python-format +msgid "" +"_create_hostgroup:Failed to Create hostgroup. Hostgroup name: %(name)s. " +"out:%(out)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:901 +#, python-format +msgid "" +"_add_host:Failed to add host to hostgroup. 
host name:%(host)s hostgroup " +"id:%(hostgroup)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:927 +#, python-format +msgid "" +"_add_initiator:Failed to add initiator. initiator name:%(name)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:941 +#, python-format +msgid "" +"_delete_initiator:ERROE:Failed to delete initiator. initiator " +"name:%(name)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:991 +#, python-format +msgid "" +"_add_hostport:Failed to add hostport. port name:%(port)s port " +"information:%(info)s host id:%(host)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1009 +#, python-format +msgid "_delete_hostport:Failed to delete host port. port id:%(portid)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1016 +#, python-format +msgid "_get_tgt_iqn:iSCSI IP is %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1049 +#, python-format +msgid "_get_tgt_iqn:iSCSI target iqn is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1086 +#, python-format +msgid "" +"_map_lun:Failed to add hostmap. hostid:%(host)s lunid:%(lun)s " +"hostlunid:%(hostlunid)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1122 +#, python-format +msgid "" +"_delete_map:There are IOs accessing the system. Retry to delete host map." +" map id:%(mapid)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1128 +#, python-format +msgid "_delete_map:Failed to delete host map. mapid:%(mapid)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1142 +#, python-format +msgid "_delete_host: Failed delete host. host id:%(hostid)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1223 +#, python-format +msgid "" +"_active_snapshot:Failed to active snapshot. snapshot id:%(name)s. " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1237 +#, python-format +msgid "" +"_disable_snapshot:Failed to disable snapshot. snapshot id:%(id)s. " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1251 +#, python-format +msgid "" +"_delete_snapshot:Failed to delete snapshot. snapshot id:%(id)s. " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1308 +#, python-format +msgid "_create_volume:Failed to Create volume. volume name:%(name)s. out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1321 +#, python-format +msgid "_delete_volume:Failed to delete volume. Volume name:%(name)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1338 +#, python-format +msgid "" +"_create_luncopy:Failed to Create LUNcopy. LUNcopy name:%(name)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1352 +#, python-format +msgid "" +"_start_luncopy:Failed to start LUNcopy. LUNcopy id:%(luncopyid)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1382 +#, python-format +msgid "" +"_wait_for_luncopy:LUNcopy status is not normal. LUNcopy " +"name:%(luncopyname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1415 +#, python-format +msgid "" +"_delete_luncopy:Failed to delete LUNcopy. LUNcopy id:%(luncopyid)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1430 +#, python-format +msgid "" +"_create_snapshot:Failed to Create snapshot. 
Snapshot name:%(name)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1468 +#, python-format +msgid "" +"_change_lun_controller:Failed to change lun owning controller. lun " +"id:%(lunid)s. new controller:%(controller)s. out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1484 +msgid "_is_resource_pool_enough:Resource pool for snapshot not be added." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:123 +#, python-format +msgid "API %(name)s failed: %(reason)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:134 +#: cinder/volume/drivers/netapp/iscsi.py:1167 +#, python-format +msgid "Using WSDL: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:144 +#, python-format +msgid "Using DFM server: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:149 +#, python-format +msgid "Using storage service: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:154 +#, python-format +msgid "Using storage service prefix: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:160 +#, python-format +msgid "Using vfiler: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:173 +msgid "Either netapp_storage_service or netapp_storage_service_prefix must be set" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:204 +msgid "Connected to DFM server" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:282 +#, python-format +msgid "Discovered %(dataset_count)s datasets and %(lun_count)s LUNs" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:317 +#, python-format +msgid "Job failed: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:346 +msgid "" +"Attempt to use volume_type without specifying " +"netapp_storage_service_prefix flag." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:350 +msgid "" +"You must set the netapp_storage_service flag in order to create volumes " +"with no volume_type." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:420 +msgid "Failed to provision dataset member" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:435 +msgid "No LUN was created by the provision job" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:463 +#: cinder/volume/drivers/netapp/iscsi.py:1250 +#, python-format +msgid "No entry in LUN table for volume %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:481 +msgid "Failed to remove and delete dataset LUN member" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:497 +msgid "Failed to remove and delete dataset Qtree member" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:538 +#, python-format +msgid "No entry in LUN table for volume %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:558 +#, python-format +msgid "Failed to get LUN details for LUN ID %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:575 +#, python-format +msgid "Failed to get host details for host ID %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:812 +#: cinder/volume/drivers/netapp/iscsi.py:859 +#, python-format +msgid "No LUN ID for volume %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:821 +#, python-format +msgid "Failed to get target portal for filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:826 +#, python-format +msgid "Failed to get target IQN for filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:930 +msgid "" +"Failed to get LUN list. Is the DFM host time-synchronized with Cinder " +"host?" 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1042 +#: cinder/volume/drivers/netapp/iscsi.py:1381 +#: cinder/volume/drivers/netapp/iscsi.py:1699 +#: cinder/volume/drivers/netapp/nfs.py:74 +#, python-format +msgid "" +"Cannot create volume of size %(vol_size)s from snapshot of size " +"%(snap_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1054 +#, python-format +msgid "" +"Cannot create volume of type %(new_type)s from snapshot of type " +"%(old_type)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1075 +#: cinder/volume/drivers/netapp/nfs.py:273 +#, python-format +msgid "" +"Cannot create clone of size %(vol_size)s from volume of size " +"%(src_vol_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1086 +#, python-format +msgid "Cannot create clone of type %(new_type)s from volume of type %(old_type)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1143 +#, python-format +msgid "No metadata property %(prop)s defined for the LUN %(name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1216 +#: cinder/volume/drivers/netapp/iscsi.py:1561 +msgid "Success getting LUN list from server" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1238 +#: cinder/volume/drivers/netapp/iscsi.py:1576 +#, python-format +msgid "Created LUN with name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1254 +#: cinder/volume/drivers/netapp/iscsi.py:1369 +#: cinder/volume/drivers/netapp/iscsi.py:1593 +#, python-format +msgid "Destroyed LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1291 +#, python-format +msgid "Mapped LUN %(handle)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1298 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(handle)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1303 +#: cinder/volume/drivers/netapp/iscsi.py:1637 +#, python-format +msgid "Failed to get LUN target details for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1307 +#: cinder/volume/drivers/netapp/iscsi.py:1648 +#, python-format +msgid "Failed to get target portal for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1311 +#: cinder/volume/drivers/netapp/iscsi.py:1651 +#, python-format +msgid "Failed to get target IQN for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1344 +#, python-format +msgid "Unmapped LUN %(handle)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1365 +#, python-format +msgid "No entry in LUN table for snapshot %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1409 +#: cinder/volume/drivers/netapp/iscsi.py:1907 +msgid "Object is not a NetApp LUN." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1419 +#: cinder/volume/drivers/netapp/iscsi.py:2188 +#, python-format +msgid "Cloned LUN with new name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1439 +#, python-format +msgid "Could not find handle for LUN named %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1458 +#: cinder/volume/drivers/netapp/iscsi.py:1936 +#, python-format +msgid "" +"Cannot clone volume of size %(vol_size)s from src volume of size " +"%(src_vol_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1518 +#, python-format +msgid "Using NetApp filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1585 +#, python-format +msgid "No entry in LUN table for volume/snapshot %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1628 +#, python-format +msgid "Mapped LUN %(name)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1632 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(name)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1688 +#, python-format +msgid "Snapshot %s deletion successful" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1717 +#, python-format +msgid "Unmapped LUN %(name)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1735 +#, python-format +msgid "Failed to get vol with required size for volume: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1811 +#, python-format +msgid "Error mapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1829 +#, python-format +msgid "Error unmapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1923 +#, python-format +msgid "Could not find attribute for LUN named %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2057 +#, python-format +msgid "No iscsi service found for vserver %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2192 +#, python-format +msgid "No clonned lun named %s found on the filer" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2377 +#, python-format +msgid "Error finding luns for volume %(vol)s. Verify volume exists." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2481 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s completed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2484 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s failed" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:312 +#, python-format +msgid "" +"Cloning with params ip %(host_ip)s, exp_path\n" +" %(export_path)s, vol %(volume_name)s,\n" +" clone_name %(clone_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:461 +#, python-format +msgid "No interface found on cluster for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:487 +#, python-format +msgid "" +"No volume on cluster with vserver\n" +" %(vserver)s and junction path " +"%(junction)s\n" +" " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:493 +#, python-format +msgid "" +"Cloning with params volume %(volume)s,src %(src_path)s,\n" +" dest %(dest_path)s, vserver %(vserver)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:547 +#, python-format +msgid "No storage path found for export path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:555 +#, python-format +msgid "Cloning with src %(src_path)s, dest %(dest_path)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/__init__.py:27 +msgid "Nexenta SA returned the error" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:64 +#, python-format +msgid "Sending JSON data: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:69 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:75 +msgid "No headers in server response" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:76 +msgid "Bad response from server" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:79 +#, python-format +msgid "Got response: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:99 +#, python-format +msgid "Volume %s does not exist in Nexenta SA" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:211 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:220 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:228 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:238 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:274 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:282 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:74 +#, python-format +msgid "CLIQ command returned %s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:80 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:88 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. 
Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:118 +#, python-format +msgid "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. " +"Result=%(_xml)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:171 +#, python-format +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:235 +msgid "local_path not supported" +msgstr "" + +#: cinder/volume/drivers/san/san.py:148 +#, python-format +msgid "Error running SSH command: %s" +msgstr "" + +#: cinder/volume/drivers/san/san.py:169 +msgid "Specify san_password or san_private_key" +msgstr "" + +#: cinder/volume/drivers/san/san.py:173 +msgid "san_ip must be set" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:80 +#, python-format +msgid "Cannot parse list-view output: %s" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:174 +#, python-format +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:257 +#, python-format +msgid "SSH Command failed after '%(total_attempts)r' attempts: '%(command)s'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:261 +#, python-format +msgid "Error running ssh command: %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:446 +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:104 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:103 +#, python-format +msgid "CPG (%s) doesn't exist on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:493 +#, python-format +msgid "Must specify a valid persona %(valid)s, value '%(persona)s' is invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:532 +#, python-format +msgid "" +"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " +"invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:569 +#, python-format +msgid "Volume (%s) already exists on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:622 +#, python-format +msgid "Unexpected state while cloning %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:97 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:96 +msgid "Login to 3PAR array invalid" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:218 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:237 +#, python-format +msgid "The hostname must be called '%s'" +msgstr "" + diff --git a/cinder/locale/fr/LC_MESSAGES/cinder.po b/cinder/locale/fr/LC_MESSAGES/cinder.po new file mode 100644 index 0000000000..2a22497847 --- /dev/null +++ b/cinder/locale/fr/LC_MESSAGES/cinder.po @@ -0,0 +1,5620 @@ +# French translation for cinder +# Copyright (c) 2011 Rosetta Contributors and Canonical Ltd 2011 +# This file is distributed under the same license as the cinder package. +# FIRST AUTHOR , 2011. 
+#
+msgid ""
+msgstr ""
+"Project-Id-Version: cinder\n"
+"Report-Msgid-Bugs-To: FULL NAME \n"
+"POT-Creation-Date: 2013-05-21 18:10+0000\n"
+"PO-Revision-Date: 2012-04-06 14:54+0000\n"
+"Last-Translator: EmmanuelLeNormand \n"
+"Language-Team: French \n"
+"Plural-Forms: nplurals=2; plural=(n > 1)\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=utf-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Generated-By: Babel 0.9.6\n"
+
+#: cinder/context.py:61
+#, python-format
+msgid "Arguments dropped when creating context: %s"
+msgstr ""
+
+#: cinder/context.py:92
+#, python-format
+msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r"
+msgstr ""
+
+#: cinder/exception.py:63
+msgid "Unexpected error while running command."
+msgstr "Erreur imprévue lors de l'exécution de la commande."
+
+#: cinder/exception.py:66
+#, python-format
+msgid ""
+"%(description)s\n"
+"Command: %(cmd)s\n"
+"Exit code: %(exit_code)s\n"
+"Stdout: %(stdout)r\n"
+"Stderr: %(stderr)r"
+msgstr ""
+"%(description)s\n"
+"Commande : %(cmd)s\n"
+"Valeur retournée : %(exit_code)s\n"
+"Sortie standard : %(stdout)r\n"
+"Sortie d'erreur : %(stderr)r"
+
+#: cinder/exception.py:90
+msgid "DB exception wrapped."
+msgstr ""
+
+#: cinder/exception.py:104
+msgid "An unknown exception occurred."
+msgstr "Une exception inconnue s'est produite."
+
+#: cinder/exception.py:125 cinder/openstack/common/rpc/common.py:86
+msgid "Exception in string format operation"
+msgstr ""
+
+#: cinder/exception.py:138
+msgid "Connection to glance failed"
+msgstr "La connexion à Glance a échoué"
+
+#: cinder/exception.py:142
+msgid "Not authorized."
+msgstr "Non autorisé."
+
+#: cinder/exception.py:147
+msgid "User does not have admin privileges"
+msgstr "L’utilisateur n'a pas les privilèges administrateur"
+
+#: cinder/exception.py:151
+#, python-format
+msgid "Policy doesn't allow %(action)s to be performed."
+msgstr "Le réglage des droits n'autorise pas %(action)s à être effectué(e)(s)"
+
+#: cinder/exception.py:155
+#, fuzzy, python-format
+msgid "Not authorized for image %(image_id)s."
+msgstr "Pas de méthode pour le message : %s"
+
+#: cinder/exception.py:159
+msgid "Unacceptable parameters."
+msgstr "Paramètres inacceptables."
+
+#: cinder/exception.py:164
+msgid "Invalid snapshot"
+msgstr "Snapshot invalide"
+
+#: cinder/exception.py:168
+#, fuzzy, python-format
+msgid "Volume %(volume_id)s is still attached, detach volume first."
+msgstr "Le volume %(volume_id)s n'est lié à rien"
+
+#: cinder/exception.py:172
+msgid "Failed to load data into json format"
+msgstr "Échec du chargement des données au format JSON"
+
+#: cinder/exception.py:176
+msgid "The request is invalid."
+msgstr "La requête est invalide."
+
+#: cinder/exception.py:180
+#, fuzzy
+msgid "The results are invalid."
+msgstr "La requête est invalide."
+
+#: cinder/exception.py:184
+msgid "Invalid input received"
+msgstr "Entrée invalide reçue"
+
+#: cinder/exception.py:188
+msgid "Invalid volume type"
+msgstr "Type de volume invalide"
+
+#: cinder/exception.py:192
+msgid "Invalid volume"
+msgstr "Volume invalide"
+
+#: cinder/exception.py:196
+#, python-format
+msgid "Invalid content type %(content_type)s."
+msgstr "Le type de contenu %(content_type)s est invalide"
+
+#: cinder/exception.py:200
+msgid "Invalid Parameter: Unicode is not supported by the current database."
+msgstr ""
+
+#: cinder/exception.py:207
+#, python-format
+msgid "%(err)s"
+msgstr "%(err)s"
+
+#: cinder/exception.py:211
+msgid "Service is unavailable at this time."
+msgstr "Le service est indisponible actuellement." + +#: cinder/exception.py:215 +#, python-format +msgid "Image %(image_id)s is unacceptable: %(reason)s" +msgstr "" + +#: cinder/exception.py:219 +msgid "Expected a uuid but received %(uuid)." +msgstr "" + +#: cinder/exception.py:223 +msgid "Resource could not be found." +msgstr "" + +#: cinder/exception.py:229 +#, python-format +msgid "Volume %(volume_id)s persistence file could not be found." +msgstr "" + +#: cinder/exception.py:233 +#, python-format +msgid "Volume %(volume_id)s could not be found." +msgstr "" + +#: cinder/exception.py:237 +#, python-format +msgid "Unable to locate account %(account_name)s on Solidfire device" +msgstr "" + +#: cinder/exception.py:242 +#, python-format +msgid "Volume not found for instance %(instance_id)s." +msgstr "" + +#: cinder/exception.py:246 +#, python-format +msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:251 cinder/exception.py:264 +msgid "Invalid metadata" +msgstr "" + +#: cinder/exception.py:255 cinder/exception.py:268 +msgid "Invalid metadata size" +msgstr "" + +#: cinder/exception.py:259 +#, python-format +msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:272 +#, python-format +msgid "Volume type %(volume_type_id)s could not be found." +msgstr "" + +#: cinder/exception.py:276 +#, python-format +msgid "Volume type with name %(volume_type_name)s could not be found." +msgstr "" + +#: cinder/exception.py:281 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s has no extra specs with key " +"%(extra_specs_key)s." +msgstr "" + +#: cinder/exception.py:286 +#, python-format +msgid "Snapshot %(snapshot_id)s could not be found." +msgstr "" + +#: cinder/exception.py:290 +#, python-format +msgid "deleting volume %(volume_name)s that has snapshot" +msgstr "" + +#: cinder/exception.py:294 +#, python-format +msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" +msgstr "" + +#: cinder/exception.py:299 +#, python-format +msgid "No target id found for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:303 +#, fuzzy, python-format +msgid "Failed to create iscsi target for volume %(volume_id)s." +msgstr "Tentative de suppression d'une console non existente %(console_id)s." + +#: cinder/exception.py:307 +#, python-format +msgid "Failed to attach iSCSI target for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:311 +#, fuzzy, python-format +msgid "Failed to remove iscsi target for volume %(volume_id)s." +msgstr "Tentative de suppression d'une console non existente %(console_id)s." + +#: cinder/exception.py:315 +#, python-format +msgid "No disk at %(location)s" +msgstr "" + +#: cinder/exception.py:319 +#, python-format +msgid "Invalid image href %(image_href)s." +msgstr "" + +#: cinder/exception.py:323 +#, python-format +msgid "Image %(image_id)s could not be found." +msgstr "" + +#: cinder/exception.py:327 +#, python-format +msgid "Service %(service_id)s could not be found." +msgstr "" + +#: cinder/exception.py:331 +#, python-format +msgid "Host %(host)s could not be found." +msgstr "" + +#: cinder/exception.py:335 +#, python-format +msgid "Scheduler Host Filter %(filter_name)s could not be found." +msgstr "" + +#: cinder/exception.py:339 +#, python-format +msgid "Scheduler Host Weigher %(weigher_name)s could not be found." +msgstr "" + +#: cinder/exception.py:343 +#, python-format +msgid "Could not find binary %(binary)s on host %(host)s." 
+msgstr "" + +#: cinder/exception.py:347 +#, python-format +msgid "Invalid reservation expiration %(expire)s." +msgstr "" + +#: cinder/exception.py:351 +#, python-format +msgid "" +"Change would make usage less than 0 for the following resources: " +"%(unders)s" +msgstr "" + +#: cinder/exception.py:356 +msgid "Quota could not be found" +msgstr "" + +#: cinder/exception.py:360 +#, python-format +msgid "Unknown quota resources %(unknown)s." +msgstr "" + +#: cinder/exception.py:364 +#, python-format +msgid "Quota for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:368 +#, python-format +msgid "Quota class %(class_name)s could not be found." +msgstr "" + +#: cinder/exception.py:372 +#, python-format +msgid "Quota usage for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:376 +#, python-format +msgid "Quota reservation %(uuid)s could not be found." +msgstr "" + +#: cinder/exception.py:380 +#, python-format +msgid "Quota exceeded for resources: %(overs)s" +msgstr "" + +#: cinder/exception.py:384 +#, python-format +msgid "Migration %(migration_id)s could not be found." +msgstr "" + +#: cinder/exception.py:388 +#, python-format +msgid "Migration not found for instance %(instance_id)s with status %(status)s." +msgstr "" + +#: cinder/exception.py:393 +#, python-format +msgid "File %(file_path)s could not be found." +msgstr "" + +#: cinder/exception.py:397 +#, python-format +msgid "Class %(class_name)s could not be found: %(exception)s" +msgstr "" + +#: cinder/exception.py:401 +msgid "Action not allowed." +msgstr "" + +#: cinder/exception.py:410 +#, python-format +msgid "Key pair %(key_name)s already exists." +msgstr "" + +#: cinder/exception.py:414 +#, fuzzy, python-format +msgid "Volume Type %(id)s already exists." +msgstr "le groupe %s existe déjà" + +#: cinder/exception.py:418 +msgid "Migration error" +msgstr "" + +#: cinder/exception.py:422 +#, python-format +msgid "Malformed message body: %(reason)s" +msgstr "" + +#: cinder/exception.py:426 +#, python-format +msgid "Could not find config at %(path)s" +msgstr "" + +#: cinder/exception.py:430 +#, python-format +msgid "Could not load paste app '%(name)s' from %(path)s" +msgstr "" + +#: cinder/exception.py:434 +#, python-format +msgid "No valid host was found. %(reason)s" +msgstr "" + +#: cinder/exception.py:438 +#, python-format +msgid "Host %(host)s is not up or doesn't exist." +msgstr "" + +#: cinder/exception.py:442 +msgid "Quota exceeded" +msgstr "" + +#: cinder/exception.py:449 +msgid "Requested volume or snapshot exceeds allowed Gigabytes quota" +msgstr "" + +#: cinder/exception.py:454 +msgid "Maximum volume/snapshot size exceeded" +msgstr "" + +#: cinder/exception.py:458 +#, python-format +msgid "Maximum number of volumes allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:462 +#, python-format +msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:466 +#, python-format +msgid "Detected more than one volume with name %(vol_name)s" +msgstr "" + +#: cinder/exception.py:470 +#, python-format +msgid "3PAR Host already exists: %(err)s. 
%(info)s" +msgstr "" + +#: cinder/exception.py:474 +#, python-format +msgid "Invalid 3PAR Domain: %(err)s" +msgstr "" + +#: cinder/exception.py:478 +#, python-format +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +msgstr "" + +#: cinder/exception.py:483 +msgid "Bad response from SolidFire API" +msgstr "" + +#: cinder/exception.py:487 +#, python-format +msgid "Error in SolidFire API response: data=%(data)s" +msgstr "" + +#: cinder/exception.py:491 +#, python-format +msgid "Unknown or unsupported command %(cmd)s" +msgstr "" + +#: cinder/exception.py:495 +#, python-format +msgid "Malformed response to command %(cmd)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:499 +#, python-format +msgid "Bad HTTP response status %(status)s" +msgstr "" + +#: cinder/exception.py:503 +#, python-format +msgid "Operation failed with status=%(status)s. Full dump: %(data)s" +msgstr "" + +#: cinder/exception.py:507 +#, fuzzy, python-format +msgid "Unable to create server object for initiator %(name)s" +msgstr "" +"Impossible de créer VDI sur SR %(sr_ref)s pour l'instance " +"%(instance_name)s" + +#: cinder/exception.py:511 +#, python-format +msgid "Unable to find server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:515 +msgid "Unable to find any active VPSA controller" +msgstr "" + +#: cinder/exception.py:519 +#, python-format +msgid "Failed to retrieve attachments for volume %(name)s" +msgstr "" + +#: cinder/exception.py:523 +#, python-format +msgid "Invalid attachment info for volume %(name)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:527 +#, python-format +msgid "Instance %(instance_id)s could not be found." +msgstr "" + +#: cinder/exception.py:531 +#, python-format +msgid "Bad or unexpected response from the storage volume backend API: %(data)s" +msgstr "" + +#: cinder/exception.py:536 +#, fuzzy +msgid "Unknown NFS exception" +msgstr "Une exception inconnue s'est produite." + +#: cinder/exception.py:540 +msgid "No mounted NFS shares found" +msgstr "" + +#: cinder/exception.py:544 cinder/exception.py:556 +#, python-format +msgid "There is no share which can host %(volume_size)sG" +msgstr "" + +#: cinder/exception.py:548 +#, fuzzy +msgid "Unknown Gluster exception" +msgstr "Une exception inconnue s'est produite." + +#: cinder/exception.py:552 +msgid "No mounted Gluster shares found" +msgstr "" + +#: cinder/exception.py:560 +#, python-format +msgid "" +"Glance metadata cannot be updated, key %(key)s exists for volume id " +"%(volume_id)s" +msgstr "" + +#: cinder/exception.py:565 +msgid "Failed to copy image to volume" +msgstr "" + +#: cinder/exception.py:569 +#, python-format +msgid "Backup %(backup_id)s could not be found." +msgstr "" + +#: cinder/exception.py:573 +#, fuzzy, python-format +msgid "Invalid backup: %(reason)s" +msgstr "Backend invalide : %s" + +#: cinder/exception.py:577 +#, fuzzy +msgid "Connection to swift failed" +msgstr "La connexion à Glance a échoué" + +#: cinder/flags.py:145 +msgid "Deploy v1 of the Cinder API. " +msgstr "" + +#: cinder/flags.py:148 +msgid "Deploy v2 of the Cinder API. " +msgstr "" + +#: cinder/manager.py:158 +#, python-format +msgid "Skipping %(full_task_name)s, %(ticks_to_skip)s ticks left until next run" +msgstr "" + +#: cinder/manager.py:164 +#, python-format +msgid "Running periodic task %(full_task_name)s" +msgstr "" + +#: cinder/manager.py:171 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "" + +#: cinder/manager.py:216 +msgid "Notifying Schedulers of capabilities ..." 
+msgstr ""
+
+#: cinder/policy.py:30
+msgid "JSON file representing policy"
+msgstr ""
+
+#: cinder/policy.py:33
+msgid "Rule checked when requested rule is not found"
+msgstr ""
+
+#: cinder/quota.py:704
+#, python-format
+msgid "Created reservations %(reservations)s"
+msgstr ""
+
+#: cinder/quota.py:726
+#, python-format
+msgid "Failed to commit reservations %(reservations)s"
+msgstr ""
+
+#: cinder/quota.py:747
+#, python-format
+msgid "Failed to roll back reservations %(reservations)s"
+msgstr ""
+
+#: cinder/service.py:124
+msgid "SIGTERM received"
+msgstr ""
+
+#: cinder/service.py:174 cinder/openstack/common/service.py:164
+msgid "Parent process has died unexpectedly, exiting"
+msgstr ""
+
+#: cinder/service.py:213 cinder/openstack/common/service.py:200
+msgid "Forking too fast, sleeping"
+msgstr ""
+
+#: cinder/service.py:231 cinder/openstack/common/service.py:121
+#: cinder/openstack/common/service.py:218
+#, python-format
+msgid "Caught %s, exiting"
+msgstr ""
+
+#: cinder/service.py:236 cinder/openstack/common/service.py:223
+#, fuzzy
+msgid "Unhandled exception"
+msgstr "Exception interne : %s"
+
+#: cinder/service.py:243 cinder/openstack/common/service.py:230
+#, python-format
+msgid "Started child %d"
+msgstr ""
+
+#: cinder/service.py:253 cinder/openstack/common/service.py:240
+#, fuzzy, python-format
+msgid "Starting %d workers"
+msgstr "adresse de départ"
+
+#: cinder/service.py:272 cinder/openstack/common/service.py:257
+#, python-format
+msgid "Child %(pid)d killed by signal %(sig)d"
+msgstr ""
+
+#: cinder/service.py:275
+#, python-format
+msgid "Child %(pid)d exited with status %(code)d"
+msgstr ""
+
+#: cinder/service.py:278 cinder/openstack/common/service.py:265
+#, python-format
+msgid "pid %d not in child list"
+msgstr ""
+
+#: cinder/service.py:286
+#, python-format
+msgid "_wait_child %d"
+msgstr ""
+
+#: cinder/service.py:302
+#, fuzzy, python-format
+msgid "wait wrap.failed %s"
+msgstr "\"Non trouvé\" remonté : %s"
+
+#: cinder/service.py:310 cinder/openstack/common/service.py:293
+#, python-format
+msgid "Caught %s, stopping children"
+msgstr ""
+
+#: cinder/service.py:321 cinder/openstack/common/service.py:304
+#, python-format
+msgid "Waiting on %d children to exit"
+msgstr ""
+
+#: cinder/service.py:353
+#, fuzzy, python-format
+msgid "Starting %(topic)s node (version %(version_string)s)"
+msgstr "Démarrage du noeud %(topic)s (version %(vcs_string)s)"
+
+#: cinder/service.py:367 cinder/openstack/common/rpc/service.py:47
+#, python-format
+msgid "Creating Consumer connection for Service %s"
+msgstr ""
+
+#: cinder/service.py:458
+msgid "Service killed that has no database entry"
+msgstr "Service détruit sans entrée dans la base de données"
+
+#: cinder/service.py:495
+msgid "The service database object disappeared, Recreating it."
+msgstr "L'objet du service de base de données a disparu, recréation en cours."
+
+#: cinder/service.py:510
+msgid "Recovered model server connection!"
+msgstr "Connexion au serveur de modèles rétablie !"
+ +#: cinder/service.py:516 +msgid "model server went away" +msgstr "Le serveur de modèles a disparu" + +#: cinder/service.py:608 +msgid "Full set of FLAGS:" +msgstr "Ensemble de propriétés complet :" + +#: cinder/service.py:615 +#, python-format +msgid "%(flag)s : FLAG SET " +msgstr "" + +#: cinder/utils.py:96 +#, python-format +msgid "Fetching %s" +msgstr "Récupération de %s" + +#: cinder/utils.py:147 cinder/openstack/common/processutils.py:122 +#, python-format +msgid "Got unknown keyword args to utils.execute: %r" +msgstr "" + +#: cinder/utils.py:153 +msgid "" +"The root_helper option (which lets you specify a root wrapper different " +"from cinder-rootwrap, and defaults to using sudo) is now deprecated. You " +"should use the rootwrap_config option instead." +msgstr "" + +#: cinder/utils.py:169 cinder/openstack/common/processutils.py:137 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "Exécution de la commande (sous-processus) : %s" + +#: cinder/utils.py:186 cinder/utils.py:267 +#: cinder/openstack/common/processutils.py:162 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:224 +#, python-format +msgid "Result was %s" +msgstr "Le résultat était %s" + +#: cinder/utils.py:199 cinder/openstack/common/processutils.py:174 +#, python-format +msgid "%r failed. Retrying." +msgstr "" + +#: cinder/utils.py:241 cinder/volume/drivers/san/hp/hp_3par_common.py:197 +#, python-format +msgid "Running cmd (SSH): %s" +msgstr "Exécution de la cmd (SSH) : %s" + +#: cinder/utils.py:243 +msgid "Environment not supported over SSH" +msgstr "" + +#: cinder/utils.py:247 +msgid "process_input not supported over SSH" +msgstr "" + +#: cinder/utils.py:316 cinder/volume/drivers/huawei/huawei_iscsi.py:91 +msgid "Specify a password or private_key" +msgstr "" + +#: cinder/utils.py:332 cinder/volume/drivers/huawei/huawei_iscsi.py:99 +#, python-format +msgid "Error connecting via ssh: %s" +msgstr "Erreur de connexion via ssh : %s" + +#: cinder/utils.py:373 +#, python-format +msgid "debug in callback: %s" +msgstr "Debug dans le rappel : %s" + +#: cinder/utils.py:532 +#, python-format +msgid "Link Local address is not found.:%s" +msgstr "L'adresse du lien local n'a pas été trouvée :%s" + +#: cinder/utils.py:535 +#, python-format +msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +msgstr "Impossible de trouver l'IP du lien local de %(interface)s :%(ex)s" + +#: cinder/utils.py:569 +#, python-format +msgid "Invalid backend: %s" +msgstr "Backend invalide : %s" + +#: cinder/utils.py:580 +#, python-format +msgid "backend %s" +msgstr "backend %s" + +#: cinder/utils.py:630 +msgid "in looping call" +msgstr "" + +#: cinder/utils.py:813 +#, python-format +msgid "Expected object of type: %s" +msgstr "" + +#: cinder/utils.py:903 +#, python-format +msgid "timefunc: '%(name)s' took %(total_time).2f secs" +msgstr "" + +#: cinder/utils.py:1105 +#, python-format +msgid "Could not remove tmpdir: %s" +msgstr "" + +#: cinder/utils.py:1213 +#, python-format +msgid "Unknown byte multiplier: %s" +msgstr "" + +#: cinder/wsgi.py:121 +#, python-format +msgid "Unable to find cert_file : %s" +msgstr "Impossible de trouver cert_file : %s" + +#: cinder/wsgi.py:124 +#, python-format +msgid "Unable to find ca_file : %s" +msgstr "Impossible de trouver ca_file : %s" + +#: cinder/wsgi.py:127 +#, python-format +msgid "Unable to find key_file : %s" +msgstr "Impossible de trouver key_file : %s" + +#: cinder/wsgi.py:130 +msgid "" +"When running server in SSL mode, you must specify both a cert_file and " +"key_file option 
value in your configuration file" +msgstr "" + +#: cinder/wsgi.py:163 +#, python-format +msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" +msgstr "" + +#: cinder/wsgi.py:207 +#, python-format +msgid "Started %(name)s on %(_host)s:%(_port)s" +msgstr "" + +#: cinder/wsgi.py:226 +msgid "Stopping WSGI server." +msgstr "" + +#: cinder/wsgi.py:240 +msgid "WSGI server has stopped." +msgstr "" + +#: cinder/wsgi.py:309 +msgid "You must implement __call__" +msgstr "Vous devez implémenter __call__" + +#: cinder/api/auth.py:26 +msgid "" +"cinder.api.auth:CinderKeystoneContext is deprecated. Please use " +"cinder.api.middleware.auth:CinderKeystoneContext instead." +msgstr "" + +#: cinder/api/auth.py:34 +msgid "" +"cinder.api.auth:pipeline_factory is deprecated. Please use " +"cinder.api.middleware.auth:pipeline_factory instead." +msgstr "" + +#: cinder/api/common.py:63 cinder/api/common.py:97 cinder/volume/api.py:359 +msgid "limit param must be an integer" +msgstr "" + +#: cinder/api/common.py:66 cinder/api/common.py:101 cinder/volume/api.py:356 +msgid "limit param must be positive" +msgstr "" + +#: cinder/api/common.py:91 +msgid "offset param must be an integer" +msgstr "" + +#: cinder/api/common.py:105 +msgid "offset param must be positive" +msgstr "" + +#: cinder/api/common.py:133 +#, python-format +msgid "marker [%s] not found" +msgstr "" + +#: cinder/api/common.py:160 +#, python-format +msgid "href %s does not contain version" +msgstr "" + +#: cinder/api/extensions.py:184 +msgid "Initializing extension manager." +msgstr "" + +#: cinder/api/extensions.py:199 +#, python-format +msgid "Loaded extension: %s" +msgstr "" + +#: cinder/api/extensions.py:237 +#, python-format +msgid "Ext name: %s" +msgstr "" + +#: cinder/api/extensions.py:238 +#, python-format +msgid "Ext alias: %s" +msgstr "" + +#: cinder/api/extensions.py:239 +#, python-format +msgid "Ext description: %s" +msgstr "" + +#: cinder/api/extensions.py:241 +#, python-format +msgid "Ext namespace: %s" +msgstr "" + +#: cinder/api/extensions.py:242 +#, python-format +msgid "Ext updated: %s" +msgstr "" + +#: cinder/api/extensions.py:244 +#, python-format +msgid "Exception loading extension: %s" +msgstr "" + +#: cinder/api/extensions.py:258 +#, python-format +msgid "Loading extension %s" +msgstr "" + +#: cinder/api/extensions.py:264 +#, python-format +msgid "Calling extension factory %s" +msgstr "" + +#: cinder/api/extensions.py:278 +#, python-format +msgid "osapi_volume_extension is set to deprecated path: %s" +msgstr "" + +#: cinder/api/extensions.py:280 +#, python-format +msgid "" +"Please set your flag or cinder.conf settings for osapi_volume_extension " +"to: %s" +msgstr "" + +#: cinder/api/extensions.py:289 +#, python-format +msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:357 +#, python-format +msgid "Failed to load extension %(classpath)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:381 +#, python-format +msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgstr "" + +#: cinder/api/sizelimit.py:25 +msgid "" +"cinder.api.sizelimit:RequestBodySizeLimiter is deprecated. 
Please use " +"cinder.api.middleware.sizelimit:RequestBodySizeLimiter instead" +msgstr "" + +#: cinder/api/xmlutil.py:268 +msgid "element is not a child" +msgstr "" + +#: cinder/api/xmlutil.py:417 +msgid "root element selecting a list" +msgstr "" + +#: cinder/api/xmlutil.py:742 +#, python-format +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgstr "" + +#: cinder/api/xmlutil.py:861 +msgid "subclasses must implement construct()!" +msgstr "" + +#: cinder/api/contrib/admin_actions.py:80 +#, python-format +msgid "Updating %(resource)s '%(id)s' with '%(update)r'" +msgstr "" + +#: cinder/api/contrib/backups.py:128 +#, python-format +msgid "show called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:140 +#, fuzzy, python-format +msgid "delete called for member %s" +msgstr "Clef secrète changée pour l'utilisateur %s" + +#: cinder/api/contrib/backups.py:143 +#, python-format +msgid "Delete backup with id: %s" +msgstr "" + +#: cinder/api/contrib/backups.py:185 +#, python-format +msgid "Creating new backup %s" +msgstr "" + +#: cinder/api/contrib/backups.py:195 cinder/api/contrib/backups.py:230 +msgid "Incorrect request body format" +msgstr "" + +#: cinder/api/contrib/backups.py:201 +#, python-format +msgid "Creating backup of volume %(volume_id)s in container %(container)s" +msgstr "" + +#: cinder/api/contrib/backups.py:221 +#, python-format +msgid "Restoring backup %(backup_id)s (%(body)s)" +msgstr "" + +#: cinder/api/contrib/backups.py:234 +#, python-format +msgid "Restoring backup %(backup_id)s to volume %(volume_id)s" +msgstr "" + +#: cinder/api/contrib/extended_snapshot_attributes.py:61 +msgid "Snapshot not found." +msgstr "" + +#: cinder/api/contrib/hosts.py:83 cinder/api/openstack/wsgi.py:158 +msgid "cannot understand XML" +msgstr "" + +#: cinder/api/contrib/hosts.py:133 +#, python-format +msgid "Host '%s' could not be found." +msgstr "" + +#: cinder/api/contrib/hosts.py:162 +#, python-format +msgid "Invalid status: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:165 +#, python-format +msgid "Invalid update setting: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:177 +#, python-format +msgid "Setting host %(host)s to %(state)s." +msgstr "" + +#: cinder/api/contrib/hosts.py:202 +msgid "Describe-resource is admin only functionality" +msgstr "" + +#: cinder/api/contrib/hosts.py:210 +msgid "Host not found" +msgstr "" + +#: cinder/api/contrib/quotas.py:63 +msgid "Quota limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:102 +msgid "Request body empty" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:106 +#: cinder/api/v1/snapshot_metadata.py:77 cinder/api/v1/volume_metadata.py:77 +#: cinder/api/v2/snapshot_metadata.py:77 +msgid "Request body and URI mismatch" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:109 +#: cinder/api/v1/snapshot_metadata.py:81 cinder/api/v1/volume_metadata.py:81 +#: cinder/api/v2/snapshot_metadata.py:81 +msgid "Request body contains too many items" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:159 +msgid "Invalid request body" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:163 +msgid "No image_name was specified in request." 
+msgstr "" + +#: cinder/api/middleware/fault.py:45 +#, python-format +msgid "Caught error: %s" +msgstr "Erreur interceptée : %s" + +#: cinder/api/middleware/fault.py:54 cinder/api/openstack/wsgi.py:886 +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "" + +#: cinder/api/middleware/sizelimit.py:54 cinder/api/middleware/sizelimit.py:63 +#: cinder/api/middleware/sizelimit.py:77 +msgid "Request is too large." +msgstr "" + +#: cinder/api/openstack/__init__.py:74 +msgid "Must specify an ExtensionManager class" +msgstr "" + +#: cinder/api/openstack/__init__.py:85 +#, python-format +msgid "Extended resource: %s" +msgstr "" + +#: cinder/api/openstack/__init__.py:110 +#, python-format +msgid "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" +msgstr "" + +#: cinder/api/openstack/__init__.py:115 +#, python-format +msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgstr "" + +#: cinder/api/openstack/__init__.py:128 +msgid "" +"cinder.api.openstack:FaultWrapper is deprecated. Please use " +"cinder.api.middleware.fault:FaultWrapper instead." +msgstr "" + +#: cinder/api/openstack/urlmap.py:25 +msgid "" +"cinder.api.openstack.urlmap:urlmap_factory is deprecated. Please use " +"cinder.api.urlmap:urlmap_factory instead." +msgstr "" + +#: cinder/api/openstack/wsgi.py:134 cinder/api/openstack/wsgi.py:537 +msgid "cannot understand JSON" +msgstr "" + +#: cinder/api/openstack/wsgi.py:542 +msgid "too many body keys" +msgstr "" + +#: cinder/api/openstack/wsgi.py:581 +#, python-format +msgid "Exception handling resource: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:586 +#, python-format +msgid "Fault thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:589 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:697 +msgid "Unrecognized Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:701 +msgid "No Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:705 +msgid "Empty body provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:816 +#, python-format +msgid "There is no such action: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:819 cinder/api/openstack/wsgi.py:832 +#: cinder/api/v1/snapshot_metadata.py:55 cinder/api/v1/snapshot_metadata.py:73 +#: cinder/api/v1/snapshot_metadata.py:98 cinder/api/v1/snapshot_metadata.py:123 +#: cinder/api/v1/volume_metadata.py:55 cinder/api/v1/volume_metadata.py:73 +#: cinder/api/v1/volume_metadata.py:98 cinder/api/v1/volume_metadata.py:123 +#: cinder/api/v2/snapshot_metadata.py:55 cinder/api/v2/snapshot_metadata.py:73 +#: cinder/api/v2/snapshot_metadata.py:98 cinder/api/v2/snapshot_metadata.py:123 +msgid "Malformed request body" +msgstr "" + +#: cinder/api/openstack/wsgi.py:829 +msgid "Unsupported Content-Type" +msgstr "" + +#: cinder/api/openstack/wsgi.py:841 +msgid "Malformed request url" +msgstr "" + +#: cinder/api/openstack/wsgi.py:889 +#, python-format +msgid "%(url)s returned a fault: %(e)s" +msgstr "" + +#: cinder/api/openstack/volume/__init__.py:25 +msgid "" +"cinder.api.openstack.volume:APIRouter is deprecated. Please use " +"cinder.api.v1.router:APIRouter instead." +msgstr "" + +#: cinder/api/openstack/volume/versions.py:26 +msgid "" +"cinder.api.openstack.volume.versions.Versions is deprecated. Please use " +"cinder.api.versions.Versions instead." 
+msgstr "" + +#: cinder/api/v1/limits.py:144 cinder/api/v2/limits.py:144 +#, python-format +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." +msgstr "" + +#: cinder/api/v1/limits.py:270 cinder/api/v2/limits.py:270 +msgid "This request was rate-limited." +msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:39 cinder/api/v1/snapshot_metadata.py:119 +#: cinder/api/v1/snapshot_metadata.py:158 cinder/api/v2/snapshot_metadata.py:39 +#: cinder/api/v2/snapshot_metadata.py:119 +#: cinder/api/v2/snapshot_metadata.py:158 +#, fuzzy +msgid "snapshot does not exist" +msgstr "instance %s: création d'un instantané (snapshot)" + +#: cinder/api/v1/snapshot_metadata.py:141 +#: cinder/api/v1/snapshot_metadata.py:151 cinder/api/v1/volume_metadata.py:141 +#: cinder/api/v1/volume_metadata.py:151 cinder/api/v2/snapshot_metadata.py:141 +#: cinder/api/v2/snapshot_metadata.py:151 +msgid "Metadata item was not found" +msgstr "" + +#: cinder/api/v1/snapshots.py:123 cinder/api/v2/snapshots.py:123 +#, python-format +msgid "Delete snapshot with id: %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:173 cinder/api/v2/snapshots.py:179 +#, python-format +msgid "Create snapshot from volume %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:177 cinder/api/v2/snapshots.py:188 +#, python-format +msgid "Invalid value '%s' for force. " +msgstr "" + +#: cinder/api/v1/volume_metadata.py:39 cinder/api/v1/volume_metadata.py:119 +#: cinder/api/v1/volume_metadata.py:158 +#, fuzzy +msgid "volume does not exist" +msgstr "Le groupe de volume %s n'existe pas" + +#: cinder/api/v1/volumes.py:106 +#, python-format +msgid "vol=%s" +msgstr "" + +#: cinder/api/v1/volumes.py:236 cinder/api/v2/volumes.py:153 +#, python-format +msgid "Delete volume with id: %s" +msgstr "" + +#: cinder/api/v1/volumes.py:278 cinder/api/v1/volumes.py:282 +#: cinder/api/v2/volumes.py:209 cinder/api/v2/volumes.py:213 +msgid "Invalid imageRef provided." +msgstr "" + +#: cinder/api/v1/volumes.py:339 cinder/api/v2/volumes.py:272 +#, python-format +msgid "Create volume of %s GB" +msgstr "Création d'un volume de %s Go" + +#: cinder/api/v1/volumes.py:418 +#, python-format +msgid "Removing options '%(bad_options)s' from query" +msgstr "" + +#: cinder/api/v2/volumes.py:359 +#, python-format +msgid "Removing options '%s' from query" +msgstr "" + +#: cinder/backup/api.py:53 +#, fuzzy +msgid "Backup status must be available or error" +msgstr "Le status du volume doit être disponible" + +#: cinder/backup/api.py:78 +#, fuzzy +msgid "Volume to be backed up must be available" +msgstr "Le status du volume doit être disponible" + +#: cinder/backup/api.py:112 +#, fuzzy +msgid "Backup status must be available" +msgstr "Le status du volume doit être disponible" + +#: cinder/backup/api.py:117 +msgid "Backup to be restored has invalid size" +msgstr "" + +#: cinder/backup/api.py:126 +#, python-format +msgid "Creating volume of %(size)s GB for restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/api.py:140 +#, python-format +msgid "" +"volume size %(volume_size)d is too small to restore backup of size " +"%(size)d." 
+msgstr "" + +#: cinder/backup/api.py:145 +#, fuzzy +msgid "Volume to be restored to must be available" +msgstr "Le status du volume doit être disponible" + +#: cinder/backup/api.py:151 +msgid "Volume to be restored to is smaller than the backup to be restored" +msgstr "" + +#: cinder/backup/api.py:155 +#, python-format +msgid "Overwriting volume %(volume_id)s with restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/manager.py:79 +msgid "Cleaning up incomplete backup operations" +msgstr "" + +#: cinder/backup/manager.py:83 +#, python-format +msgid "Resetting volume %s to available (was backing-up)" +msgstr "" + +#: cinder/backup/manager.py:87 +#, python-format +msgid "Resetting volume %s to error_restoring (was restoring-backup)" +msgstr "" + +#: cinder/backup/manager.py:98 +#, python-format +msgid "Resetting backup %s to error (was creating)" +msgstr "" + +#: cinder/backup/manager.py:104 +#, python-format +msgid "Resetting backup %s to available (was restoring)" +msgstr "" + +#: cinder/backup/manager.py:109 +#, python-format +msgid "Resuming delete on backup: %s" +msgstr "" + +#: cinder/backup/manager.py:119 +#, python-format +msgid "create_backup started, backup: %(backup_id)s for volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:128 +#, python-format +msgid "" +"create_backup aborted, expected volume status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:137 +#, python-format +msgid "" +"create_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:160 +#, python-format +msgid "create_backup finished. backup: %s" +msgstr "" + +#: cinder/backup/manager.py:166 +#, python-format +msgid "" +"restore_backup started, restoring backup: %(backup_id)s to volume: " +"%(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:175 +#, python-format +msgid "" +"restore_backup aborted, expected volume status %(expected_status)s but " +"got %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:183 +#, python-format +msgid "" +"restore_backup aborted, expected backup status %(expected_status)s but " +"got %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:199 +#, python-format +msgid "" +"restore_backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]" +msgstr "" + +#: cinder/backup/manager.py:220 +#, python-format +msgid "" +"restore_backup finished, backup: %(backup_id)s restored to volume: " +"%(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:228 +#, python-format +msgid "delete_backup started, backup: %s" +msgstr "" + +#: cinder/backup/manager.py:234 +#, python-format +msgid "" +"delete_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:244 +#, python-format +msgid "" +"delete_backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]" +msgstr "" + +#: cinder/backup/manager.py:264 +#, python-format +msgid "delete_backup finished, backup %s deleted" +msgstr "" + +#: cinder/backup/services/swift.py:96 +#, python-format +msgid "unsupported compression algorithm: %s" +msgstr "" + +#: cinder/backup/services/swift.py:117 +#, python-format +msgid "_check_container_exists: container: %s" +msgstr "" + +#: 
cinder/backup/services/swift.py:122 +#, python-format +msgid "container %s does not exist" +msgstr "le conteneur %s n'existe pas" + +#: cinder/backup/services/swift.py:127 +#, python-format +msgid "container %s exists" +msgstr "" + +#: cinder/backup/services/swift.py:133 +#, python-format +msgid "_create_container started, container: %(container)s,backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:148 +#, python-format +msgid "_generate_swift_object_name_prefix: %s" +msgstr "" + +#: cinder/backup/services/swift.py:159 +#, python-format +msgid "generated object list: %s" +msgstr "" + +#: cinder/backup/services/swift.py:169 +#, python-format +msgid "" +"_write_metadata started, container name: %(container)s, metadata " +"filename: %(filename)s" +msgstr "" + +#: cinder/backup/services/swift.py:184 +#, python-format +msgid "" +"error writing metadata file to swift, MD5 of metadata file in swift " +"[%(etag)s] is not the same as MD5 of metadata file sent to swift " +"[%(md5)s]" +msgstr "" + +#: cinder/backup/services/swift.py:188 +msgid "_write_metadata finished" +msgstr "" + +#: cinder/backup/services/swift.py:193 +#, python-format +msgid "" +"_read_metadata started, container name: %(container)s, metadata filename:" +" %(filename)s" +msgstr "" + +#: cinder/backup/services/swift.py:197 +#, python-format +msgid "_read_metadata finished (%s)" +msgstr "" + +#: cinder/backup/services/swift.py:207 +#, python-format +msgid "volume size %d is invalid." +msgstr "la taille de volume %d est invalide." + +#: cinder/backup/services/swift.py:221 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to swift, volume size: " +"%(volume_size_bytes)d, swift object names prefix %(object_prefix)s, " +"availability zone: %(availability_zone)s" +msgstr "" + +#: cinder/backup/services/swift.py:237 +msgid "reading chunk of data from volume" +msgstr "" + +#: cinder/backup/services/swift.py:244 +#, python-format +msgid "" +"compressed %(data_size_bytes)d bytes of data to %(comp_size_bytes)d bytes" +" using %(algorithm)s" +msgstr "" + +#: cinder/backup/services/swift.py:248 +msgid "not compressing data" +msgstr "" + +#: cinder/backup/services/swift.py:252 +msgid "About to put_object" +msgstr "" + +#: cinder/backup/services/swift.py:257 +#, python-format +msgid "swift MD5 for %(object_name)s: %(etag)s" +msgstr "" + +#: cinder/backup/services/swift.py:260 +#, python-format +msgid "backup MD5 for %(object_name)s: %(md5)s" +msgstr "" + +#: cinder/backup/services/swift.py:262 +#, python-format +msgid "" +"error writing object to swift, MD5 of object in swift %(etag)s is not the" +" same as MD5 of object sent to swift %(md5)s" +msgstr "" + +#: cinder/backup/services/swift.py:268 +msgid "Calling eventlet.sleep(0)" +msgstr "" + +#: cinder/backup/services/swift.py:276 +#, python-format +msgid "backup %s finished." +msgstr "" + +#: cinder/backup/services/swift.py:281 +#, python-format +msgid "v1 swift volume backup restore of %s started" +msgstr "" + +#: cinder/backup/services/swift.py:287 +#, python-format +msgid "metadata_object_names = %s" +msgstr "" + +#: cinder/backup/services/swift.py:293 +msgid "" +"restore_backup aborted, actual swift object list in swift does not match " +"object list stored in metadata" +msgstr "" + +#: cinder/backup/services/swift.py:299 +#, python-format +msgid "" +"restoring object from swift. 
backup: %(backup_id)s, container: " +"%(container)s, swift object name: %(object_name)s, volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:309 +#, python-format +msgid "decompressing data using %s algorithm" +msgstr "" + +#: cinder/backup/services/swift.py:323 +#, python-format +msgid "v1 swift volume backup restore of %s finished" +msgstr "" + +#: cinder/backup/services/swift.py:331 +#, python-format +msgid "" +"starting restore of backup %(object_prefix)s from swift container: " +"%(container)s, to volume %(volume_id)s, backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:339 +#, python-format +msgid "Restoring swift backup version %s" +msgstr "" + +#: cinder/backup/services/swift.py:344 +#, python-format +msgid "No support to restore swift backup version %s" +msgstr "" + +#: cinder/backup/services/swift.py:348 +#, python-format +msgid "restore %(backup_id)s to %(volume_id)s finished." +msgstr "" + +#: cinder/backup/services/swift.py:362 +msgid "swift error while listing objects, continuing with delete" +msgstr "" + +#: cinder/backup/services/swift.py:371 +#, python-format +msgid "swift error while deleting object %s, continuing with delete" +msgstr "" + +#: cinder/backup/services/swift.py:374 +#, python-format +msgid "deleted swift object: %(swift_object_name)s in container: %(container)s" +msgstr "" + +#: cinder/backup/services/swift.py:380 +#, python-format +msgid "delete %s finished" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:152 +#, python-format +msgid "Creating iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:171 cinder/brick/iscsi/iscsi.py:264 +#: cinder/brick/iscsi/iscsi.py:400 cinder/brick/iscsi/iscsi.py:409 +#, python-format +msgid "Failed to create iscsi target for volume id:%(vol_id)s." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:181 +#, python-format +msgid "" +"Failed to create iscsi target for volume id:%(vol_id)s. Please ensure " +"your tgtd config file contains 'include %(volumes_dir)s/*'" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:192 +#, python-format +msgid "Removing iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:209 cinder/brick/iscsi/iscsi.py:426 +#, python-format +msgid "Failed to remove iscsi target for volume id:%(vol_id)s." +msgstr "Échec de la suppression de la cible iscsi pour le volume id:%(vol_id)s." 
+ +#: cinder/brick/iscsi/iscsi.py:218 cinder/brick/iscsi/iscsi.py:434 +msgid "valid iqn needed for show_target" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:270 +#, python-format +msgid "Removing iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:356 +msgid "rtstool is not installed correctly" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:376 +#, python-format +msgid "Creating iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:416 +#, python-format +msgid "Removing iscsi_target: %s" +msgstr "Suppression de l'iscsi_target : %s" + +#: cinder/brick/iscsi/iscsi.py:455 +#, python-format +msgid "Failed to add initiator iqn %s to target" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:35 +#, python-format +msgid "Unable to find Volume Group: %s" +msgstr "Impossible de trouver le Volume Group : %s" + +#: cinder/brick/local_dev/lvm.py:41 +#, python-format +msgid "Failed to create Volume Group: %s" +msgstr "Échec de la création du Volume Group : %s" + +#: cinder/brick/local_dev/lvm.py:75 +msgid "Error creating Volume Group" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:76 +#, python-format +msgid "Cmd :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:77 +#, python-format +msgid "StdOut :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:78 +#, python-format +msgid "StdErr :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:82 +#, python-format +msgid "Unable to locate Volume Group %s" +msgstr "Impossible de localiser le Volume Group %s" + +#: cinder/brick/local_dev/lvm.py:265 +#, python-format +msgid "Unable to find VG: %s" +msgstr "Impossible de trouver le VG : %s" + +#: cinder/brick/local_dev/lvm.py:284 +msgid "" +"Requested to setup thin provisioning, however current LVM version does " +"not support it." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:337 +#, python-format +msgid "Unable to find LV: %s" +msgstr "Impossible de trouver le LV : %s" + +#: cinder/common/sqlalchemyutils.py:68 +msgid "Id not in sort_keys; is sort_keys unique?" +msgstr "" + +#: cinder/common/sqlalchemyutils.py:116 +msgid "Unknown sort direction, must be 'desc' or 'asc'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:51 +msgid "Use of empty request context is deprecated" +msgstr "L'utilisation d'un contexte de requête vide est dépréciée" + +#: cinder/db/sqlalchemy/api.py:178 +#, python-format +msgid "Unrecognized read_deleted value '%s'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:786 +#, python-format +msgid "Change will make usage less than 0 for the following resources: %(unders)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1797 cinder/db/sqlalchemy/api.py:1823 +#, python-format +msgid "No backend config with id %(sm_backend_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1880 +#, python-format +msgid "No sm_flavor called %(sm_flavor)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1924 +#, python-format +msgid "No sm_volume with id %(volume_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1987 +#, python-format +msgid "No backup with id %(backup_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:74 +msgid "version should be an integer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:101 +msgid "Upgrade DB using Essex release first." +msgstr "" + +#: cinder/db/sqlalchemy/session.py:71 +#, python-format +msgid "Got mysql server has gone away: %s" +msgstr "" + +#: cinder/db/sqlalchemy/session.py:132 +#, python-format +msgid "SQL connection failed. %s attempts left." 
+msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:243 +msgid "Exception while creating table." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:272 +msgid "Downgrade from initial Cinder install is unsupported." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:85 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:123 +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:82 +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:47 +#, python-format +msgid "Table |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:135 +msgid "quota_classes table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:142 +msgid "quota_usages table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:149 +msgid "reservations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:62 +msgid "Exception while creating table 'volume_glance_metedata'" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:77 +msgid "volume_glance_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:94 +msgid "backups table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:60 +msgid "snapshot_metadata table not dropped" +msgstr "" + +#: cinder/image/glance.py:151 +#, python-format +msgid "" +"Error contacting glance server '%(host)s:%(port)s' for '%(method)s', " +"%(extra)s." +msgstr "" + +#: cinder/image/image_utils.py:116 +msgid "Snapshot list encountered but no header found!" +msgstr "" + +#: cinder/image/image_utils.py:224 +msgid "'qemu-img info' parsing failed." +msgstr "" + +#: cinder/image/image_utils.py:231 +#, python-format +msgid "fmt=%(fmt)s backed by:%(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:248 +#, python-format +msgid "Converted to raw, but format is now %s" +msgstr "" + +#: cinder/image/image_utils.py:278 +#, python-format +msgid "Converted to %(f1)s, but format is now %(f2)s" +msgstr "" + +#: cinder/openstack/common/exception.py:104 +#, fuzzy +msgid "Uncaught exception" +msgstr "Reçu exception : %s" + +#: cinder/openstack/common/excutils.py:48 +#, python-format +msgid "Original exception being dropped: %s" +msgstr "" + +#: cinder/openstack/common/lockutils.py:102 +#, python-format +msgid "Could not release the acquired lock `%s`" +msgstr "" + +#: cinder/openstack/common/lockutils.py:188 +#, python-format +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:199 +#, python-format +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:226 +#, python-format +msgid "Got file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:234 +#, python-format +msgid "Released file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." 
+msgstr "" + +#: cinder/openstack/common/log.py:226 +#, fuzzy, python-format +msgid "Deprecated: %s" +msgstr "%s reçu" + +#: cinder/openstack/common/log.py:329 +#, python-format +msgid "Error loading logging config %(log_config)s: %(err_msg)s" +msgstr "" + +#: cinder/openstack/common/log.py:379 +#, python-format +msgid "syslog facility must be one of: %s" +msgstr "" + +#: cinder/openstack/common/log.py:537 +#, python-format +msgid "Fatal call to deprecated config: %(msg)s" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:84 +#, python-format +msgid "task run outlasted interval by %s sec" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:91 +msgid "in fixed duration looping call" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:131 +#, python-format +msgid "Dynamic looping call sleeping for %.02f seconds" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:138 +msgid "in dynamic looping call" +msgstr "" + +#: cinder/openstack/common/policy.py:149 +#, python-format +msgid "" +"Inheritance-based rules are deprecated; use the default brain instead of " +"%s." +msgstr "" + +#: cinder/openstack/common/policy.py:163 +#, python-format +msgid "Failed to understand rule %(match)r" +msgstr "" + +#: cinder/openstack/common/policy.py:173 +#, python-format +msgid "Inheritance-based rules are deprecated; update _check_%s" +msgstr "" + +#: cinder/openstack/common/policy.py:180 +#, python-format +msgid "No handler for matches of kind %s" +msgstr "" + +#: cinder/openstack/common/service.py:112 +#: cinder/openstack/common/service.py:275 +#, fuzzy +msgid "Full set of CONF:" +msgstr "Ensemble de propriétés complet :" + +#: cinder/openstack/common/service.py:261 +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "" + +#: cinder/openstack/common/strutils.py:72 +#, python-format +msgid "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:125 +#, python-format +msgid "%s not in valid priorities" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:141 +#, python-format +msgid "" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:171 +#, python-format +msgid "Failed to load notifier %s. These notifications will not be sent." +msgstr "" + +#: cinder/openstack/common/notifier/rabbit_notifier.py:27 +msgid "The rabbit_notifier is now deprecated. Please use rpc_notifier instead." +msgstr "" + +#: cinder/openstack/common/notifier/rpc_notifier.py:45 +#: cinder/openstack/common/notifier/rpc_notifier2.py:51 +#, python-format +msgid "Could not send notification to %(topic)s. Payload=%(message)s" +msgstr "" + +#: cinder/openstack/common/rpc/__init__.py:106 +#, python-format +msgid "" +"A RPC is being made while holding a lock. The locks currently held are " +"%(locks)s. This is probably a bug. Please report it. Include the " +"following: [%(stack)s]." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:75 +msgid "Pool creating new connection" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:200 +#, python-format +msgid "no calling threads waiting for msg_id : %s, message : %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:208 +#, python-format +msgid "" +"Number of call waiters is greater than warning threshhold: %d. There " +"could be a MulticallProxyWaiter leak." 
+msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:291 +#, python-format +msgid "unpacked context: %s" +msgstr "Contexte décompacté : %s" + +#: cinder/openstack/common/rpc/amqp.py:337 +#, python-format +msgid "UNIQUE_ID is %s." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:406 +#, python-format +msgid "received %s" +msgstr "%s reçu" + +#: cinder/openstack/common/rpc/amqp.py:414 +#, python-format +msgid "no method for message: %s" +msgstr "Pas de méthode pour le message : %s" + +#: cinder/openstack/common/rpc/amqp.py:415 +#, python-format +msgid "No method for message: %s" +msgstr "Pas de méthode pour le message : %s" + +#: cinder/openstack/common/rpc/amqp.py:443 +#: cinder/openstack/common/rpc/impl_zmq.py:286 +#, python-format +msgid "Expected exception during message handling (%s)" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:451 +#: cinder/openstack/common/rpc/impl_zmq.py:292 +msgid "Exception during message handling" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:586 +#, python-format +msgid "Making synchronous call on %s ..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:589 +#, python-format +msgid "MSG_ID is %s" +msgstr "MSG_ID est %s" + +#: cinder/openstack/common/rpc/amqp.py:623 +#, python-format +msgid "Making asynchronous cast on %s..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:632 +msgid "Making asynchronous fanout cast..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:660 +#, python-format +msgid "Sending %(event_type)s on %(topic)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:74 +#, fuzzy +msgid "An unknown RPC related exception occurred." +msgstr "Une exception inconnue s'est produite." + +#: cinder/openstack/common/rpc/common.py:104 +#, python-format +msgid "" +"Remote error: %(exc_type)s %(value)s\n" +"%(traceback)s." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:121 +#, python-format +msgid "" +"Timeout while waiting on RPC response - topic: \"%(topic)s\", RPC method:" +" \"%(method)s\" info: \"%(info)s\"" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:137 +#: cinder/openstack/common/rpc/common.py:138 +#: cinder/openstack/common/rpc/common.py:139 +msgid "" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:143 +#, python-format +msgid "Found duplicate message(%(msg_id)s). Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:147 +msgid "Invalid reuse of an RPC connection." +msgstr "Réutilisation invalide d'une connexion RPC" + +#: cinder/openstack/common/rpc/common.py:151 +#, python-format +msgid "Specified RPC version, %(version)s, not supported by this endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:156 +#, python-format +msgid "" +"Specified RPC envelope version, %(version)s, not supported by this " +"endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:280 +#, python-format +msgid "Failed to sanitize %(item)s. Key error %(err)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:302 +#, python-format +msgid "Returning exception %s to caller" +msgstr "Renvoi de l'exception %s à l'appelant" + +#: cinder/openstack/common/rpc/impl_kombu.py:169 +#: cinder/openstack/common/rpc/impl_qpid.py:133 +msgid "Failed to process message... skipping it." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:480 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:502 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:539 +#, python-format +msgid "" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:555 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:609 +#: cinder/openstack/common/rpc/impl_qpid.py:403 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:627 +#: cinder/openstack/common/rpc/impl_qpid.py:418 +#, python-format +msgid "Timed out waiting for RPC response: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:631 +#: cinder/openstack/common/rpc/impl_qpid.py:422 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:670 +#: cinder/openstack/common/rpc/impl_qpid.py:457 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:351 +#, python-format +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:357 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:370 +msgid "Re-established AMQP queues" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:430 +msgid "Error processing message. Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:97 +msgid "JSON serialization failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:104 +#, python-format +msgid "Deserializing: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:140 +#, python-format +msgid "Connecting to %(addr)s with %(type)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:141 +#, python-format +msgid "-> Subscribed to %(subscribe)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:142 +#, python-format +msgid "-> bind: %(bind)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:150 +msgid "Could not open socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:162 +#, python-format +msgid "Subscribing to %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:204 +msgid "You cannot recv on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:209 +msgid "You cannot send on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:273 +#, fuzzy, python-format +msgid "Running func with context: %s" +msgstr "Contexte décompacté : %s" + +#: cinder/openstack/common/rpc/impl_zmq.py:311 +msgid "Sending reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:345 +msgid "RPC message did not include method." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:380 +#, fuzzy +msgid "Registering reactor" +msgstr "Dé-enregitrement de l'image %s" + +#: cinder/openstack/common/rpc/impl_zmq.py:392 +msgid "In reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:407 +msgid "Out reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:411 +msgid "Consuming socket" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:453 +#, python-format +msgid "CONSUMER GOT %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:465 +#, python-format +msgid "Creating proxy for topic: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:471 +msgid "Topic contained dangerous characters." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:496 +#, python-format +msgid "ROUTER RELAY-OUT SUCCEEDED %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:505 +msgid "Topic socket file creation failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:510 +#, python-format +msgid "ROUTER RELAY-OUT QUEUED %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:513 +#, python-format +msgid "Local per-topic backlog buffer full for topic %(topic)s. Dropping message." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:532 +#, python-format +msgid "Could not create IPC directory %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:542 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:576 +#, python-format +msgid "CONSUMER RECEIVED DATA: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:578 +#, python-format +msgid "ROUTER RELAY-OUT %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:600 +msgid "ZMQ Envelope version unsupported or unknown." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:628 +msgid "Skipping topic registration. Already registered." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:635 +#, python-format +msgid "Consumer is a zmq.%s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:687 +msgid "Creating payload" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:700 +msgid "Creating queue socket for reply waiter" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:713 +msgid "Sending cast" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:716 +msgid "Cast sent; Waiting reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:719 +#, fuzzy, python-format +msgid "Received message: %s" +msgstr "%s reçu" + +#: cinder/openstack/common/rpc/impl_zmq.py:720 +msgid "Unpacking response" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:729 +msgid "Unsupported or unknown ZMQ envelope returned." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:736 +#, fuzzy +msgid "RPC Message Invalid." +msgstr "La requête est invalide." + +#: cinder/openstack/common/rpc/impl_zmq.py:760 +#, python-format +msgid "%(msg)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:763 +#, python-format +msgid "Sending message(s) to: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:767 +msgid "No matchmaker results. Not casting." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:770 +msgid "No match from matchmaker." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:53 +msgid "Match not found by MatchMaker." 
+msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:89 +#, fuzzy +msgid "Matchmaker does not implement registration or heartbeat." +msgstr "xenapi.fake n'a pas d'implémentation pour %s" + +#: cinder/openstack/common/rpc/matchmaker.py:239 +#, python-format +msgid "Matchmaker unregistered: %s, %s" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:250 +msgid "Register before starting heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:343 +#: cinder/openstack/common/rpc/matchmaker.py:361 +#, python-format +msgid "No key defining hosts for topic '%s', see ringfile" +msgstr "" + +#: cinder/scheduler/chance.py:51 cinder/scheduler/simple.py:90 +msgid "Is the appropriate service running?" +msgstr "" + +#: cinder/scheduler/chance.py:56 +msgid "Could not find another host" +msgstr "" + +#: cinder/scheduler/driver.py:89 +msgid "Must implement a fallback schedule" +msgstr "Doit mettre en oeuvre un calendrier de retrait" + +#: cinder/scheduler/driver.py:93 +#, fuzzy +msgid "Must implement schedule_create_volume" +msgstr "Doit mettre en oeuvre un calendrier de retrait" + +#: cinder/scheduler/filter_scheduler.py:113 +msgid "Invalid value for 'scheduler_max_attempts', must be >=1" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:131 +#, python-format +msgid "" +"Error scheduling %(volume_id)s from last vol-service: %(last_host)s : " +"%(exc)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:160 +#, python-format +msgid "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:208 +#, python-format +msgid "Filtered %(hosts)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:214 +#, python-format +msgid "Choosing %(best_host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:240 +#, python-format +msgid "Ignoring %(service_name)s service update from %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:244 +#, python-format +msgid "Received %(service_name)s service update from %(host)s." +msgstr "" + +#: cinder/scheduler/host_manager.py:266 +msgid "service is down or disabled." +msgstr "" + +#: cinder/scheduler/manager.py:119 +#, python-format +msgid "Failed to schedule_%(method)s: %(ex)s" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:69 +#, python-format +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:78 +#, python-format +msgid "Could not decode scheduler options: '%(e)s'" +msgstr "" + +#: cinder/scheduler/simple.py:79 +msgid "Not enough allocatable volume gigabytes remaining" +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:37 +msgid "Free capacity not set: volume node info collection broken." +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:51 +#, python-format +msgid "" +"Insufficient free space for volume creation (requested / avail): " +"%(requested)s/%(available)s" +msgstr "" + +#: cinder/scheduler/filters/retry_filter.py:41 +#, python-format +msgid "Host %(host)s %(pass_msg)s. 
Previously tried hosts: %(hosts)s" +msgstr "" + +#: cinder/tests/fake_driver.py:45 cinder/volume/driver.py:506 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "FAUX ISCSI: %s" + +#: cinder/tests/fake_driver.py:93 cinder/tests/fake_driver.py:98 +#, python-format +msgid "LoggingVolumeDriver: %s" +msgstr "" + +#: cinder/tests/fake_utils.py:72 +#, python-format +msgid "Faking execution of cmd (subprocess): %s" +msgstr "" + +#: cinder/tests/fake_utils.py:80 +#, python-format +msgid "Faked command matched %s" +msgstr "" + +#: cinder/tests/fake_utils.py:96 +#, python-format +msgid "Faked command raised an exception %s" +msgstr "" + +#: cinder/tests/fake_utils.py:101 +#, python-format +msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" +msgstr "" + +#: cinder/tests/test_misc.py:59 +#, python-format +msgid "" +"The following migrations are missing a downgrade:\n" +"\t%s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:243 +#, python-format +msgid "unrecognized argument %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1217 +#, python-format +msgid "Run CLI command: %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1220 +#, python-format +msgid "" +"CLI output:\n" +" stdout: %(stdout)s\n" +" stderr: %(stderr)s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1225 +#, python-format +msgid "" +"CLI Exception output:\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/tests/test_volume_types.py:58 +#, python-format +msgid "Given data: %s" +msgstr "" + +#: cinder/tests/test_volume_types.py:59 +#, python-format +msgid "Result data: %s" +msgstr "" + +#: cinder/tests/api/contrib/test_backups.py:638 +msgid "Invalid input" +msgstr "Entrée invalide" + +#: cinder/tests/integrated/test_login.py:31 +#, python-format +msgid "volume: %s" +msgstr "volume : %s" + +#: cinder/tests/integrated/api/client.py:37 +#, python-format +msgid "" +"%(message)s\n" +"Status Code: %(_status)s\n" +"Body: %(_body)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:46 +msgid "Authentication error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:54 +msgid "Authorization error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:62 +msgid "Item not found" +msgstr "" + +#: cinder/tests/integrated/api/client.py:104 +#, python-format +msgid "Doing %(method)s on %(relative_url)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:106 +#, python-format +msgid "Body: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:124 +#, python-format +msgid "%(auth_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:150 +#, python-format +msgid "%(relative_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:160 +msgid "Unexpected status code" +msgstr "" + +#: cinder/tests/integrated/api/client.py:167 +#, python-format +msgid "Decoding JSON: %s" +msgstr "" + +#: cinder/volume/api.py:96 +msgid "May specify only one of snapshot, imageRef or source volume" +msgstr "" + +#: cinder/volume/api.py:103 cinder/volume/api.py:449 +#: cinder/volume/manager.py:554 +msgid "status must be available" +msgstr "" + +#: cinder/volume/api.py:108 +msgid "Volume size cannot be lesser than the Snapshot size" +msgstr "" + +#: cinder/volume/api.py:117 +msgid "Unable to clone volumes that are in an error state" +msgstr "" + +#: cinder/volume/api.py:123 +msgid "Clones currently must be >= original volume size." 
+msgstr "" + +#: cinder/volume/api.py:140 +#, python-format +msgid "Volume size '%s' must be an integer and greater than 0" +msgstr "" + +#: cinder/volume/api.py:150 +msgid "Size of specified image is larger than volume size." +msgstr "" + +#: cinder/volume/api.py:154 +msgid "Image minDisk size is larger than the volume size." +msgstr "" + +#: cinder/volume/api.py:168 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/api.py:177 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create volume (%(d_consumed)d " +"volumes already consumed)" +msgstr "" + +#: cinder/volume/api.py:308 +msgid "Failed to update quota for deleting volume" +msgstr "" + +#: cinder/volume/api.py:316 +#, fuzzy +msgid "Volume status must be available or error" +msgstr "Le status du volume doit être disponible" + +#: cinder/volume/api.py:321 +#, python-format +msgid "Volume still has %d dependent snapshots" +msgstr "" + +#: cinder/volume/api.py:374 cinder/volume/api.py:432 +#: cinder/volume/volume_types.py:65 +#, python-format +msgid "Searching by: %s" +msgstr "" + +#: cinder/volume/api.py:452 +msgid "already attached" +msgstr "" + +#: cinder/volume/api.py:459 +msgid "already detached" +msgstr "" + +#: cinder/volume/api.py:470 +#, fuzzy +msgid "Volume status must be available to reserve" +msgstr "Le status du volume doit être disponible" + +#: cinder/volume/api.py:519 +msgid "must be available" +msgstr "" + +#: cinder/volume/api.py:537 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG snapshot " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/api.py:546 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create snapshot (%(d_consumed)d " +"snapshots already consumed)" +msgstr "" + +#: cinder/volume/api.py:595 +#, fuzzy +msgid "Volume Snapshot status must be available or error" +msgstr "Le status du volume doit être disponible" + +#: cinder/volume/api.py:623 +msgid "Metadata property key blank" +msgstr "" + +#: cinder/volume/api.py:627 +msgid "Metadata property key greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:631 +msgid "Metadata property value greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:716 +#, fuzzy +msgid "Volume status must be available/in-use." +msgstr "Le status du volume doit être disponible" + +#: cinder/volume/api.py:719 +msgid "Volume status is in-use." +msgstr "" + +#: cinder/volume/driver.py:94 cinder/volume/drivers/netapp/nfs.py:253 +#, python-format +msgid "Recovering from a failed execute. Try number %s" +msgstr "Récupération après une exécution erronée. Tentative numéro %s" + +#: cinder/volume/driver.py:221 cinder/volume/drivers/emc/emc_smis_iscsi.py:119 +msgid "ISCSI provider_location not stored, using discovery" +msgstr "" + +#: cinder/volume/driver.py:269 +#, fuzzy, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "Imposible de trouver une exportation iSCSI pour le volume %s" + +#: cinder/volume/driver.py:273 cinder/volume/drivers/emc/emc_smis_iscsi.py:162 +#, python-format +msgid "ISCSI Discovery: Found %s" +msgstr "" + +#: cinder/volume/driver.py:361 +#, python-format +msgid "copy_image_to_volume %s." +msgstr "" + +#: cinder/volume/driver.py:378 +#, python-format +msgid "copy_volume_to_image %s." 
+msgstr "" + +#: cinder/volume/driver.py:437 +#, python-format +msgid "iSCSI device not found at %s" +msgstr "" + +#: cinder/volume/driver.py:439 +#, python-format +msgid "" +"ISCSI volume not yet found at: %(host_device)s. Will rescan & retry. Try" +" number: %(tries)s" +msgstr "" + +#: cinder/volume/driver.py:451 +#, python-format +msgid "Found iSCSI node %(host_device)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/volume/driver.py:469 cinder/volume/manager.py:689 +#: cinder/volume/drivers/lvm.py:560 cinder/volume/drivers/lvm.py:676 +#: cinder/volume/drivers/storwize_svc.py:1344 +#: cinder/volume/drivers/emc/emc_smis_common.py:855 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:241 +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1503 +#: cinder/volume/drivers/netapp/iscsi.py:1114 +#: cinder/volume/drivers/netapp/iscsi.py:1484 +#: cinder/volume/drivers/netapp/iscsi.py:2236 +#: cinder/volume/drivers/netapp/iscsi.py:2515 +#: cinder/volume/drivers/nexenta/volume.py:317 +msgid "Updating volume status" +msgstr "" + +#: cinder/volume/driver.py:544 +msgid "Driver must implement initialize_connection" +msgstr "" + +#: cinder/volume/manager.py:122 +#, python-format +msgid "Driver path %s is deprecated, update your configuration to the new path." +msgstr "" + +#: cinder/volume/manager.py:144 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "Ré-exportation de %s volumes" + +#: cinder/volume/manager.py:149 +#, python-format +msgid "volume %s stuck in a downloading state" +msgstr "" + +#: cinder/volume/manager.py:154 +#, python-format +msgid "volume %s: skipping export" +msgstr "volume %s : exportation évitée" + +#: cinder/volume/manager.py:156 +msgid "Resuming any in progress delete operations" +msgstr "" + +#: cinder/volume/manager.py:159 +#, python-format +msgid "Resuming delete on volume: %s" +msgstr "" + +#: cinder/volume/manager.py:219 +#, python-format +msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +msgstr "volume %(vol_name)s: cŕeation d'un volume logique de %(vol_size)sG" + +#: cinder/volume/manager.py:228 +#, fuzzy, python-format +msgid "volume %s: creating from snapshot" +msgstr "volume %s: exportation en cours" + +#: cinder/volume/manager.py:232 +#, fuzzy, python-format +msgid "volume %s: creating from existing volume" +msgstr "volume %s: exportation en cours" + +#: cinder/volume/manager.py:236 +#, fuzzy, python-format +msgid "volume %s: creating from image" +msgstr "volume %s: création" + +#: cinder/volume/manager.py:245 +#, python-format +msgid "volume %s: creating" +msgstr "volume %s: création" + +#: cinder/volume/manager.py:277 +#, python-format +msgid "volume %s: creating export" +msgstr "volume %s: exportation en cours" + +#: cinder/volume/manager.py:286 +#, fuzzy, python-format +msgid "volume %s: create failed" +msgstr "volume %s: création" + +#: cinder/volume/manager.py:316 +#, python-format +msgid "volume %s: created successfully" +msgstr "volume %s: crée avec succès" + +#: cinder/volume/manager.py:324 +#, fuzzy, python-format +msgid "Error: %s" +msgstr "Erreur interceptée : %s" + +#: cinder/volume/manager.py:350 +#, python-format +msgid "volume %s: Error trying to reschedule create" +msgstr "" + +#: cinder/volume/manager.py:368 +msgid "Retry info not present, will not reschedule" +msgstr "" + +#: cinder/volume/manager.py:372 +msgid "No request spec, will not reschedule" +msgstr "" + +#: cinder/volume/manager.py:377 +#, python-format +msgid "volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d" +msgstr "" + +#: 
cinder/volume/manager.py:406 cinder/volume/manager.py:419 +#, python-format +msgid "volume %s: deleting" +msgstr "volume %s: suppression" + +#: cinder/volume/manager.py:412 +#, fuzzy +msgid "volume is not local to this node" +msgstr "Le volume n'est pas local à ce noeud" + +#: cinder/volume/manager.py:417 +#, python-format +msgid "volume %s: removing export" +msgstr "volume %s: suppression de l'exportation" + +#: cinder/volume/manager.py:422 +#, python-format +msgid "volume %s: volume is busy" +msgstr "" + +#: cinder/volume/manager.py:441 +msgid "Failed to update usages deleting volume" +msgstr "" + +#: cinder/volume/manager.py:445 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "volume %s: supprimé avec succès" + +#: cinder/volume/manager.py:460 +#, python-format +msgid "snapshot %s: creating" +msgstr "" + +#: cinder/volume/manager.py:466 +#, python-format +msgid "snapshot %(snap_name)s: creating" +msgstr "" + +#: cinder/volume/manager.py:484 +#, python-format +msgid "snapshot %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:492 cinder/volume/manager.py:502 +#, python-format +msgid "snapshot %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:505 +#, fuzzy, python-format +msgid "snapshot %s: snapshot is busy" +msgstr "instance %s: création d'un instantané (snapshot)" + +#: cinder/volume/manager.py:530 +msgid "Failed to update usages deleting snapshot" +msgstr "" + +#: cinder/volume/manager.py:533 +#, python-format +msgid "snapshot %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:551 +msgid "being attached by another instance" +msgstr "" + +#: cinder/volume/manager.py:608 +#, python-format +msgid "Downloaded image %(image_id)s to %(volume_id)s successfully" +msgstr "" + +#: cinder/volume/manager.py:626 +#, fuzzy, python-format +msgid "Uploaded volume %(volume_id)s to image (%(image_id)s) successfully" +msgstr "" +"Montage du volume %(volume_id)s sur l'instance %(instance_id)s en tant " +"que %(device)s" + +#: cinder/volume/manager.py:702 +msgid "Clear capabilities" +msgstr "" + +#: cinder/volume/manager.py:706 +#, python-format +msgid "Notification {%s} received" +msgstr "" + +#: cinder/volume/volume_types.py:41 +#, python-format +msgid "DB error: %s" +msgstr "" + +#: cinder/volume/volume_types.py:50 cinder/volume/volume_types.py:97 +msgid "id cannot be None" +msgstr "" + +#: cinder/volume/volume_types.py:109 +msgid "name cannot be None" +msgstr "" + +#: cinder/volume/volume_types.py:128 +#, python-format +msgid "" +"Default volume type is not found, please check default_volume_type " +"config: %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:91 +msgid "Running with CoraidDriver for ESM EtherCLoud" +msgstr "" + +#: cinder/volume/drivers/coraid.py:102 +#, python-format +msgid "Update session cookie %(session)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:108 cinder/volume/drivers/coraid.py:183 +#, python-format +msgid "Message : %(message)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:125 +#, python-format +msgid "Error while trying to set group: %(message)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:128 +#, fuzzy, python-format +msgid "Unable to find group: %(group)s" +msgstr "Impossible de trouver SR du VDB %s" + +#: cinder/volume/drivers/coraid.py:161 +msgid "ESM urlOpen error" +msgstr "" + +#: cinder/volume/drivers/coraid.py:166 +msgid "JSON Error" +msgstr "" + +#: cinder/volume/drivers/coraid.py:170 +msgid "Request without URL" +msgstr "" + +#: cinder/volume/drivers/coraid.py:175 +#, python-format +msgid 
"Configure data : %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:177 +#, fuzzy, python-format +msgid "Configure response : %s" +msgstr "réponse %s" + +#: cinder/volume/drivers/coraid.py:199 +#, python-format +msgid "Unable to retrive volume infos for volume %(volname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:289 +msgid "Cannot login on Coraid ESM" +msgstr "" + +#: cinder/volume/drivers/coraid.py:308 +#, fuzzy, python-format +msgid "Fail to create volume %(volname)s" +msgstr "Impossible de trouver le volume %s" + +#: cinder/volume/drivers/coraid.py:321 +#, fuzzy, python-format +msgid "Failed to delete volume %(volname)s" +msgstr "Impossible de trouver le volume %s" + +#: cinder/volume/drivers/coraid.py:335 +#, fuzzy, python-format +msgid "Failed to Create Snapshot %(snapname)s" +msgstr "Échec du redémarrage de l'instance" + +#: cinder/volume/drivers/coraid.py:347 +#, python-format +msgid "Failed to Delete Snapshot %(snapname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:362 +#, python-format +msgid "Failed to Create Volume from Snapshot %(snapname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:383 +#, python-format +msgid "" +"Failed to Initialize Connection. Volume Name: %(volname)s Shelf: " +"%(shelf)s, Lun: %(lun)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:65 +#, python-format +msgid "There's no Gluster config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:70 +#, python-format +msgid "Gluster config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:80 +msgid "mount.glusterfs is not installed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:98 cinder/volume/drivers/nfs.py:168 +#, fuzzy, python-format +msgid "casted to %s" +msgstr "Nested renvoi %s" + +#: cinder/volume/drivers/glusterfs.py:108 cinder/volume/drivers/nfs.py:178 +#, python-format +msgid "Volume %s does not have provider_location specified, skipping" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:168 cinder/volume/drivers/nfs.py:237 +#, python-format +msgid "Exception during mounting %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:247 cinder/volume/drivers/nfs.py:321 +#, fuzzy, python-format +msgid "%s is already mounted" +msgstr "le groupe %s existe déjà" + +#: cinder/volume/drivers/lvm.py:82 +#, python-format +msgid "volume group %s doesn't exist" +msgstr "Le groupe de volume %s n'existe pas" + +#: cinder/volume/drivers/lvm.py:197 +#, python-format +msgid "Size for volume: %s not found, skipping secure delete." +msgstr "" + +#: cinder/volume/drivers/lvm.py:204 +#, python-format +msgid "Performing secure delete on volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:218 +#, python-format +msgid "Error unrecognized volume_clear option: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:238 +#, python-format +msgid "snapshot: %s not found, skipping delete operations" +msgstr "" + +#: cinder/volume/drivers/lvm.py:268 cinder/volume/drivers/lvm.py:656 +#, fuzzy, python-format +msgid "Creating clone of volume: %s" +msgstr "Création d'un volume de %s Go" + +#: cinder/volume/drivers/lvm.py:345 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provision for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:364 +#, python-format +msgid "Skipping ensure_export. 
No iscsi_target provisioned for volume: %s"
+msgstr ""
+
+#: cinder/volume/drivers/lvm.py:378
+msgid "Detected inconsistency in provider_location id"
+msgstr ""
+
+#: cinder/volume/drivers/lvm.py:430
+#, python-format
+msgid "Symbolic link %s not found"
+msgstr ""
+
+#: cinder/volume/drivers/lvm.py:510 cinder/volume/drivers/lvm.py:524
+#, python-format
+msgid "Skipping remove_export. No iscsi_target provisioned for volume: %s"
+msgstr ""
+
+#: cinder/volume/drivers/lvm.py:542
+#, python-format
+msgid ""
+"Skipping remove_export. No iscsi_target is presently exported for volume:"
+" %s"
+msgstr ""
+
+#: cinder/volume/drivers/lvm.py:583
+msgid "Error retrieving volume status: "
+msgstr ""
+
+#: cinder/volume/drivers/nfs.py:141
+#, python-format
+msgid "There's no NFS config file configured (%s)"
+msgstr ""
+
+#: cinder/volume/drivers/nfs.py:146
+#, python-format
+msgid "NFS config file at %(config)s doesn't exist"
+msgstr ""
+
+#: cinder/volume/drivers/rbd.py:64
+#, python-format
+msgid "rbd has no pool %s"
+msgstr "rbd n'a pas de pool %s"
+
+#: cinder/volume/drivers/rbd.py:87 cinder/volume/drivers/sheepdog.py:134
+msgid "error refreshing volume stats"
+msgstr ""
+
+#: cinder/volume/drivers/rbd.py:220
+msgid "Not stored in rbd"
+msgstr ""
+
+#: cinder/volume/drivers/rbd.py:224
+msgid "Blank components"
+msgstr ""
+
+#: cinder/volume/drivers/rbd.py:227
+#, fuzzy
+msgid "Not an rbd snapshot"
+msgstr "Snapshot invalide"
+
+#: cinder/volume/drivers/rbd.py:242
+#, python-format
+msgid "%s is in a different ceph cluster"
+msgstr ""
+
+#: cinder/volume/drivers/rbd.py:253
+#, fuzzy, python-format
+msgid "Unable to read image %s"
+msgstr "Impossible de trouver le volume %s"
+
+#: cinder/volume/drivers/scality.py:63
+msgid "Value required for 'scality_sofs_config'"
+msgstr ""
+
+#: cinder/volume/drivers/scality.py:74
+#, python-format
+msgid "Cannot access 'scality_sofs_config': %s"
+msgstr ""
+
+#: cinder/volume/drivers/scality.py:80
+msgid "Cannot execute /sbin/mount.sofs"
+msgstr ""
+
+#: cinder/volume/drivers/scality.py:101
+msgid "Cannot mount Scality SOFS, check syslog for errors"
+msgstr ""
+
+#: cinder/volume/drivers/scality.py:135
+#, python-format
+msgid "Cannot find volume dir for Scality SOFS at '%s'"
+msgstr ""
+
+#: cinder/volume/drivers/sheepdog.py:47
+#, python-format
+msgid "Sheepdog is not working: %s"
+msgstr "Sheepdog n'est pas actif : %s"
+
+#: cinder/volume/drivers/sheepdog.py:52
+msgid "Sheepdog is not working"
+msgstr "Sheepdog n'est pas actif"
+
+#: cinder/volume/drivers/solidfire.py:129
+#, python-format
+msgid "Payload for SolidFire API call: %s"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:146
+#, python-format
+msgid "Call to json.loads() raised an exception: %s"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:152
+#, python-format
+msgid "Results of SolidFire API call: %s"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:156
+#, python-format
+msgid "Clone operation encountered: %s"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:158
+#, python-format
+msgid "Waiting for outstanding operation before retrying snapshot: %s"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:164
+#, python-format
+msgid "Detected xDBVersionMismatch, retry %s of 5"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:171
+#, fuzzy, python-format
+msgid "API response: %s"
+msgstr "réponse %s"
+
+#: cinder/volume/drivers/solidfire.py:191
+#, python-format
+msgid "Found solidfire account: %s"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:220
+#, python-format
+msgid
"solidfire account: %s does not exist, create it..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:281 +#, python-format +msgid "Failed to retrieve volume SolidFire-ID: %s in get_by_account!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:338 +#, fuzzy +msgid "Failed to get model update from clone" +msgstr "Impossible de récupérer les méta-donnérs pour l'IP : %s" + +#: cinder/volume/drivers/solidfire.py:364 +#, python-format +msgid "More than one valid preset was detected, using %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:397 +#, python-format +msgid "Mapped SolidFire volumeID %(sfid)s to cinder ID %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:406 +#, python-format +msgid "Volume %s, not found on SF Cluster." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:409 +#, python-format +msgid "Found %(count)s volumes mapped to id: %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:476 +msgid "Enter SolidFire delete_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:480 +#, python-format +msgid "Account for Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:482 +msgid "This usually means the volume was never succesfully created." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:497 +#, python-format +msgid "Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:500 +msgid "Leaving SolidFire delete_volume" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:504 +msgid "Executing SolidFire ensure_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:509 +msgid "Executing SolidFire create_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:558 +msgid "Updating cluster status info" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:566 +#, fuzzy +msgid "Failed to get updated stats" +msgstr "Échec de la suspension de l'instance" + +#: cinder/volume/drivers/storwize_svc.py:173 +#, python-format +msgid "WWPN on node %(node)s: %(wwpn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:179 +msgid "enter: do_setup" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:190 +#, fuzzy, python-format +msgid "pool %s doesn't exist" +msgstr "Le groupe de volume %s n'existe pas" + +#: cinder/volume/drivers/storwize_svc.py:206 +msgid "Failed to get license information." 
+msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:265 +msgid "do_setup: No configured nodes" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:267 +msgid "leave: do_setup" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:289 +msgid "enter: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:295 +#: cinder/volume/drivers/netapp/iscsi.py:169 +#: cinder/volume/drivers/netapp/iscsi.py:1182 +#: cinder/volume/drivers/netapp/iscsi.py:1535 +#: cinder/volume/drivers/netapp/nfs.py:109 +#: cinder/volume/drivers/netapp/nfs.py:328 +#: cinder/volume/drivers/netapp/nfs.py:379 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:118 +#, python-format +msgid "%s is not set" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:301 +msgid "" +"Password or SSH private key is required for authentication: set either " +"san_password or san_private_key option" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:309 +#, python-format +msgid "" +"Illegal value %d specified for storwize_svc_flashcopy_timeout: valid " +"values are between 0 and 600" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:317 +msgid "leave: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:331 +#, python-format +msgid "ensure_export: Volume %s not found on storage" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:356 +#, python-format +msgid "enter: _get_chap_secret_for_host: host name %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:392 +#, python-format +msgid "" +"leave: _get_chap_secret_for_host: host name %(host_name)s with secret " +"%(chap_secret)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:415 +msgid "_create_host: Cannot clean host name. Host name is not unicode or string" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:483 +#, python-format +msgid "enter: _get_host_from_connector: prefix %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:511 +#, python-format +msgid "leave: _get_host_from_connector: host %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:523 +#, python-format +msgid "enter: _create_host: host %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:538 +msgid "_create_host: No connector ports" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:551 +#, python-format +msgid "leave: _create_host: host %(host)s - %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:574 +#, python-format +msgid "enter: _map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:609 +msgid "" +"storwize_svc_multihostmap_enabled is set to Flase, Not allow multi host " +"mapping" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:619 +#, fuzzy, python-format +msgid "volume %s mapping to multi host" +msgstr "volume %s : exportation évitée" + +#: cinder/volume/drivers/storwize_svc.py:625 +#, python-format +msgid "" +"leave: _map_vol_to_host: LUN %(result_lun)s, volume %(volume_name)s, host" +" %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:635 +#, python-format +msgid "enter: _delete_host: host %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:643 +#, python-format +msgid "leave: _delete_host: host %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:675 +#, python-format +msgid "enter: initialize_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:691 +msgid "_create_host failed to return the host name." 
+msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:702 +#, python-format +msgid "initialize_connection: Failed to get attributes for volume %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:709 +#, python-format +msgid "Did not find expected column name in lsvdisk: %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:711 +#, python-format +msgid "initialize_connection: Missing volume attribute for volume %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:728 +#, python-format +msgid "" +"initialize_connection: No node found in I/O group %(gid)s for volume " +"%(vol)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:736 +#, python-format +msgid "initialize_connection: Did not find a preferred node for volume %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:769 +#, python-format +msgid "" +"initialize_connection: Failed to collect return properties for volume " +"%(vol)s and connector %(conn)s.\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:774 +#, python-format +msgid "" +"leave: initialize_connection:\n" +" volume: %(vol)s\n" +" connector %(conn)s\n" +" properties: %(prop)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:792 +#, python-format +msgid "enter: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:802 +msgid "_get_host_from_connector failed to return the host name for connector" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:817 +#, python-format +msgid "" +"terminate_connection: No mapping of volume %(vol_name)s to host " +"%(host_name)s found" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:825 +#, python-format +msgid "leave: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:883 +msgid "protocol must be specified as ' iSCSI' or ' FC'" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:907 +#, python-format +msgid "enter: _create_vdisk: vdisk %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:945 +#, python-format +msgid "" +"_create_vdisk %(name)s - did not find success message in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:950 +#, python-format +msgid "leave: _create_vdisk: volume %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:962 +#: cinder/volume/drivers/storwize_svc.py:976 +#, python-format +msgid "" +"create FC mapping from %(source)s to %(target)s - did not find success " +"message in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:988 +#: cinder/volume/drivers/storwize_svc.py:998 +#, python-format +msgid "" +"create FC mapping from %(source)s to %(target)s - did not find mapping id" +" in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1012 +#, python-format +msgid "" +"_prepare_fc_map: Failed to prepare FlashCopy from %(source)s to " +"%(target)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1039 +#, python-format +msgid "" +"Unexecpted mapping status %(status)s for mapping %(id)s. Attributes: " +"%(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1050 +#, python-format +msgid "" +"Mapping %(id)s prepare failed to complete within the allotted %(to)d " +"seconds timeout. Terminating." 
+msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1055 +#, python-format +msgid "" +"_prepare_fc_map: Failed to start FlashCopy from %(source)s to %(target)s " +"with exception %(ex)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1062 +#, python-format +msgid "_prepare_fc_map: %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1069 +#, python-format +msgid "" +"_start_fc_map: Failed to start FlashCopy from %(source)s to %(target)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1080 +#, python-format +msgid "" +"enter: _run_flashcopy: execute FlashCopy from source %(source)s to target" +" %(target)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1092 +#, python-format +msgid "leave: _run_flashcopy: FlashCopy started from %(source)s to %(target)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1100 +#, python-format +msgid "enter: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1107 +#, python-format +msgid "_create_copy: Source vdisk %s does not exist" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1119 +#, python-format +msgid "" +"_create_copy: cannot get source vdisk %(src)s capacity from vdisk " +"attributes %(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1129 +#, python-format +msgid "leave: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1134 +#, python-format +msgid "enter: _get_flashcopy_mapping_attributes: mapping %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1156 +#, python-format +msgid "" +"leave: _get_flashcopy_mapping_attributes: mapping %(fc_map_id)s, " +"attributes %(attributes)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1164 +#, python-format +msgid "enter: _is_vdisk_defined: vdisk %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1166 +#, python-format +msgid "leave: _is_vdisk_defined: vdisk %(vol)s with %(str)s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1194 +#, python-format +msgid "enter: _delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1199 +#, python-format +msgid "warning: Tried to delete vdisk %s but it does not exist." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1225 +#, python-format +msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1258 +#, python-format +msgid "leave: _delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1283 +msgid "create_volume_from_snapshot: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1297 +msgid "create_cloned_volume: Source and destination size differ." 
+msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1361 +msgid "_update_volume_status: Could not get system name" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1373 +msgid "Could not get pool data from the storage" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1374 +msgid "_update_volume_status: Could not get storage pool data" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1412 +msgid "" +"Illegal value specified for storwize_svc_vol_rsize: set to either a " +"percentage (0-100) or -1" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1418 +msgid "" +"Illegal value specified for storwize_svc_vol_warning: set to a percentage" +" (0-100)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1425 +msgid "" +"Illegal value specified for storwize_svc_vol_grainsize: set to either 32," +" 64, 128, or 256" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1432 +msgid "System does not support compression" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1437 +msgid "If compression is set to True, rsize must also be set (not equal to -1)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1443 +#, python-format +msgid "" +"Illegal value %(prot)s specified for storwize_svc_connection_protocol: " +"valid values are %(enabled)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1452 +msgid "" +"Multipath is currently only supported for FC connections and not iSCSI. " +"(This is a Nova limitation.)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1464 +#, python-format +msgid "enter: _execute_command_and_parse_attributes: command %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1471 +#, fuzzy, python-format +msgid "" +"CLI Exception output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" +"%(description)s\n" +"Commande : %(cmd)s\n" +"Valeur retournée : %(exit_code)s\n" +"Sortie standard : %(stdout)r\n" +"Sortie d'erreur : %(stderr)r" + +#: cinder/volume/drivers/storwize_svc.py:1488 +#, python-format +msgid "" +"leave: _execute_command_and_parse_attributes:\n" +"command: %(cmd)s\n" +"attributes: %(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1507 +#, python-format +msgid "" +"_get_hdr_dic: attribute headers and values do not match.\n" +" Headers: %(header)s\n" +" Values: %(row)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1517 +#, python-format +msgid "" +"%(fun)s: Failed with unexpected CLI output.\n" +" Command: %(cmd)s\n" +"stdout: %(out)s\n" +"stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1531 +#, python-format +msgid "" +"%(fun)s: Failed with unexpected CLI output.\n" +" Command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1539 +#, python-format +msgid "Did not find expected column in %(fun)s: %(hdr)s" +msgstr "" + +#: cinder/volume/drivers/windows.py:142 +#, fuzzy, python-format +msgid "Creating folder %s " +msgstr "Suppression de l'utilisateur %s" + +#: cinder/volume/drivers/windows.py:207 +#: cinder/volume/drivers/nexenta/volume.py:203 +#, python-format +msgid "Ignored target creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/windows.py:213 +#, fuzzy, python-format +msgid "Disk not found: %s" +msgstr "Sheepdog n'est pas actif : %s" + +#: cinder/volume/drivers/zadara.py:218 +#, python-format +msgid "Sending %(method)s to %(url)s. 
Body \"%(body)s\"" +msgstr "" + +#: cinder/volume/drivers/zadara.py:240 +#, python-format +msgid "Operation completed. %(data)s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:350 +#, python-format +msgid "Volume %(name)s could not be found. It might be already deleted" +msgstr "" + +#: cinder/volume/drivers/zadara.py:447 +#, python-format +msgid "Attach properties: %(properties)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:42 +msgid "" +"Module PyWBEM not installed. Install PyWBEM using the python-pywbem " +"package." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:78 +msgid "Entering create_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:82 +#, fuzzy, python-format +msgid "Create Volume: %(volume)s Size: %(size)lu" +msgstr "volume %(vol_name)s: cŕeation d'un volume logique de %(vol_size)sG" + +#: cinder/volume/drivers/emc/emc_smis_common.py:90 +#, python-format +msgid "Create Volume: %(volume)s Storage type: %(storage_type)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:97 +#, python-format +msgid "" +"Create Volume: %(volume)s Pool: %(pool)s Storage System: " +"%(storage_system)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:106 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Storage Configuration Service not " +"found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:114 +#, python-format +msgid "" +"Create Volume: %(name)s Method: CreateOrModifyElementFromStoragePool " +"ConfigServicie: %(service)s ElementName: %(name)s InPool: %(pool)s " +"ElementType: 5 Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:129 +#, python-format +msgid "Create Volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:136 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:143 +#, python-format +msgid "Leaving create_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:151 +msgid "Entering create_volume_from_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:156 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:166 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Snapshot Instance: %(snapshotinstance)s Storage " +"System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:176 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Create Volume from Snapshot is NOT supported on VMAX." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:187 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Cannot find Replication Service to create volume from " +"snapshot." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:196 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Method: CreateElementReplica ReplicationService: " +"%(service)s ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:217 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s " +"Snapshot:%(snapshotname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:229 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Successfully clone volume from snapshot. Finding the " +"clone relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:240 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Remove the clone relationship. Method: " +"ModifyReplicaSynchronization ReplicationService: %(service)s Operation: " +"8 Synchronization: %(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:256 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:265 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:277 +#, python-format +msgid "" +"Leaving create_volume_from_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:286 +msgid "Entering create_cloned_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:291 +#, python-format +msgid "" +"Create a Clone from Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:301 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Source Instance: %(src_instance)s Storage System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:311 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Cannot find Replication Service to create cloned volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:320 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Method: CreateElementReplica ReplicationService: %(service)s " +"ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:341 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source " +"Volume:%(srcname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:353 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Successfully cloned volume from source volume. Finding the clone " +"relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:364 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Remove the clone relationship. 
Method: ModifyReplicaSynchronization " +"ReplicationService: %(service)s Operation: 8 Synchronization: " +"%(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:380 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:389 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:401 +#, python-format +msgid "" +"Leaving create_cloned_volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:410 +msgid "Entering delete_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:412 +#, python-format +msgid "Delete Volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:419 +#, python-format +msgid "Volume %(name)s not found on the array. No volume to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:429 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Storage Configuration Service not " +"found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:437 +#, python-format +msgid "Delete Volume: %(name)s DeviceID: %(deviceid)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:441 +#, python-format +msgid "" +"Delete Volume: %(name)s Method: EMCReturnToStoragePool ConfigServic: " +"%(service)s TheElement: %(vol_instance)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:454 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:463 +#, python-format +msgid "Leaving delete_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:470 +msgid "Entering create_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:474 +#, python-format +msgid "Create snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:486 +#, python-format +msgid "Device ID: %(deviceid)s: Storage System: %(storagesystem)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:493 +#: cinder/volume/drivers/emc/emc_smis_common.py:495 +#: cinder/volume/drivers/emc/emc_smis_common.py:565 +#, python-format +msgid "Cannot find Replication Service to create snapshot for volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:500 +#, python-format +msgid "" +"Create Snapshot: Method: CreateElementReplica: Target: %(snapshot)s " +"Source: %(volume)s Replication Service: %(service)s ElementName: " +"%(elementname)s Sync Type: 7 SourceElement: %(sourceelement)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:516 +#, python-format +msgid "" +"Create Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:525 +#, python-format +msgid "Error Create Snapshot: (snapshot)s Volume: %(volume)s Error: %(errordesc)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:533 +#, python-format +msgid "" +"Leaving create_snapshot: Snapshot: %(snapshot)s Volume: %(volume)s " +"Return code: %(rc)lu." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:539 +msgid "Entering delete_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:543 +#, python-format +msgid "Delete Snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:549 +#, python-format +msgid "" +"Delete Snapshot: %(snapshot)s: volume: %(volume)s. Finding " +"StorageSychronization_SV_SV." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:557 +#, python-format +msgid "" +"Snapshot: %(snapshot)s: volume: %(volume)s not found on the array. No " +"snapshot to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:572 +#, python-format +msgid "" +"Delete Snapshot: Target: %(snapshot)s Source: %(volume)s. Method: " +"ModifyReplicaSynchronization: Replication Service: %(service)s " +"Operation: 19 Synchronization: %(sync_name)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:588 +#, python-format +msgid "" +"Delete Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:597 +#, python-format +msgid "" +"Error Delete Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:609 +#, python-format +msgid "" +"Leaving delete_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:619 +#, fuzzy, python-format +msgid "Create export: %(volume)s" +msgstr "Ré-exportation de %s volumes" + +#: cinder/volume/drivers/emc/emc_smis_common.py:624 +#, fuzzy, python-format +msgid "create_export: Volume: %(volume)s Device ID: %(device_id)s" +msgstr "" +"Montage du volume %(volume_id)s sur l'instance %(instance_id)s en tant " +"que %(device)s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:646 +#, python-format +msgid "" +"ExposePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(lun_name)s" +" InitiatorPortIDs: %(initiator)s DeviceAccesses: 2" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:661 +#, python-format +msgid "ExposePaths parameter LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:672 +#, fuzzy, python-format +msgid "Error mapping volume %s." +msgstr "Erreur au démarrage xvp : %s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:676 +#, fuzzy, python-format +msgid "ExposePaths for volume %s completed successfully." +msgstr "volume %s: supprimé avec succès" + +#: cinder/volume/drivers/emc/emc_smis_common.py:692 +#, python-format +msgid "" +"HidePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(device_id)s " +"LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:705 +#, python-format +msgid "Error unmapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:709 +#, fuzzy, python-format +msgid "HidePaths for volume %s completed successfully." +msgstr "volume %s: supprimé avec succès" + +#: cinder/volume/drivers/emc/emc_smis_common.py:722 +#, python-format +msgid "" +"AddMembers: ConfigServicie: %(service)s MaskingGroup: %(masking_group)s" +" Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:735 +#, python-format +msgid "Error mapping volume %(vol)s. 
%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:740 +#, fuzzy, python-format +msgid "AddMembers for volume %s completed successfully." +msgstr "volume %s: supprimé avec succès" + +#: cinder/volume/drivers/emc/emc_smis_common.py:753 +#, python-format +msgid "" +"RemoveMembers: ConfigServicie: %(service)s MaskingGroup: " +"%(masking_group)s Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:766 +#, python-format +msgid "Error unmapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:771 +#, fuzzy, python-format +msgid "RemoveMembers for volume %s completed successfully." +msgstr "volume %s: supprimé avec succès" + +#: cinder/volume/drivers/emc/emc_smis_common.py:777 +#, python-format +msgid "Map volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:786 +#: cinder/volume/drivers/emc/emc_smis_common.py:816 +#, python-format +msgid "Cannot find Controller Configuration Service for storage system %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:800 +#, python-format +msgid "Unmap volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:806 +#, python-format +msgid "Volume %s is not mapped. No volume to unmap." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:830 +#, python-format +msgid "Initialize connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:836 +#, python-format +msgid "Volume %s is already mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:848 +#, python-format +msgid "Terminate connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:880 +#, python-format +msgid "Found Storage Type: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:883 +msgid "Storage type not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:899 +#, python-format +msgid "Found Masking View: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:902 +msgid "Masking View not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:924 +msgid "Ecom user not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:944 +#, python-format +msgid "Ecom IP: %(ecomIp)s Port: %(ecomPort)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:947 +msgid "Ecom server not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:954 +#, fuzzy +msgid "Cannot connect to ECOM server" +msgstr "Reconnection à la queue" + +#: cinder/volume/drivers/emc/emc_smis_common.py:966 +#, python-format +msgid "Found Replication Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:979 +#, python-format +msgid "Found Storage Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:992 +#, python-format +msgid "Found Controller Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1005 +#, python-format +msgid "Found Storage Hardware ID Management Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1049 +#, python-format +msgid "Pool %(storage_type)s is not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1055 +#, python-format +msgid "Storage system not found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1061 +#, python-format +msgid "Pool: %(pool)s SystemName: %(systemname)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1077 +#, python-format +msgid "Pool name: %(poolname)s System name: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1109 +#, python-format +msgid "Volume %(volumename)s not found on the array." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1112 +#, fuzzy, python-format +msgid "Volume name: %(volumename)s Volume instance: %(vol_instance)s." +msgstr "" +"Montage du volume %(volume_id)s sur l'instance %(instance_id)s en tant " +"que %(device)s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1125 +#, python-format +msgid "Source: %(volumename)s Target: %(snapshotname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1148 +#, python-format +msgid "" +"Source: %(volumename)s Target: %(snapshotname)s. Storage Synchronized " +"not found. " +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1153 +#, python-format +msgid "" +"Storage system: %(storage_system)s Storage Synchronized instance: " +"%(sync)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1179 +#, python-format +msgid "Error finding %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1183 +#, python-format +msgid "Found %(name)s: %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1243 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage_system)s " +"and initiator %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1282 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage volume %(vol)s and initiator" +" %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1295 +#, python-format +msgid "" +"Volume %(name)s not found on the array. Cannot determine if there are " +"volumes mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1307 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage)s and " +"%(connector)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1319 +#, python-format +msgid "" +"Found %(numVolumesMapped)d volumes on storage system %(storage)s mapped " +"to %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1354 +#, python-format +msgid "Available device number on %(storage)s: %(device)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1397 +#, python-format +msgid "Device number not found for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1402 +#, python-format +msgid "Found device number %(device)d for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1412 +#, python-format +msgid "Device info: %(data)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1434 +#, python-format +msgid "Masking view: %(view)s DeviceMaskingGroup: %(masking)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1456 +#, python-format +msgid "Found Storage Processor System: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1484 +#, python-format +msgid "" +"iSCSIProtocolEndpoint for storage system %(storage_system)s and SP %(sp)s" +" is %(endpoint)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1513 +msgid "Error finding Storage Hardware ID Service." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1519 +#, python-format +msgid "" +"EMCGetTargetEndpoints: Service: %(service)s Storage HardwareIDs: " +"%(hardwareids)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1531 +msgid "Error finding Target WWNs." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1541 +#, python-format +msgid "Add target WWN: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1543 +#, python-format +msgid "Target WWNs: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1559 +#, python-format +msgid "Storage Hardware IDs for %(wwpns)s is %(foundInstances)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:158 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "Imposible de trouver une exportation iSCSI pour le volume %s" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:167 +#, fuzzy, python-format +msgid "Cannot find device number for volume %s" +msgstr "Imposible de trouver une exportation iSCSI pour le volume %s" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:197 +#, fuzzy, python-format +msgid "Found iSCSI endpoint: %s" +msgstr "\"Non trouvé\" remonté : %s" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:204 +#, python-format +msgid "ISCSI endpoint not found for SP %(sp)s on storage system %(storage)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:221 +#, python-format +msgid "ISCSI properties: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:113 +msgid "read timed out" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:138 +msgid "do_setup." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:144 +msgid "check_for_setup_error." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:149 +msgid "check_for_setup_error: Can not get device type." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:153 +#, python-format +msgid "check_for_setup_error: Device type is:%(type)s, version is:%(version)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:160 +msgid "" +"check_for_setup_error: Product version not right. Please make sure the " +"product version is V1." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:171 +msgid "_get_device_type: Storage Pool must be configured." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:180 +#, python-format +msgid "create_volume:volume name: %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:194 +#, python-format +msgid "delete_volume: volume name: %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:201 +#, python-format +msgid "delete_volume:No need to delete volume. Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:210 +#, python-format +msgid "create_export: volume name:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:214 +#, python-format +msgid "create_export:Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:234 +#, python-format +msgid "initialize_connection: volume name: %(volume)s. initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:249 +#, python-format +msgid "" +"initialize_connection:Failed to find target ip for " +"initiator:%(initiatorname)s, please check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:259 +#, python-format +msgid "initialize_connection:Failed to find target iSCSI iqn. 
Target IP:%(ip)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:297 +#, python-format +msgid "" +"initialize_connection:host name: %(host)s, initiator name: %(ini)s, " +"hostport name: %(port)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:307 +#, python-format +msgid "" +"initialize_connection:Failed to find the given volume. volume " +"name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:359 +#, python-format +msgid "terminate_connection:volume name: %(volume)s, initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:368 +#, python-format +msgid "terminate_connection:Host does not exist. Host name:%(host)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:377 +#, python-format +msgid "terminate_connection:volume does not exist. volume name:%(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:396 +#, python-format +msgid "" +"terminate_connection:No map between host and volume. Host " +"name:%(hostname)s, volume name:%(volumename)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:414 +#, python-format +msgid "" +"terminate_connection:No initiator is added to the host. Host " +"name:%(hostname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:427 +#, python-format +msgid "create_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:434 +msgid "create_snapshot:Device does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:440 +msgid "create_snapshot:Resource pool needs 1GB valid size at least." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:447 +#, python-format +msgid "create_snapshot:Volume does not exist. Volume name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:456 +#, python-format +msgid "create_snapshot:Snapshot does not exist. Snapshot name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:468 +#, python-format +msgid "delete_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:475 +msgid "delete_snapshot:Device does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:484 +#, python-format +msgid "delete_snapshot:Snapshot does not exist. snapshot name:%(snap)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:497 +#, python-format +msgid "" +"create_volume_from_snapshot:snapshot name:%(snapshot)s, volume " +"name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:505 +#, python-format +msgid "" +"create_volume_from_snapshot:Device does not support create volume from " +"snapshot. Volume name:%(volume)s, snapshot name:%(snapshot)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:516 +#, python-format +msgid "" +"create_volume_from_snapshot:Snapshot does not exist. Snapshot " +"name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:567 +msgid "Config file is wrong. Controler IP, UserName and UserPassword must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:573 +#, python-format +msgid "_check_conf_file: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:584 +#, python-format +msgid "_read_xml:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:612 +#, python-format +msgid "Write login information to xml error. 
%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:616 +#, python-format +msgid "_get_login_info error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:638 +#, python-format +msgid "" +"Config file is wrong. LUNType must be \"Thin\" or \"Thick\". " +"LUNType:%(type)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:665 +msgid "_get_lun_set_info:Use default prefetch type. Prefetch type:Intelligent." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:676 +msgid "" +"_get_lun_set_info:No available pools! Please check whether storage pool " +"is created." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:686 +#, python-format +msgid "_get_lun_set_info:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:736 +msgid "" +"_get_maximum_pool:maxpoolid is None. Please check config file and make " +"sure the \"Name\" in \"StoragePool\" is right." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:754 +#, python-format +msgid "_get_iscsi_info:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:764 +#, python-format +msgid "CLI command:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:831 +#, python-format +msgid "_execute_cli:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:838 +#, python-format +msgid "" +"_name_translate:Name in cinder: %(old)s, new name in storage system: " +"%(new)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:872 +#, python-format +msgid "" +"_create_hostgroup:Failed to Create hostgroup. Hostgroup name: %(name)s. " +"out:%(out)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:901 +#, python-format +msgid "" +"_add_host:Failed to add host to hostgroup. host name:%(host)s hostgroup " +"id:%(hostgroup)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:927 +#, python-format +msgid "" +"_add_initiator:Failed to add initiator. initiator name:%(name)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:941 +#, python-format +msgid "" +"_delete_initiator:ERROE:Failed to delete initiator. initiator " +"name:%(name)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:991 +#, python-format +msgid "" +"_add_hostport:Failed to add hostport. port name:%(port)s port " +"information:%(info)s host id:%(host)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1009 +#, python-format +msgid "_delete_hostport:Failed to delete host port. port id:%(portid)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1016 +#, python-format +msgid "_get_tgt_iqn:iSCSI IP is %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1049 +#, python-format +msgid "_get_tgt_iqn:iSCSI target iqn is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1086 +#, python-format +msgid "" +"_map_lun:Failed to add hostmap. hostid:%(host)s lunid:%(lun)s " +"hostlunid:%(hostlunid)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1122 +#, python-format +msgid "" +"_delete_map:There are IOs accessing the system. Retry to delete host map." +" map id:%(mapid)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1128 +#, python-format +msgid "_delete_map:Failed to delete host map. mapid:%(mapid)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1142 +#, python-format +msgid "_delete_host: Failed delete host. 
host id:%(hostid)s out:%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1223
+#, python-format
+msgid ""
+"_active_snapshot:Failed to activate snapshot. snapshot id:%(name)s. "
+"out:%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1237
+#, python-format
+msgid ""
+"_disable_snapshot:Failed to disable snapshot. snapshot id:%(id)s. "
+"out:%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1251
+#, python-format
+msgid ""
+"_delete_snapshot:Failed to delete snapshot. snapshot id:%(id)s. "
+"out:%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1308
+#, python-format
+msgid "_create_volume:Failed to Create volume. volume name:%(name)s. out:%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1321
+#, python-format
+msgid "_delete_volume:Failed to delete volume. Volume name:%(name)s out:%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1338
+#, python-format
+msgid ""
+"_create_luncopy:Failed to Create LUNcopy. LUNcopy name:%(name)s "
+"out:%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1352
+#, python-format
+msgid ""
+"_start_luncopy:Failed to start LUNcopy. LUNcopy id:%(luncopyid)s "
+"out:%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1382
+#, python-format
+msgid ""
+"_wait_for_luncopy:LUNcopy status is not normal. LUNcopy "
+"name:%(luncopyname)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1415
+#, python-format
+msgid ""
+"_delete_luncopy:Failed to delete LUNcopy. LUNcopy id:%(luncopyid)s "
+"out:%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1430
+#, python-format
+msgid ""
+"_create_snapshot:Failed to Create snapshot. Snapshot name:%(name)s "
+"out:%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1468
+#, python-format
+msgid ""
+"_change_lun_controller:Failed to change lun owning controller. lun "
+"id:%(lunid)s. new controller:%(controller)s. out:%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1484
+msgid "_is_resource_pool_enough:Resource pool for snapshot has not been added."
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:123 +#, python-format +msgid "API %(name)s failed: %(reason)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:134 +#: cinder/volume/drivers/netapp/iscsi.py:1167 +#, python-format +msgid "Using WSDL: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:144 +#, python-format +msgid "Using DFM server: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:149 +#, fuzzy, python-format +msgid "Using storage service: %s" +msgstr "Instance actives : %s" + +#: cinder/volume/drivers/netapp/iscsi.py:154 +#, python-format +msgid "Using storage service prefix: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:160 +#, fuzzy, python-format +msgid "Using vfiler: %s" +msgstr "\"Non trouvé\" remonté : %s" + +#: cinder/volume/drivers/netapp/iscsi.py:173 +msgid "Either netapp_storage_service or netapp_storage_service_prefix must be set" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:204 +#, fuzzy +msgid "Connected to DFM server" +msgstr "Reconnection à la queue" + +#: cinder/volume/drivers/netapp/iscsi.py:282 +#, python-format +msgid "Discovered %(dataset_count)s datasets and %(lun_count)s LUNs" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:317 +#, fuzzy, python-format +msgid "Job failed: %s" +msgstr "\"Non trouvé\" remonté : %s" + +#: cinder/volume/drivers/netapp/iscsi.py:346 +msgid "" +"Attempt to use volume_type without specifying " +"netapp_storage_service_prefix flag." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:350 +msgid "" +"You must set the netapp_storage_service flag in order to create volumes " +"with no volume_type." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:420 +msgid "Failed to provision dataset member" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:435 +msgid "No LUN was created by the provision job" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:463 +#: cinder/volume/drivers/netapp/iscsi.py:1250 +#, python-format +msgid "No entry in LUN table for volume %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:481 +msgid "Failed to remove and delete dataset LUN member" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:497 +msgid "Failed to remove and delete dataset Qtree member" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:538 +#, python-format +msgid "No entry in LUN table for volume %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:558 +#, fuzzy, python-format +msgid "Failed to get LUN details for LUN ID %s" +msgstr "Impossible de récupérer les méta-donnérs pour l'IP : %s" + +#: cinder/volume/drivers/netapp/iscsi.py:575 +#, fuzzy, python-format +msgid "Failed to get host details for host ID %s" +msgstr "Impossible de récupérer les méta-donnérs pour l'IP : %s" + +#: cinder/volume/drivers/netapp/iscsi.py:812 +#: cinder/volume/drivers/netapp/iscsi.py:859 +#, fuzzy, python-format +msgid "No LUN ID for volume %s" +msgstr "Imposible de trouver une exportation iSCSI pour le volume %s" + +#: cinder/volume/drivers/netapp/iscsi.py:821 +#, fuzzy, python-format +msgid "Failed to get target portal for filer: %s" +msgstr "Impossible de récupérer les méta-donnérs pour l'IP : %s" + +#: cinder/volume/drivers/netapp/iscsi.py:826 +#, fuzzy, python-format +msgid "Failed to get target IQN for filer: %s" +msgstr "Impossible de récupérer les méta-donnérs pour l'IP : %s" + +#: cinder/volume/drivers/netapp/iscsi.py:930 +msgid "" +"Failed to get LUN list. Is the DFM host time-synchronized with Cinder " +"host?" 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1042 +#: cinder/volume/drivers/netapp/iscsi.py:1381 +#: cinder/volume/drivers/netapp/iscsi.py:1699 +#: cinder/volume/drivers/netapp/nfs.py:74 +#, python-format +msgid "" +"Cannot create volume of size %(vol_size)s from snapshot of size " +"%(snap_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1054 +#, python-format +msgid "" +"Cannot create volume of type %(new_type)s from snapshot of type " +"%(old_type)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1075 +#: cinder/volume/drivers/netapp/nfs.py:273 +#, python-format +msgid "" +"Cannot create clone of size %(vol_size)s from volume of size " +"%(src_vol_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1086 +#, python-format +msgid "Cannot create clone of type %(new_type)s from volume of type %(old_type)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1143 +#, python-format +msgid "No metadata property %(prop)s defined for the LUN %(name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1216 +#: cinder/volume/drivers/netapp/iscsi.py:1561 +msgid "Success getting LUN list from server" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1238 +#: cinder/volume/drivers/netapp/iscsi.py:1576 +#, python-format +msgid "Created LUN with name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1254 +#: cinder/volume/drivers/netapp/iscsi.py:1369 +#: cinder/volume/drivers/netapp/iscsi.py:1593 +#, fuzzy, python-format +msgid "Destroyed LUN %s" +msgstr "Nested renvoi %s" + +#: cinder/volume/drivers/netapp/iscsi.py:1291 +#, python-format +msgid "Mapped LUN %(handle)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1298 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(handle)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1303 +#: cinder/volume/drivers/netapp/iscsi.py:1637 +#, fuzzy, python-format +msgid "Failed to get LUN target details for the LUN %s" +msgstr "Impossible de récupérer les méta-donnérs pour l'IP : %s" + +#: cinder/volume/drivers/netapp/iscsi.py:1307 +#: cinder/volume/drivers/netapp/iscsi.py:1648 +#, fuzzy, python-format +msgid "Failed to get target portal for the LUN %s" +msgstr "Impossible de récupérer les méta-donnérs pour l'IP : %s" + +#: cinder/volume/drivers/netapp/iscsi.py:1311 +#: cinder/volume/drivers/netapp/iscsi.py:1651 +#, fuzzy, python-format +msgid "Failed to get target IQN for the LUN %s" +msgstr "Impossible de récupérer les méta-donnérs pour l'IP : %s" + +#: cinder/volume/drivers/netapp/iscsi.py:1344 +#, python-format +msgid "Unmapped LUN %(handle)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1365 +#, python-format +msgid "No entry in LUN table for snapshot %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1409 +#: cinder/volume/drivers/netapp/iscsi.py:1907 +msgid "Object is not a NetApp LUN." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1419 +#: cinder/volume/drivers/netapp/iscsi.py:2188 +#, python-format +msgid "Cloned LUN with new name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1439 +#, fuzzy, python-format +msgid "Could not find handle for LUN named %s" +msgstr "Imposible de trouver une exportation iSCSI pour le volume %s" + +#: cinder/volume/drivers/netapp/iscsi.py:1458 +#: cinder/volume/drivers/netapp/iscsi.py:1936 +#, python-format +msgid "" +"Cannot clone volume of size %(vol_size)s from src volume of size " +"%(src_vol_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1518 +#, fuzzy, python-format +msgid "Using NetApp filer: %s" +msgstr "Instance actives : %s" + +#: cinder/volume/drivers/netapp/iscsi.py:1585 +#, python-format +msgid "No entry in LUN table for volume/snapshot %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1628 +#, python-format +msgid "Mapped LUN %(name)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1632 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(name)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1688 +#, fuzzy, python-format +msgid "Snapshot %s deletion successful" +msgstr "volume %s: supprimé avec succès" + +#: cinder/volume/drivers/netapp/iscsi.py:1717 +#, python-format +msgid "Unmapped LUN %(name)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1735 +#, fuzzy, python-format +msgid "Failed to get vol with required size for volume: %s" +msgstr "Impossible de récupérer les méta-donnérs pour l'IP : %s" + +#: cinder/volume/drivers/netapp/iscsi.py:1811 +#, python-format +msgid "Error mapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1829 +#, python-format +msgid "Error unmapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1923 +#, fuzzy, python-format +msgid "Could not find attribute for LUN named %s" +msgstr "Imposible de trouver une exportation iSCSI pour le volume %s" + +#: cinder/volume/drivers/netapp/iscsi.py:2057 +#, python-format +msgid "No iscsi service found for vserver %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2192 +#, python-format +msgid "No clonned lun named %s found on the filer" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2377 +#, python-format +msgid "Error finding luns for volume %(vol)s. Verify volume exists." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2481 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s completed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2484 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s failed" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:312 +#, python-format +msgid "" +"Cloning with params ip %(host_ip)s, exp_path\n" +" %(export_path)s, vol %(volume_name)s,\n" +" clone_name %(clone_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:461 +#, python-format +msgid "No interface found on cluster for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:487 +#, python-format +msgid "" +"No volume on cluster with vserver\n" +" %(vserver)s and junction path " +"%(junction)s\n" +" " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:493 +#, python-format +msgid "" +"Cloning with params volume %(volume)s,src %(src_path)s,\n" +" dest %(dest_path)s, vserver %(vserver)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:547 +#, python-format +msgid "No storage path found for export path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:555 +#, python-format +msgid "Cloning with src %(src_path)s, dest %(dest_path)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/__init__.py:27 +msgid "Nexenta SA returned the error" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:64 +#, python-format +msgid "Sending JSON data: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:69 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:75 +msgid "No headers in server response" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:76 +msgid "Bad response from server" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:79 +#, fuzzy, python-format +msgid "Got response: %s" +msgstr "réponse %s" + +#: cinder/volume/drivers/nexenta/volume.py:99 +#, fuzzy, python-format +msgid "Volume %s does not exist in Nexenta SA" +msgstr "Le groupe de volume %s n'existe pas" + +#: cinder/volume/drivers/nexenta/volume.py:211 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:220 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:228 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:238 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:274 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:282 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:74 +#, python-format +msgid "CLIQ command returned %s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:80 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:88 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. 
Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:118 +#, python-format +msgid "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. " +"Result=%(_xml)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:171 +#, python-format +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:235 +msgid "local_path not supported" +msgstr "" + +#: cinder/volume/drivers/san/san.py:148 +#, fuzzy, python-format +msgid "Error running SSH command: %s" +msgstr "Erreur imprévue lors de l'éxecution de la commande" + +#: cinder/volume/drivers/san/san.py:169 +msgid "Specify san_password or san_private_key" +msgstr "" + +#: cinder/volume/drivers/san/san.py:173 +msgid "san_ip must be set" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:80 +#, python-format +msgid "Cannot parse list-view output: %s" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:174 +#, python-format +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:257 +#, python-format +msgid "SSH Command failed after '%(total_attempts)r' attempts: '%(command)s'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:261 +#, fuzzy, python-format +msgid "Error running ssh command: %s" +msgstr "Erreur imprévue lors de l'éxecution de la commande" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:446 +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:104 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:103 +#, python-format +msgid "CPG (%s) doesn't exist on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:493 +#, python-format +msgid "Must specify a valid persona %(valid)s, value '%(persona)s' is invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:532 +#, python-format +msgid "" +"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " +"invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:569 +#, fuzzy, python-format +msgid "Volume (%s) already exists on array" +msgstr "le groupe %s existe déjà" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:622 +#, fuzzy, python-format +msgid "Unexpected state while cloning %s" +msgstr "Erreur imprévue lors de l'éxecution de la commande" + +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:97 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:96 +msgid "Login to 3PAR array invalid" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:218 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:237 +#, python-format +msgid "The hostname must be called '%s'" +msgstr "" + diff --git a/cinder/locale/it/LC_MESSAGES/cinder.po b/cinder/locale/it/LC_MESSAGES/cinder.po new file mode 100644 index 0000000000..f65735c064 --- /dev/null +++ b/cinder/locale/it/LC_MESSAGES/cinder.po @@ -0,0 +1,5605 @@ +# Italian translation for cinder +# Copyright (c) 2011 Rosetta Contributors and Canonical Ltd 2011 +# This file is distributed under the same license as the cinder package. +# FIRST AUTHOR , 2011. 
+# +msgid "" +msgstr "" +"Project-Id-Version: cinder\n" +"Report-Msgid-Bugs-To: FULL NAME \n" +"POT-Creation-Date: 2013-05-21 18:10+0000\n" +"PO-Revision-Date: 2012-04-01 18:59+0000\n" +"Last-Translator: simone.sandri \n" +"Language-Team: Italian \n" +"Plural-Forms: nplurals=2; plural=(n != 1)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 0.9.6\n" + +#: cinder/context.py:61 +#, python-format +msgid "Arguments dropped when creating context: %s" +msgstr "" + +#: cinder/context.py:92 +#, python-format +msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" +msgstr "" + +#: cinder/exception.py:63 +msgid "Unexpected error while running command." +msgstr "Si e' verificato un errore inatteso durante l'esecuzione del comando." + +#: cinder/exception.py:66 +#, python-format +msgid "" +"%(description)s\n" +"Command: %(cmd)s\n" +"Exit code: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" +msgstr "" +"%(description)s\n" +"Comando: %(cmd)s\n" +"Exit code: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" + +#: cinder/exception.py:90 +msgid "DB exception wrapped." +msgstr "" + +#: cinder/exception.py:104 +msgid "An unknown exception occurred." +msgstr "E' stato riscontrato un errore sconosciuto" + +#: cinder/exception.py:125 cinder/openstack/common/rpc/common.py:86 +msgid "Exception in string format operation" +msgstr "" + +#: cinder/exception.py:138 +msgid "Connection to glance failed" +msgstr "" + +#: cinder/exception.py:142 +msgid "Not authorized." +msgstr "" + +#: cinder/exception.py:147 +msgid "User does not have admin privileges" +msgstr "L'utente non ha i privilegi dell'amministratore" + +#: cinder/exception.py:151 +#, python-format +msgid "Policy doesn't allow %(action)s to be performed." +msgstr "" + +#: cinder/exception.py:155 +#, fuzzy, python-format +msgid "Not authorized for image %(image_id)s." +msgstr "nessun metodo per il messaggio: %s" + +#: cinder/exception.py:159 +msgid "Unacceptable parameters." +msgstr "Parametri inaccettabili." + +#: cinder/exception.py:164 +msgid "Invalid snapshot" +msgstr "" + +#: cinder/exception.py:168 +#, python-format +msgid "Volume %(volume_id)s is still attached, detach volume first." +msgstr "" + +#: cinder/exception.py:172 +msgid "Failed to load data into json format" +msgstr "" + +#: cinder/exception.py:176 +msgid "The request is invalid." +msgstr "La richiesta non è valida." + +#: cinder/exception.py:180 +#, fuzzy +msgid "The results are invalid." +msgstr "La richiesta non è valida." + +#: cinder/exception.py:184 +msgid "Invalid input received" +msgstr "E' stato ricevuto un input non valido" + +#: cinder/exception.py:188 +msgid "Invalid volume type" +msgstr "Tipo del volume non valido" + +#: cinder/exception.py:192 +msgid "Invalid volume" +msgstr "Volume non valido" + +#: cinder/exception.py:196 +#, python-format +msgid "Invalid content type %(content_type)s." +msgstr "" + +#: cinder/exception.py:200 +msgid "Invalid Parameter: Unicode is not supported by the current database." +msgstr "" + +#: cinder/exception.py:207 +#, python-format +msgid "%(err)s" +msgstr "" + +#: cinder/exception.py:211 +msgid "Service is unavailable at this time." +msgstr "" + +#: cinder/exception.py:215 +#, python-format +msgid "Image %(image_id)s is unacceptable: %(reason)s" +msgstr "" + +#: cinder/exception.py:219 +msgid "Expected a uuid but received %(uuid)." +msgstr "" + +#: cinder/exception.py:223 +msgid "Resource could not be found." 
+msgstr "" + +#: cinder/exception.py:229 +#, python-format +msgid "Volume %(volume_id)s persistence file could not be found." +msgstr "" + +#: cinder/exception.py:233 +#, python-format +msgid "Volume %(volume_id)s could not be found." +msgstr "" + +#: cinder/exception.py:237 +#, python-format +msgid "Unable to locate account %(account_name)s on Solidfire device" +msgstr "" + +#: cinder/exception.py:242 +#, python-format +msgid "Volume not found for instance %(instance_id)s." +msgstr "" + +#: cinder/exception.py:246 +#, python-format +msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:251 cinder/exception.py:264 +msgid "Invalid metadata" +msgstr "" + +#: cinder/exception.py:255 cinder/exception.py:268 +msgid "Invalid metadata size" +msgstr "" + +#: cinder/exception.py:259 +#, python-format +msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:272 +#, python-format +msgid "Volume type %(volume_type_id)s could not be found." +msgstr "" + +#: cinder/exception.py:276 +#, python-format +msgid "Volume type with name %(volume_type_name)s could not be found." +msgstr "" + +#: cinder/exception.py:281 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s has no extra specs with key " +"%(extra_specs_key)s." +msgstr "" + +#: cinder/exception.py:286 +#, python-format +msgid "Snapshot %(snapshot_id)s could not be found." +msgstr "" + +#: cinder/exception.py:290 +#, python-format +msgid "deleting volume %(volume_name)s that has snapshot" +msgstr "" + +#: cinder/exception.py:294 +#, python-format +msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" +msgstr "" + +#: cinder/exception.py:299 +#, python-format +msgid "No target id found for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:303 +#, python-format +msgid "Failed to create iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:307 +#, python-format +msgid "Failed to attach iSCSI target for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:311 +#, python-format +msgid "Failed to remove iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:315 +#, python-format +msgid "No disk at %(location)s" +msgstr "" + +#: cinder/exception.py:319 +#, python-format +msgid "Invalid image href %(image_href)s." +msgstr "" + +#: cinder/exception.py:323 +#, python-format +msgid "Image %(image_id)s could not be found." +msgstr "" + +#: cinder/exception.py:327 +#, python-format +msgid "Service %(service_id)s could not be found." +msgstr "" + +#: cinder/exception.py:331 +#, python-format +msgid "Host %(host)s could not be found." +msgstr "" + +#: cinder/exception.py:335 +#, python-format +msgid "Scheduler Host Filter %(filter_name)s could not be found." +msgstr "" + +#: cinder/exception.py:339 +#, python-format +msgid "Scheduler Host Weigher %(weigher_name)s could not be found." +msgstr "" + +#: cinder/exception.py:343 +#, python-format +msgid "Could not find binary %(binary)s on host %(host)s." +msgstr "" + +#: cinder/exception.py:347 +#, python-format +msgid "Invalid reservation expiration %(expire)s." +msgstr "" + +#: cinder/exception.py:351 +#, python-format +msgid "" +"Change would make usage less than 0 for the following resources: " +"%(unders)s" +msgstr "" + +#: cinder/exception.py:356 +msgid "Quota could not be found" +msgstr "" + +#: cinder/exception.py:360 +#, python-format +msgid "Unknown quota resources %(unknown)s." 
+msgstr "" + +#: cinder/exception.py:364 +#, python-format +msgid "Quota for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:368 +#, python-format +msgid "Quota class %(class_name)s could not be found." +msgstr "" + +#: cinder/exception.py:372 +#, python-format +msgid "Quota usage for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:376 +#, python-format +msgid "Quota reservation %(uuid)s could not be found." +msgstr "" + +#: cinder/exception.py:380 +#, python-format +msgid "Quota exceeded for resources: %(overs)s" +msgstr "" + +#: cinder/exception.py:384 +#, python-format +msgid "Migration %(migration_id)s could not be found." +msgstr "" + +#: cinder/exception.py:388 +#, python-format +msgid "Migration not found for instance %(instance_id)s with status %(status)s." +msgstr "" + +#: cinder/exception.py:393 +#, python-format +msgid "File %(file_path)s could not be found." +msgstr "" + +#: cinder/exception.py:397 +#, python-format +msgid "Class %(class_name)s could not be found: %(exception)s" +msgstr "" + +#: cinder/exception.py:401 +msgid "Action not allowed." +msgstr "" + +#: cinder/exception.py:410 +#, python-format +msgid "Key pair %(key_name)s already exists." +msgstr "" + +#: cinder/exception.py:414 +#, python-format +msgid "Volume Type %(id)s already exists." +msgstr "" + +#: cinder/exception.py:418 +msgid "Migration error" +msgstr "" + +#: cinder/exception.py:422 +#, python-format +msgid "Malformed message body: %(reason)s" +msgstr "" + +#: cinder/exception.py:426 +#, python-format +msgid "Could not find config at %(path)s" +msgstr "" + +#: cinder/exception.py:430 +#, python-format +msgid "Could not load paste app '%(name)s' from %(path)s" +msgstr "" + +#: cinder/exception.py:434 +#, python-format +msgid "No valid host was found. %(reason)s" +msgstr "" + +#: cinder/exception.py:438 +#, python-format +msgid "Host %(host)s is not up or doesn't exist." +msgstr "" + +#: cinder/exception.py:442 +msgid "Quota exceeded" +msgstr "" + +#: cinder/exception.py:449 +msgid "Requested volume or snapshot exceeds allowed Gigabytes quota" +msgstr "" + +#: cinder/exception.py:454 +msgid "Maximum volume/snapshot size exceeded" +msgstr "" + +#: cinder/exception.py:458 +#, python-format +msgid "Maximum number of volumes allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:462 +#, python-format +msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:466 +#, python-format +msgid "Detected more than one volume with name %(vol_name)s" +msgstr "" + +#: cinder/exception.py:470 +#, python-format +msgid "3PAR Host already exists: %(err)s. 
%(info)s" +msgstr "" + +#: cinder/exception.py:474 +#, python-format +msgid "Invalid 3PAR Domain: %(err)s" +msgstr "" + +#: cinder/exception.py:478 +#, python-format +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +msgstr "" + +#: cinder/exception.py:483 +msgid "Bad response from SolidFire API" +msgstr "" + +#: cinder/exception.py:487 +#, python-format +msgid "Error in SolidFire API response: data=%(data)s" +msgstr "" + +#: cinder/exception.py:491 +#, python-format +msgid "Unknown or unsupported command %(cmd)s" +msgstr "" + +#: cinder/exception.py:495 +#, python-format +msgid "Malformed response to command %(cmd)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:499 +#, python-format +msgid "Bad HTTP response status %(status)s" +msgstr "" + +#: cinder/exception.py:503 +#, python-format +msgid "Operation failed with status=%(status)s. Full dump: %(data)s" +msgstr "" + +#: cinder/exception.py:507 +#, fuzzy, python-format +msgid "Unable to create server object for initiator %(name)s" +msgstr "Impossible creare il VDI su SR %(sr_ref)s per l'istanza %(instance_name)s" + +#: cinder/exception.py:511 +#, python-format +msgid "Unable to find server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:515 +msgid "Unable to find any active VPSA controller" +msgstr "" + +#: cinder/exception.py:519 +#, python-format +msgid "Failed to retrieve attachments for volume %(name)s" +msgstr "" + +#: cinder/exception.py:523 +#, python-format +msgid "Invalid attachment info for volume %(name)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:527 +#, python-format +msgid "Instance %(instance_id)s could not be found." +msgstr "" + +#: cinder/exception.py:531 +#, python-format +msgid "Bad or unexpected response from the storage volume backend API: %(data)s" +msgstr "" + +#: cinder/exception.py:536 +#, fuzzy +msgid "Unknown NFS exception" +msgstr "E' stato riscontrato un errore sconosciuto" + +#: cinder/exception.py:540 +msgid "No mounted NFS shares found" +msgstr "" + +#: cinder/exception.py:544 cinder/exception.py:556 +#, python-format +msgid "There is no share which can host %(volume_size)sG" +msgstr "" + +#: cinder/exception.py:548 +#, fuzzy +msgid "Unknown Gluster exception" +msgstr "E' stato riscontrato un errore sconosciuto" + +#: cinder/exception.py:552 +msgid "No mounted Gluster shares found" +msgstr "" + +#: cinder/exception.py:560 +#, python-format +msgid "" +"Glance metadata cannot be updated, key %(key)s exists for volume id " +"%(volume_id)s" +msgstr "" + +#: cinder/exception.py:565 +msgid "Failed to copy image to volume" +msgstr "" + +#: cinder/exception.py:569 +#, python-format +msgid "Backup %(backup_id)s could not be found." +msgstr "" + +#: cinder/exception.py:573 +#, python-format +msgid "Invalid backup: %(reason)s" +msgstr "" + +#: cinder/exception.py:577 +msgid "Connection to swift failed" +msgstr "" + +#: cinder/flags.py:145 +msgid "Deploy v1 of the Cinder API. " +msgstr "" + +#: cinder/flags.py:148 +msgid "Deploy v2 of the Cinder API. " +msgstr "" + +#: cinder/manager.py:158 +#, python-format +msgid "Skipping %(full_task_name)s, %(ticks_to_skip)s ticks left until next run" +msgstr "" + +#: cinder/manager.py:164 +#, python-format +msgid "Running periodic task %(full_task_name)s" +msgstr "" + +#: cinder/manager.py:171 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "" + +#: cinder/manager.py:216 +msgid "Notifying Schedulers of capabilities ..." 
+msgstr "" + +#: cinder/policy.py:30 +msgid "JSON file representing policy" +msgstr "" + +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" +msgstr "" + +#: cinder/quota.py:704 +#, python-format +msgid "Created reservations %(reservations)s" +msgstr "" + +#: cinder/quota.py:726 +#, python-format +msgid "Failed to commit reservations %(reservations)s" +msgstr "" + +#: cinder/quota.py:747 +#, python-format +msgid "Failed to roll back reservations %(reservations)s" +msgstr "" + +#: cinder/service.py:124 +msgid "SIGTERM received" +msgstr "" + +#: cinder/service.py:174 cinder/openstack/common/service.py:164 +msgid "Parent process has died unexpectedly, exiting" +msgstr "" + +#: cinder/service.py:213 cinder/openstack/common/service.py:200 +msgid "Forking too fast, sleeping" +msgstr "" + +#: cinder/service.py:231 cinder/openstack/common/service.py:121 +#: cinder/openstack/common/service.py:218 +#, python-format +msgid "Caught %s, exiting" +msgstr "" + +#: cinder/service.py:236 cinder/openstack/common/service.py:223 +#, fuzzy +msgid "Unhandled exception" +msgstr "Eccezione interna: %s" + +#: cinder/service.py:243 cinder/openstack/common/service.py:230 +#, python-format +msgid "Started child %d" +msgstr "" + +#: cinder/service.py:253 cinder/openstack/common/service.py:240 +#, python-format +msgid "Starting %d workers" +msgstr "" + +#: cinder/service.py:272 cinder/openstack/common/service.py:257 +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "" + +#: cinder/service.py:275 +#, python-format +msgid "Child %(pid)d exited with status %(code)d" +msgstr "" + +#: cinder/service.py:278 cinder/openstack/common/service.py:265 +#, python-format +msgid "pid %d not in child list" +msgstr "" + +#: cinder/service.py:286 +#, python-format +msgid "_wait_child %d" +msgstr "" + +#: cinder/service.py:302 +#, python-format +msgid "wait wrap.failed %s" +msgstr "" + +#: cinder/service.py:310 cinder/openstack/common/service.py:293 +#, python-format +msgid "Caught %s, stopping children" +msgstr "" + +#: cinder/service.py:321 cinder/openstack/common/service.py:304 +#, python-format +msgid "Waiting on %d children to exit" +msgstr "" + +#: cinder/service.py:353 +#, python-format +msgid "Starting %(topic)s node (version %(version_string)s)" +msgstr "" + +#: cinder/service.py:367 cinder/openstack/common/rpc/service.py:47 +#, python-format +msgid "Creating Consumer connection for Service %s" +msgstr "" + +#: cinder/service.py:458 +msgid "Service killed that has no database entry" +msgstr "Servizio terminato che non ha entry nel database" + +#: cinder/service.py:495 +msgid "The service database object disappeared, Recreating it." +msgstr "Il servizio é scomparso dal database, ricreo." + +#: cinder/service.py:510 +msgid "Recovered model server connection!" +msgstr "Connessione al model server ripristinata!" 
+
+#: cinder/service.py:516
+msgid "model server went away"
+msgstr "model server è scomparso"
+
+#: cinder/service.py:608
+msgid "Full set of FLAGS:"
+msgstr "Insieme di FLAGS:"
+
+#: cinder/service.py:615
+#, python-format
+msgid "%(flag)s : FLAG SET "
+msgstr ""
+
+#: cinder/utils.py:96
+#, python-format
+msgid "Fetching %s"
+msgstr "Prelievo %s"
+
+#: cinder/utils.py:147 cinder/openstack/common/processutils.py:122
+#, python-format
+msgid "Got unknown keyword args to utils.execute: %r"
+msgstr ""
+
+#: cinder/utils.py:153
+msgid ""
+"The root_helper option (which lets you specify a root wrapper different "
+"from cinder-rootwrap, and defaults to using sudo) is now deprecated. You "
+"should use the rootwrap_config option instead."
+msgstr ""
+
+#: cinder/utils.py:169 cinder/openstack/common/processutils.py:137
+#, python-format
+msgid "Running cmd (subprocess): %s"
+msgstr "Esecuzione del comando (sottoprocesso): %s"
+
+#: cinder/utils.py:186 cinder/utils.py:267
+#: cinder/openstack/common/processutils.py:162
+#: cinder/volume/drivers/san/hp/hp_3par_common.py:224
+#, python-format
+msgid "Result was %s"
+msgstr "Il risultato è %s"
+
+#: cinder/utils.py:199 cinder/openstack/common/processutils.py:174
+#, python-format
+msgid "%r failed. Retrying."
+msgstr ""
+
+#: cinder/utils.py:241 cinder/volume/drivers/san/hp/hp_3par_common.py:197
+#, python-format
+msgid "Running cmd (SSH): %s"
+msgstr "Eseguendo cmd (SSH): %s"
+
+#: cinder/utils.py:243
+msgid "Environment not supported over SSH"
+msgstr ""
+
+#: cinder/utils.py:247
+msgid "process_input not supported over SSH"
+msgstr ""
+
+#: cinder/utils.py:316 cinder/volume/drivers/huawei/huawei_iscsi.py:91
+msgid "Specify a password or private_key"
+msgstr ""
+
+#: cinder/utils.py:332 cinder/volume/drivers/huawei/huawei_iscsi.py:99
+#, python-format
+msgid "Error connecting via ssh: %s"
+msgstr ""
+
+#: cinder/utils.py:373
+#, python-format
+msgid "debug in callback: %s"
+msgstr "debug in callback: %s"
+
+#: cinder/utils.py:532
+#, python-format
+msgid "Link Local address is not found.:%s"
+msgstr ""
+
+#: cinder/utils.py:535
+#, python-format
+msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s"
+msgstr ""
+
+#: cinder/utils.py:569
+#, python-format
+msgid "Invalid backend: %s"
+msgstr ""
+
+#: cinder/utils.py:580
+#, python-format
+msgid "backend %s"
+msgstr ""
+
+#: cinder/utils.py:630
+msgid "in looping call"
+msgstr ""
+
+#: cinder/utils.py:813
+#, python-format
+msgid "Expected object of type: %s"
+msgstr ""
+
+#: cinder/utils.py:903
+#, python-format
+msgid "timefunc: '%(name)s' took %(total_time).2f secs"
+msgstr ""
+
+#: cinder/utils.py:1105
+#, python-format
+msgid "Could not remove tmpdir: %s"
+msgstr ""
+
+#: cinder/utils.py:1213
+#, python-format
+msgid "Unknown byte multiplier: %s"
+msgstr ""
+
+#: cinder/wsgi.py:121
+#, python-format
+msgid "Unable to find cert_file : %s"
+msgstr ""
+
+#: cinder/wsgi.py:124
+#, fuzzy, python-format
+msgid "Unable to find ca_file : %s"
+msgstr "Impossibile localizzare il volume %s"
+
+#: cinder/wsgi.py:127
+#, python-format
+msgid "Unable to find key_file : %s"
+msgstr ""
+
+#: cinder/wsgi.py:130
+msgid ""
+"When running server in SSL mode, you must specify both a cert_file and "
+"key_file option value in your configuration file"
+msgstr ""
+
+#: cinder/wsgi.py:163
+#, python-format
+msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds"
+msgstr ""
+
+#: cinder/wsgi.py:207
+#, python-format
+msgid "Started %(name)s on %(_host)s:%(_port)s"
+msgstr ""
+
+#: 
cinder/wsgi.py:226 +msgid "Stopping WSGI server." +msgstr "" + +#: cinder/wsgi.py:240 +msgid "WSGI server has stopped." +msgstr "" + +#: cinder/wsgi.py:309 +msgid "You must implement __call__" +msgstr "" + +#: cinder/api/auth.py:26 +msgid "" +"cinder.api.auth:CinderKeystoneContext is deprecated. Please use " +"cinder.api.middleware.auth:CinderKeystoneContext instead." +msgstr "" + +#: cinder/api/auth.py:34 +msgid "" +"cinder.api.auth:pipeline_factory is deprecated. Please use " +"cinder.api.middleware.auth:pipeline_factory instead." +msgstr "" + +#: cinder/api/common.py:63 cinder/api/common.py:97 cinder/volume/api.py:359 +msgid "limit param must be an integer" +msgstr "" + +#: cinder/api/common.py:66 cinder/api/common.py:101 cinder/volume/api.py:356 +msgid "limit param must be positive" +msgstr "" + +#: cinder/api/common.py:91 +msgid "offset param must be an integer" +msgstr "" + +#: cinder/api/common.py:105 +msgid "offset param must be positive" +msgstr "" + +#: cinder/api/common.py:133 +#, python-format +msgid "marker [%s] not found" +msgstr "" + +#: cinder/api/common.py:160 +#, python-format +msgid "href %s does not contain version" +msgstr "" + +#: cinder/api/extensions.py:184 +msgid "Initializing extension manager." +msgstr "" + +#: cinder/api/extensions.py:199 +#, python-format +msgid "Loaded extension: %s" +msgstr "" + +#: cinder/api/extensions.py:237 +#, python-format +msgid "Ext name: %s" +msgstr "" + +#: cinder/api/extensions.py:238 +#, python-format +msgid "Ext alias: %s" +msgstr "" + +#: cinder/api/extensions.py:239 +#, python-format +msgid "Ext description: %s" +msgstr "" + +#: cinder/api/extensions.py:241 +#, python-format +msgid "Ext namespace: %s" +msgstr "" + +#: cinder/api/extensions.py:242 +#, python-format +msgid "Ext updated: %s" +msgstr "" + +#: cinder/api/extensions.py:244 +#, python-format +msgid "Exception loading extension: %s" +msgstr "" + +#: cinder/api/extensions.py:258 +#, python-format +msgid "Loading extension %s" +msgstr "" + +#: cinder/api/extensions.py:264 +#, python-format +msgid "Calling extension factory %s" +msgstr "" + +#: cinder/api/extensions.py:278 +#, python-format +msgid "osapi_volume_extension is set to deprecated path: %s" +msgstr "" + +#: cinder/api/extensions.py:280 +#, python-format +msgid "" +"Please set your flag or cinder.conf settings for osapi_volume_extension " +"to: %s" +msgstr "" + +#: cinder/api/extensions.py:289 +#, python-format +msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:357 +#, python-format +msgid "Failed to load extension %(classpath)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:381 +#, python-format +msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgstr "" + +#: cinder/api/sizelimit.py:25 +msgid "" +"cinder.api.sizelimit:RequestBodySizeLimiter is deprecated. Please use " +"cinder.api.middleware.sizelimit:RequestBodySizeLimiter instead" +msgstr "" + +#: cinder/api/xmlutil.py:268 +msgid "element is not a child" +msgstr "" + +#: cinder/api/xmlutil.py:417 +msgid "root element selecting a list" +msgstr "" + +#: cinder/api/xmlutil.py:742 +#, python-format +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgstr "" + +#: cinder/api/xmlutil.py:861 +msgid "subclasses must implement construct()!" 
+msgstr "" + +#: cinder/api/contrib/admin_actions.py:80 +#, python-format +msgid "Updating %(resource)s '%(id)s' with '%(update)r'" +msgstr "" + +#: cinder/api/contrib/backups.py:128 +#, python-format +msgid "show called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:140 +#, python-format +msgid "delete called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:143 +#, python-format +msgid "Delete backup with id: %s" +msgstr "" + +#: cinder/api/contrib/backups.py:185 +#, python-format +msgid "Creating new backup %s" +msgstr "" + +#: cinder/api/contrib/backups.py:195 cinder/api/contrib/backups.py:230 +msgid "Incorrect request body format" +msgstr "" + +#: cinder/api/contrib/backups.py:201 +#, python-format +msgid "Creating backup of volume %(volume_id)s in container %(container)s" +msgstr "" + +#: cinder/api/contrib/backups.py:221 +#, python-format +msgid "Restoring backup %(backup_id)s (%(body)s)" +msgstr "" + +#: cinder/api/contrib/backups.py:234 +#, python-format +msgid "Restoring backup %(backup_id)s to volume %(volume_id)s" +msgstr "" + +#: cinder/api/contrib/extended_snapshot_attributes.py:61 +msgid "Snapshot not found." +msgstr "" + +#: cinder/api/contrib/hosts.py:83 cinder/api/openstack/wsgi.py:158 +msgid "cannot understand XML" +msgstr "" + +#: cinder/api/contrib/hosts.py:133 +#, python-format +msgid "Host '%s' could not be found." +msgstr "" + +#: cinder/api/contrib/hosts.py:162 +#, python-format +msgid "Invalid status: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:165 +#, python-format +msgid "Invalid update setting: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:177 +#, python-format +msgid "Setting host %(host)s to %(state)s." +msgstr "" + +#: cinder/api/contrib/hosts.py:202 +msgid "Describe-resource is admin only functionality" +msgstr "" + +#: cinder/api/contrib/hosts.py:210 +msgid "Host not found" +msgstr "" + +#: cinder/api/contrib/quotas.py:63 +msgid "Quota limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:102 +msgid "Request body empty" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:106 +#: cinder/api/v1/snapshot_metadata.py:77 cinder/api/v1/volume_metadata.py:77 +#: cinder/api/v2/snapshot_metadata.py:77 +msgid "Request body and URI mismatch" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:109 +#: cinder/api/v1/snapshot_metadata.py:81 cinder/api/v1/volume_metadata.py:81 +#: cinder/api/v2/snapshot_metadata.py:81 +msgid "Request body contains too many items" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:159 +msgid "Invalid request body" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:163 +msgid "No image_name was specified in request." +msgstr "" + +#: cinder/api/middleware/fault.py:45 +#, python-format +msgid "Caught error: %s" +msgstr "" + +#: cinder/api/middleware/fault.py:54 cinder/api/openstack/wsgi.py:886 +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "" + +#: cinder/api/middleware/sizelimit.py:54 cinder/api/middleware/sizelimit.py:63 +#: cinder/api/middleware/sizelimit.py:77 +msgid "Request is too large." 
+msgstr "" + +#: cinder/api/openstack/__init__.py:74 +msgid "Must specify an ExtensionManager class" +msgstr "" + +#: cinder/api/openstack/__init__.py:85 +#, python-format +msgid "Extended resource: %s" +msgstr "" + +#: cinder/api/openstack/__init__.py:110 +#, python-format +msgid "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" +msgstr "" + +#: cinder/api/openstack/__init__.py:115 +#, python-format +msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgstr "" + +#: cinder/api/openstack/__init__.py:128 +msgid "" +"cinder.api.openstack:FaultWrapper is deprecated. Please use " +"cinder.api.middleware.fault:FaultWrapper instead." +msgstr "" + +#: cinder/api/openstack/urlmap.py:25 +msgid "" +"cinder.api.openstack.urlmap:urlmap_factory is deprecated. Please use " +"cinder.api.urlmap:urlmap_factory instead." +msgstr "" + +#: cinder/api/openstack/wsgi.py:134 cinder/api/openstack/wsgi.py:537 +msgid "cannot understand JSON" +msgstr "" + +#: cinder/api/openstack/wsgi.py:542 +msgid "too many body keys" +msgstr "" + +#: cinder/api/openstack/wsgi.py:581 +#, python-format +msgid "Exception handling resource: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:586 +#, python-format +msgid "Fault thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:589 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:697 +msgid "Unrecognized Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:701 +msgid "No Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:705 +msgid "Empty body provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:816 +#, python-format +msgid "There is no such action: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:819 cinder/api/openstack/wsgi.py:832 +#: cinder/api/v1/snapshot_metadata.py:55 cinder/api/v1/snapshot_metadata.py:73 +#: cinder/api/v1/snapshot_metadata.py:98 cinder/api/v1/snapshot_metadata.py:123 +#: cinder/api/v1/volume_metadata.py:55 cinder/api/v1/volume_metadata.py:73 +#: cinder/api/v1/volume_metadata.py:98 cinder/api/v1/volume_metadata.py:123 +#: cinder/api/v2/snapshot_metadata.py:55 cinder/api/v2/snapshot_metadata.py:73 +#: cinder/api/v2/snapshot_metadata.py:98 cinder/api/v2/snapshot_metadata.py:123 +msgid "Malformed request body" +msgstr "" + +#: cinder/api/openstack/wsgi.py:829 +msgid "Unsupported Content-Type" +msgstr "" + +#: cinder/api/openstack/wsgi.py:841 +msgid "Malformed request url" +msgstr "" + +#: cinder/api/openstack/wsgi.py:889 +#, python-format +msgid "%(url)s returned a fault: %(e)s" +msgstr "" + +#: cinder/api/openstack/volume/__init__.py:25 +msgid "" +"cinder.api.openstack.volume:APIRouter is deprecated. Please use " +"cinder.api.v1.router:APIRouter instead." +msgstr "" + +#: cinder/api/openstack/volume/versions.py:26 +msgid "" +"cinder.api.openstack.volume.versions.Versions is deprecated. Please use " +"cinder.api.versions.Versions instead." +msgstr "" + +#: cinder/api/v1/limits.py:144 cinder/api/v2/limits.py:144 +#, python-format +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." +msgstr "" + +#: cinder/api/v1/limits.py:270 cinder/api/v2/limits.py:270 +msgid "This request was rate-limited." 
+msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:39 cinder/api/v1/snapshot_metadata.py:119 +#: cinder/api/v1/snapshot_metadata.py:158 cinder/api/v2/snapshot_metadata.py:39 +#: cinder/api/v2/snapshot_metadata.py:119 +#: cinder/api/v2/snapshot_metadata.py:158 +#, fuzzy +msgid "snapshot does not exist" +msgstr "istanza %s: creazione snapshot in corso" + +#: cinder/api/v1/snapshot_metadata.py:141 +#: cinder/api/v1/snapshot_metadata.py:151 cinder/api/v1/volume_metadata.py:141 +#: cinder/api/v1/volume_metadata.py:151 cinder/api/v2/snapshot_metadata.py:141 +#: cinder/api/v2/snapshot_metadata.py:151 +msgid "Metadata item was not found" +msgstr "" + +#: cinder/api/v1/snapshots.py:123 cinder/api/v2/snapshots.py:123 +#, python-format +msgid "Delete snapshot with id: %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:173 cinder/api/v2/snapshots.py:179 +#, python-format +msgid "Create snapshot from volume %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:177 cinder/api/v2/snapshots.py:188 +#, python-format +msgid "Invalid value '%s' for force. " +msgstr "" + +#: cinder/api/v1/volume_metadata.py:39 cinder/api/v1/volume_metadata.py:119 +#: cinder/api/v1/volume_metadata.py:158 +msgid "volume does not exist" +msgstr "" + +#: cinder/api/v1/volumes.py:106 +#, python-format +msgid "vol=%s" +msgstr "" + +#: cinder/api/v1/volumes.py:236 cinder/api/v2/volumes.py:153 +#, python-format +msgid "Delete volume with id: %s" +msgstr "" + +#: cinder/api/v1/volumes.py:278 cinder/api/v1/volumes.py:282 +#: cinder/api/v2/volumes.py:209 cinder/api/v2/volumes.py:213 +msgid "Invalid imageRef provided." +msgstr "" + +#: cinder/api/v1/volumes.py:339 cinder/api/v2/volumes.py:272 +#, python-format +msgid "Create volume of %s GB" +msgstr "" + +#: cinder/api/v1/volumes.py:418 +#, python-format +msgid "Removing options '%(bad_options)s' from query" +msgstr "" + +#: cinder/api/v2/volumes.py:359 +#, python-format +msgid "Removing options '%s' from query" +msgstr "" + +#: cinder/backup/api.py:53 +#, fuzzy +msgid "Backup status must be available or error" +msgstr "Lo stato del volume deve essere disponibile" + +#: cinder/backup/api.py:78 +#, fuzzy +msgid "Volume to be backed up must be available" +msgstr "Lo stato del volume deve essere disponibile" + +#: cinder/backup/api.py:112 +#, fuzzy +msgid "Backup status must be available" +msgstr "Lo stato del volume deve essere disponibile" + +#: cinder/backup/api.py:117 +msgid "Backup to be restored has invalid size" +msgstr "" + +#: cinder/backup/api.py:126 +#, python-format +msgid "Creating volume of %(size)s GB for restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/api.py:140 +#, python-format +msgid "" +"volume size %(volume_size)d is too small to restore backup of size " +"%(size)d." 
+msgstr "" + +#: cinder/backup/api.py:145 +#, fuzzy +msgid "Volume to be restored to must be available" +msgstr "Lo stato del volume deve essere disponibile" + +#: cinder/backup/api.py:151 +msgid "Volume to be restored to is smaller than the backup to be restored" +msgstr "" + +#: cinder/backup/api.py:155 +#, python-format +msgid "Overwriting volume %(volume_id)s with restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/manager.py:79 +msgid "Cleaning up incomplete backup operations" +msgstr "" + +#: cinder/backup/manager.py:83 +#, python-format +msgid "Resetting volume %s to available (was backing-up)" +msgstr "" + +#: cinder/backup/manager.py:87 +#, python-format +msgid "Resetting volume %s to error_restoring (was restoring-backup)" +msgstr "" + +#: cinder/backup/manager.py:98 +#, python-format +msgid "Resetting backup %s to error (was creating)" +msgstr "" + +#: cinder/backup/manager.py:104 +#, python-format +msgid "Resetting backup %s to available (was restoring)" +msgstr "" + +#: cinder/backup/manager.py:109 +#, python-format +msgid "Resuming delete on backup: %s" +msgstr "" + +#: cinder/backup/manager.py:119 +#, python-format +msgid "create_backup started, backup: %(backup_id)s for volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:128 +#, python-format +msgid "" +"create_backup aborted, expected volume status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:137 +#, python-format +msgid "" +"create_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:160 +#, python-format +msgid "create_backup finished. backup: %s" +msgstr "" + +#: cinder/backup/manager.py:166 +#, python-format +msgid "" +"restore_backup started, restoring backup: %(backup_id)s to volume: " +"%(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:175 +#, python-format +msgid "" +"restore_backup aborted, expected volume status %(expected_status)s but " +"got %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:183 +#, python-format +msgid "" +"restore_backup aborted, expected backup status %(expected_status)s but " +"got %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:199 +#, python-format +msgid "" +"restore_backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]" +msgstr "" + +#: cinder/backup/manager.py:220 +#, python-format +msgid "" +"restore_backup finished, backup: %(backup_id)s restored to volume: " +"%(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:228 +#, python-format +msgid "delete_backup started, backup: %s" +msgstr "" + +#: cinder/backup/manager.py:234 +#, python-format +msgid "" +"delete_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:244 +#, python-format +msgid "" +"delete_backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]" +msgstr "" + +#: cinder/backup/manager.py:264 +#, python-format +msgid "delete_backup finished, backup %s deleted" +msgstr "" + +#: cinder/backup/services/swift.py:96 +#, python-format +msgid "unsupported compression algorithm: %s" +msgstr "" + +#: cinder/backup/services/swift.py:117 +#, python-format +msgid "_check_container_exists: container: %s" +msgstr "" + +#: 
cinder/backup/services/swift.py:122 +#, python-format +msgid "container %s does not exist" +msgstr "" + +#: cinder/backup/services/swift.py:127 +#, python-format +msgid "container %s exists" +msgstr "" + +#: cinder/backup/services/swift.py:133 +#, python-format +msgid "_create_container started, container: %(container)s,backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:148 +#, python-format +msgid "_generate_swift_object_name_prefix: %s" +msgstr "" + +#: cinder/backup/services/swift.py:159 +#, python-format +msgid "generated object list: %s" +msgstr "" + +#: cinder/backup/services/swift.py:169 +#, python-format +msgid "" +"_write_metadata started, container name: %(container)s, metadata " +"filename: %(filename)s" +msgstr "" + +#: cinder/backup/services/swift.py:184 +#, python-format +msgid "" +"error writing metadata file to swift, MD5 of metadata file in swift " +"[%(etag)s] is not the same as MD5 of metadata file sent to swift " +"[%(md5)s]" +msgstr "" + +#: cinder/backup/services/swift.py:188 +msgid "_write_metadata finished" +msgstr "" + +#: cinder/backup/services/swift.py:193 +#, python-format +msgid "" +"_read_metadata started, container name: %(container)s, metadata filename:" +" %(filename)s" +msgstr "" + +#: cinder/backup/services/swift.py:197 +#, python-format +msgid "_read_metadata finished (%s)" +msgstr "" + +#: cinder/backup/services/swift.py:207 +#, fuzzy, python-format +msgid "volume size %d is invalid." +msgstr "La richiesta non è valida." + +#: cinder/backup/services/swift.py:221 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to swift, volume size: " +"%(volume_size_bytes)d, swift object names prefix %(object_prefix)s, " +"availability zone: %(availability_zone)s" +msgstr "" + +#: cinder/backup/services/swift.py:237 +msgid "reading chunk of data from volume" +msgstr "" + +#: cinder/backup/services/swift.py:244 +#, python-format +msgid "" +"compressed %(data_size_bytes)d bytes of data to %(comp_size_bytes)d bytes" +" using %(algorithm)s" +msgstr "" + +#: cinder/backup/services/swift.py:248 +msgid "not compressing data" +msgstr "" + +#: cinder/backup/services/swift.py:252 +msgid "About to put_object" +msgstr "" + +#: cinder/backup/services/swift.py:257 +#, python-format +msgid "swift MD5 for %(object_name)s: %(etag)s" +msgstr "" + +#: cinder/backup/services/swift.py:260 +#, python-format +msgid "backup MD5 for %(object_name)s: %(md5)s" +msgstr "" + +#: cinder/backup/services/swift.py:262 +#, python-format +msgid "" +"error writing object to swift, MD5 of object in swift %(etag)s is not the" +" same as MD5 of object sent to swift %(md5)s" +msgstr "" + +#: cinder/backup/services/swift.py:268 +msgid "Calling eventlet.sleep(0)" +msgstr "" + +#: cinder/backup/services/swift.py:276 +#, python-format +msgid "backup %s finished." +msgstr "" + +#: cinder/backup/services/swift.py:281 +#, python-format +msgid "v1 swift volume backup restore of %s started" +msgstr "" + +#: cinder/backup/services/swift.py:287 +#, python-format +msgid "metadata_object_names = %s" +msgstr "" + +#: cinder/backup/services/swift.py:293 +msgid "" +"restore_backup aborted, actual swift object list in swift does not match " +"object list stored in metadata" +msgstr "" + +#: cinder/backup/services/swift.py:299 +#, python-format +msgid "" +"restoring object from swift. 
backup: %(backup_id)s, container: " +"%(container)s, swift object name: %(object_name)s, volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:309 +#, python-format +msgid "decompressing data using %s algorithm" +msgstr "" + +#: cinder/backup/services/swift.py:323 +#, python-format +msgid "v1 swift volume backup restore of %s finished" +msgstr "" + +#: cinder/backup/services/swift.py:331 +#, python-format +msgid "" +"starting restore of backup %(object_prefix)s from swift container: " +"%(container)s, to volume %(volume_id)s, backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:339 +#, python-format +msgid "Restoring swift backup version %s" +msgstr "" + +#: cinder/backup/services/swift.py:344 +#, python-format +msgid "No support to restore swift backup version %s" +msgstr "" + +#: cinder/backup/services/swift.py:348 +#, python-format +msgid "restore %(backup_id)s to %(volume_id)s finished." +msgstr "" + +#: cinder/backup/services/swift.py:362 +msgid "swift error while listing objects, continuing with delete" +msgstr "" + +#: cinder/backup/services/swift.py:371 +#, python-format +msgid "swift error while deleting object %s, continuing with delete" +msgstr "" + +#: cinder/backup/services/swift.py:374 +#, python-format +msgid "deleted swift object: %(swift_object_name)s in container: %(container)s" +msgstr "" + +#: cinder/backup/services/swift.py:380 +#, python-format +msgid "delete %s finished" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:152 +#, python-format +msgid "Creating iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:171 cinder/brick/iscsi/iscsi.py:264 +#: cinder/brick/iscsi/iscsi.py:400 cinder/brick/iscsi/iscsi.py:409 +#, python-format +msgid "Failed to create iscsi target for volume id:%(vol_id)s." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:181 +#, python-format +msgid "" +"Failed to create iscsi target for volume id:%(vol_id)s. Please ensure " +"your tgtd config file contains 'include %(volumes_dir)s/*'" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:192 +#, python-format +msgid "Removing iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:209 cinder/brick/iscsi/iscsi.py:426 +#, python-format +msgid "Failed to remove iscsi target for volume id:%(vol_id)s." 
+msgstr "" + +#: cinder/brick/iscsi/iscsi.py:218 cinder/brick/iscsi/iscsi.py:434 +msgid "valid iqn needed for show_target" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:270 +#, python-format +msgid "Removing iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:356 +msgid "rtstool is not installed correctly" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:376 +#, python-format +msgid "Creating iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:416 +#, fuzzy, python-format +msgid "Removing iscsi_target: %s" +msgstr "Riavviando l'istanza %s" + +#: cinder/brick/iscsi/iscsi.py:455 +#, python-format +msgid "Failed to add initiator iqn %s to target" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:35 +#, fuzzy, python-format +msgid "Unable to find Volume Group: %s" +msgstr "Impossibile smontare il volume %s" + +#: cinder/brick/local_dev/lvm.py:41 +#, fuzzy, python-format +msgid "Failed to create Volume Group: %s" +msgstr "Impossibile localizzare il volume %s" + +#: cinder/brick/local_dev/lvm.py:75 +msgid "Error creating Volume Group" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:76 +#, python-format +msgid "Cmd :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:77 +#, python-format +msgid "StdOut :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:78 +#, python-format +msgid "StdErr :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:82 +#, fuzzy, python-format +msgid "Unable to locate Volume Group %s" +msgstr "Impossibile localizzare il volume %s" + +#: cinder/brick/local_dev/lvm.py:265 +#, fuzzy, python-format +msgid "Unable to find VG: %s" +msgstr "Impossibile smontare il volume %s" + +#: cinder/brick/local_dev/lvm.py:284 +msgid "" +"Requested to setup thin provisioning, however current LVM version does " +"not support it." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:337 +#, fuzzy, python-format +msgid "Unable to find LV: %s" +msgstr "Impossibile smontare il volume %s" + +#: cinder/common/sqlalchemyutils.py:68 +msgid "Id not in sort_keys; is sort_keys unique?" +msgstr "" + +#: cinder/common/sqlalchemyutils.py:116 +msgid "Unknown sort direction, must be 'desc' or 'asc'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:51 +msgid "Use of empty request context is deprecated" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:178 +#, python-format +msgid "Unrecognized read_deleted value '%s'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:786 +#, python-format +msgid "Change will make usage less than 0 for the following resources: %(unders)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1797 cinder/db/sqlalchemy/api.py:1823 +#, python-format +msgid "No backend config with id %(sm_backend_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1880 +#, python-format +msgid "No sm_flavor called %(sm_flavor)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1924 +#, python-format +msgid "No sm_volume with id %(volume_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1987 +#, python-format +msgid "No backup with id %(backup_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:74 +msgid "version should be an integer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:101 +msgid "Upgrade DB using Essex release first." +msgstr "" + +#: cinder/db/sqlalchemy/session.py:71 +#, python-format +msgid "Got mysql server has gone away: %s" +msgstr "" + +#: cinder/db/sqlalchemy/session.py:132 +#, python-format +msgid "SQL connection failed. %s attempts left." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:243 +msgid "Exception while creating table." 
+msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:272 +msgid "Downgrade from initial Cinder install is unsupported." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:85 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:123 +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:82 +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:47 +#, python-format +msgid "Table |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:135 +msgid "quota_classes table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:142 +msgid "quota_usages table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:149 +msgid "reservations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:62 +msgid "Exception while creating table 'volume_glance_metedata'" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:77 +msgid "volume_glance_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:94 +msgid "backups table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:60 +msgid "snapshot_metadata table not dropped" +msgstr "" + +#: cinder/image/glance.py:151 +#, python-format +msgid "" +"Error contacting glance server '%(host)s:%(port)s' for '%(method)s', " +"%(extra)s." +msgstr "" + +#: cinder/image/image_utils.py:116 +msgid "Snapshot list encountered but no header found!" +msgstr "" + +#: cinder/image/image_utils.py:224 +msgid "'qemu-img info' parsing failed." +msgstr "" + +#: cinder/image/image_utils.py:231 +#, python-format +msgid "fmt=%(fmt)s backed by:%(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:248 +#, python-format +msgid "Converted to raw, but format is now %s" +msgstr "" + +#: cinder/image/image_utils.py:278 +#, python-format +msgid "Converted to %(f1)s, but format is now %(f2)s" +msgstr "" + +#: cinder/openstack/common/exception.py:104 +msgid "Uncaught exception" +msgstr "" + +#: cinder/openstack/common/excutils.py:48 +#, python-format +msgid "Original exception being dropped: %s" +msgstr "" + +#: cinder/openstack/common/lockutils.py:102 +#, python-format +msgid "Could not release the acquired lock `%s`" +msgstr "" + +#: cinder/openstack/common/lockutils.py:188 +#, python-format +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:199 +#, python-format +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:226 +#, python-format +msgid "Got file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:234 +#, python-format +msgid "Released file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." 
+msgstr "" + +#: cinder/openstack/common/log.py:226 +#, fuzzy, python-format +msgid "Deprecated: %s" +msgstr "ricevuto %s" + +#: cinder/openstack/common/log.py:329 +#, python-format +msgid "Error loading logging config %(log_config)s: %(err_msg)s" +msgstr "" + +#: cinder/openstack/common/log.py:379 +#, python-format +msgid "syslog facility must be one of: %s" +msgstr "" + +#: cinder/openstack/common/log.py:537 +#, python-format +msgid "Fatal call to deprecated config: %(msg)s" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:84 +#, python-format +msgid "task run outlasted interval by %s sec" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:91 +msgid "in fixed duration looping call" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:131 +#, python-format +msgid "Dynamic looping call sleeping for %.02f seconds" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:138 +msgid "in dynamic looping call" +msgstr "" + +#: cinder/openstack/common/policy.py:149 +#, python-format +msgid "" +"Inheritance-based rules are deprecated; use the default brain instead of " +"%s." +msgstr "" + +#: cinder/openstack/common/policy.py:163 +#, python-format +msgid "Failed to understand rule %(match)r" +msgstr "" + +#: cinder/openstack/common/policy.py:173 +#, python-format +msgid "Inheritance-based rules are deprecated; update _check_%s" +msgstr "" + +#: cinder/openstack/common/policy.py:180 +#, python-format +msgid "No handler for matches of kind %s" +msgstr "" + +#: cinder/openstack/common/service.py:112 +#: cinder/openstack/common/service.py:275 +#, fuzzy +msgid "Full set of CONF:" +msgstr "Insieme di FLAGS:" + +#: cinder/openstack/common/service.py:261 +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "" + +#: cinder/openstack/common/strutils.py:72 +#, python-format +msgid "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:125 +#, python-format +msgid "%s not in valid priorities" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:141 +#, python-format +msgid "" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:171 +#, python-format +msgid "Failed to load notifier %s. These notifications will not be sent." +msgstr "" + +#: cinder/openstack/common/notifier/rabbit_notifier.py:27 +msgid "The rabbit_notifier is now deprecated. Please use rpc_notifier instead." +msgstr "" + +#: cinder/openstack/common/notifier/rpc_notifier.py:45 +#: cinder/openstack/common/notifier/rpc_notifier2.py:51 +#, python-format +msgid "Could not send notification to %(topic)s. Payload=%(message)s" +msgstr "" + +#: cinder/openstack/common/rpc/__init__.py:106 +#, python-format +msgid "" +"A RPC is being made while holding a lock. The locks currently held are " +"%(locks)s. This is probably a bug. Please report it. Include the " +"following: [%(stack)s]." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:75 +msgid "Pool creating new connection" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:200 +#, python-format +msgid "no calling threads waiting for msg_id : %s, message : %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:208 +#, python-format +msgid "" +"Number of call waiters is greater than warning threshhold: %d. There " +"could be a MulticallProxyWaiter leak." 
+msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:291 +#, python-format +msgid "unpacked context: %s" +msgstr "contesto decompresso: %s" + +#: cinder/openstack/common/rpc/amqp.py:337 +#, python-format +msgid "UNIQUE_ID is %s." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:406 +#, python-format +msgid "received %s" +msgstr "ricevuto %s" + +#: cinder/openstack/common/rpc/amqp.py:414 +#, python-format +msgid "no method for message: %s" +msgstr "nessun metodo per il messaggio: %s" + +#: cinder/openstack/common/rpc/amqp.py:415 +#, python-format +msgid "No method for message: %s" +msgstr "nessun metodo per il messagggio: %s" + +#: cinder/openstack/common/rpc/amqp.py:443 +#: cinder/openstack/common/rpc/impl_zmq.py:286 +#, python-format +msgid "Expected exception during message handling (%s)" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:451 +#: cinder/openstack/common/rpc/impl_zmq.py:292 +msgid "Exception during message handling" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:586 +#, python-format +msgid "Making synchronous call on %s ..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:589 +#, python-format +msgid "MSG_ID is %s" +msgstr "MSG_ID é %s" + +#: cinder/openstack/common/rpc/amqp.py:623 +#, python-format +msgid "Making asynchronous cast on %s..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:632 +msgid "Making asynchronous fanout cast..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:660 +#, python-format +msgid "Sending %(event_type)s on %(topic)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:74 +#, fuzzy +msgid "An unknown RPC related exception occurred." +msgstr "E' stato riscontrato un errore sconosciuto" + +#: cinder/openstack/common/rpc/common.py:104 +#, python-format +msgid "" +"Remote error: %(exc_type)s %(value)s\n" +"%(traceback)s." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:121 +#, python-format +msgid "" +"Timeout while waiting on RPC response - topic: \"%(topic)s\", RPC method:" +" \"%(method)s\" info: \"%(info)s\"" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:137 +#: cinder/openstack/common/rpc/common.py:138 +#: cinder/openstack/common/rpc/common.py:139 +msgid "" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:143 +#, python-format +msgid "Found duplicate message(%(msg_id)s). Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:147 +msgid "Invalid reuse of an RPC connection." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:151 +#, python-format +msgid "Specified RPC version, %(version)s, not supported by this endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:156 +#, python-format +msgid "" +"Specified RPC envelope version, %(version)s, not supported by this " +"endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:280 +#, python-format +msgid "Failed to sanitize %(item)s. Key error %(err)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:302 +#, python-format +msgid "Returning exception %s to caller" +msgstr "Sollevando eccezione %s al chiamante" + +#: cinder/openstack/common/rpc/impl_kombu.py:169 +#: cinder/openstack/common/rpc/impl_qpid.py:133 +msgid "Failed to process message... skipping it." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:480 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:502 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:539 +#, python-format +msgid "" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:555 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:609 +#: cinder/openstack/common/rpc/impl_qpid.py:403 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:627 +#: cinder/openstack/common/rpc/impl_qpid.py:418 +#, python-format +msgid "Timed out waiting for RPC response: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:631 +#: cinder/openstack/common/rpc/impl_qpid.py:422 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:670 +#: cinder/openstack/common/rpc/impl_qpid.py:457 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:351 +#, python-format +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:357 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:370 +msgid "Re-established AMQP queues" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:430 +msgid "Error processing message. Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:97 +msgid "JSON serialization failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:104 +#, python-format +msgid "Deserializing: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:140 +#, python-format +msgid "Connecting to %(addr)s with %(type)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:141 +#, python-format +msgid "-> Subscribed to %(subscribe)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:142 +#, python-format +msgid "-> bind: %(bind)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:150 +msgid "Could not open socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:162 +#, python-format +msgid "Subscribing to %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:204 +msgid "You cannot recv on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:209 +msgid "You cannot send on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:273 +#, fuzzy, python-format +msgid "Running func with context: %s" +msgstr "contesto decompresso: %s" + +#: cinder/openstack/common/rpc/impl_zmq.py:311 +msgid "Sending reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:345 +msgid "RPC message did not include method." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:380 +msgid "Registering reactor" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:392 +msgid "In reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:407 +msgid "Out reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:411 +msgid "Consuming socket" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:453 +#, python-format +msgid "CONSUMER GOT %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:465 +#, python-format +msgid "Creating proxy for topic: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:471 +msgid "Topic contained dangerous characters." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:496 +#, python-format +msgid "ROUTER RELAY-OUT SUCCEEDED %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:505 +msgid "Topic socket file creation failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:510 +#, python-format +msgid "ROUTER RELAY-OUT QUEUED %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:513 +#, python-format +msgid "Local per-topic backlog buffer full for topic %(topic)s. Dropping message." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:532 +#, python-format +msgid "Could not create IPC directory %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:542 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:576 +#, python-format +msgid "CONSUMER RECEIVED DATA: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:578 +#, python-format +msgid "ROUTER RELAY-OUT %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:600 +msgid "ZMQ Envelope version unsupported or unknown." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:628 +msgid "Skipping topic registration. Already registered." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:635 +#, python-format +msgid "Consumer is a zmq.%s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:687 +msgid "Creating payload" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:700 +msgid "Creating queue socket for reply waiter" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:713 +msgid "Sending cast" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:716 +msgid "Cast sent; Waiting reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:719 +#, fuzzy, python-format +msgid "Received message: %s" +msgstr "ricevuto %s" + +#: cinder/openstack/common/rpc/impl_zmq.py:720 +msgid "Unpacking response" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:729 +msgid "Unsupported or unknown ZMQ envelope returned." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:736 +#, fuzzy +msgid "RPC Message Invalid." +msgstr "La richiesta non è valida." + +#: cinder/openstack/common/rpc/impl_zmq.py:760 +#, python-format +msgid "%(msg)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:763 +#, python-format +msgid "Sending message(s) to: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:767 +msgid "No matchmaker results. Not casting." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:770 +msgid "No match from matchmaker." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:53 +msgid "Match not found by MatchMaker." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:89 +msgid "Matchmaker does not implement registration or heartbeat." 
+msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:239 +#, python-format +msgid "Matchmaker unregistered: %s, %s" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:250 +msgid "Register before starting heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:343 +#: cinder/openstack/common/rpc/matchmaker.py:361 +#, python-format +msgid "No key defining hosts for topic '%s', see ringfile" +msgstr "" + +#: cinder/scheduler/chance.py:51 cinder/scheduler/simple.py:90 +msgid "Is the appropriate service running?" +msgstr "" + +#: cinder/scheduler/chance.py:56 +msgid "Could not find another host" +msgstr "" + +#: cinder/scheduler/driver.py:89 +msgid "Must implement a fallback schedule" +msgstr "" + +#: cinder/scheduler/driver.py:93 +msgid "Must implement schedule_create_volume" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:113 +msgid "Invalid value for 'scheduler_max_attempts', must be >=1" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:131 +#, python-format +msgid "" +"Error scheduling %(volume_id)s from last vol-service: %(last_host)s : " +"%(exc)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:160 +#, python-format +msgid "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:208 +#, python-format +msgid "Filtered %(hosts)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:214 +#, python-format +msgid "Choosing %(best_host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:240 +#, python-format +msgid "Ignoring %(service_name)s service update from %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:244 +#, python-format +msgid "Received %(service_name)s service update from %(host)s." +msgstr "" + +#: cinder/scheduler/host_manager.py:266 +msgid "service is down or disabled." +msgstr "" + +#: cinder/scheduler/manager.py:119 +#, python-format +msgid "Failed to schedule_%(method)s: %(ex)s" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:69 +#, python-format +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:78 +#, python-format +msgid "Could not decode scheduler options: '%(e)s'" +msgstr "" + +#: cinder/scheduler/simple.py:79 +msgid "Not enough allocatable volume gigabytes remaining" +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:37 +msgid "Free capacity not set: volume node info collection broken." +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:51 +#, python-format +msgid "" +"Insufficient free space for volume creation (requested / avail): " +"%(requested)s/%(available)s" +msgstr "" + +#: cinder/scheduler/filters/retry_filter.py:41 +#, python-format +msgid "Host %(host)s %(pass_msg)s. 
Previously tried hosts: %(hosts)s"
+msgstr ""
+
+#: cinder/tests/fake_driver.py:45 cinder/volume/driver.py:506
+#, python-format
+msgid "FAKE ISCSI: %s"
+msgstr ""
+
+#: cinder/tests/fake_driver.py:93 cinder/tests/fake_driver.py:98
+#, python-format
+msgid "LoggingVolumeDriver: %s"
+msgstr ""
+
+#: cinder/tests/fake_utils.py:72
+#, python-format
+msgid "Faking execution of cmd (subprocess): %s"
+msgstr ""
+
+#: cinder/tests/fake_utils.py:80
+#, python-format
+msgid "Faked command matched %s"
+msgstr ""
+
+#: cinder/tests/fake_utils.py:96
+#, python-format
+msgid "Faked command raised an exception %s"
+msgstr ""
+
+#: cinder/tests/fake_utils.py:101
+#, python-format
+msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'"
+msgstr ""
+
+#: cinder/tests/test_misc.py:59
+#, python-format
+msgid ""
+"The following migrations are missing a downgrade:\n"
+"\t%s"
+msgstr ""
+
+#: cinder/tests/test_storwize_svc.py:243
+#, python-format
+msgid "unrecognized argument %s"
+msgstr ""
+
+#: cinder/tests/test_storwize_svc.py:1217
+#, python-format
+msgid "Run CLI command: %s"
+msgstr ""
+
+#: cinder/tests/test_storwize_svc.py:1220
+#, python-format
+msgid ""
+"CLI output:\n"
+" stdout: %(stdout)s\n"
+" stderr: %(stderr)s"
+msgstr ""
+
+#: cinder/tests/test_storwize_svc.py:1225
+#, python-format
+msgid ""
+"CLI Exception output:\n"
+" stdout: %(out)s\n"
+" stderr: %(err)s"
+msgstr ""
+
+#: cinder/tests/test_volume_types.py:58
+#, python-format
+msgid "Given data: %s"
+msgstr ""
+
+#: cinder/tests/test_volume_types.py:59
+#, python-format
+msgid "Result data: %s"
+msgstr ""
+
+#: cinder/tests/api/contrib/test_backups.py:638
+#, fuzzy
+msgid "Invalid input"
+msgstr "È stato ricevuto un input non valido"
+
+#: cinder/tests/integrated/test_login.py:31
+#, fuzzy, python-format
+msgid "volume: %s"
+msgstr "volume %s: rimuovendo"
+
+#: cinder/tests/integrated/api/client.py:37
+#, python-format
+msgid ""
+"%(message)s\n"
+"Status Code: %(_status)s\n"
+"Body: %(_body)s"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:46
+msgid "Authentication error"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:54
+msgid "Authorization error"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:62
+msgid "Item not found"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:104
+#, python-format
+msgid "Doing %(method)s on %(relative_url)s"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:106
+#, python-format
+msgid "Body: %s"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:124
+#, python-format
+msgid "%(auth_uri)s => code %(http_status)s"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:150
+#, python-format
+msgid "%(relative_uri)s => code %(http_status)s"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:160
+msgid "Unexpected status code"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:167
+#, python-format
+msgid "Decoding JSON: %s"
+msgstr ""
+
+#: cinder/volume/api.py:96
+msgid "May specify only one of snapshot, imageRef or source volume"
+msgstr ""
+
+#: cinder/volume/api.py:103 cinder/volume/api.py:449
+#: cinder/volume/manager.py:554
+msgid "status must be available"
+msgstr ""
+
+#: cinder/volume/api.py:108
+msgid "Volume size cannot be lesser than the Snapshot size"
+msgstr ""
+
+#: cinder/volume/api.py:117
+msgid "Unable to clone volumes that are in an error state"
+msgstr ""
+
+#: cinder/volume/api.py:123
+msgid "Clones currently must be >= original volume size."
+msgstr "" + +#: cinder/volume/api.py:140 +#, python-format +msgid "Volume size '%s' must be an integer and greater than 0" +msgstr "" + +#: cinder/volume/api.py:150 +msgid "Size of specified image is larger than volume size." +msgstr "" + +#: cinder/volume/api.py:154 +msgid "Image minDisk size is larger than the volume size." +msgstr "" + +#: cinder/volume/api.py:168 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/api.py:177 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create volume (%(d_consumed)d " +"volumes already consumed)" +msgstr "" + +#: cinder/volume/api.py:308 +msgid "Failed to update quota for deleting volume" +msgstr "" + +#: cinder/volume/api.py:316 +#, fuzzy +msgid "Volume status must be available or error" +msgstr "Lo stato del volume deve essere disponibile" + +#: cinder/volume/api.py:321 +#, python-format +msgid "Volume still has %d dependent snapshots" +msgstr "" + +#: cinder/volume/api.py:374 cinder/volume/api.py:432 +#: cinder/volume/volume_types.py:65 +#, python-format +msgid "Searching by: %s" +msgstr "" + +#: cinder/volume/api.py:452 +msgid "already attached" +msgstr "" + +#: cinder/volume/api.py:459 +msgid "already detached" +msgstr "" + +#: cinder/volume/api.py:470 +#, fuzzy +msgid "Volume status must be available to reserve" +msgstr "Lo stato del volume deve essere disponibile" + +#: cinder/volume/api.py:519 +msgid "must be available" +msgstr "" + +#: cinder/volume/api.py:537 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG snapshot " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/api.py:546 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create snapshot (%(d_consumed)d " +"snapshots already consumed)" +msgstr "" + +#: cinder/volume/api.py:595 +#, fuzzy +msgid "Volume Snapshot status must be available or error" +msgstr "Lo stato del volume deve essere disponibile" + +#: cinder/volume/api.py:623 +msgid "Metadata property key blank" +msgstr "" + +#: cinder/volume/api.py:627 +msgid "Metadata property key greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:631 +msgid "Metadata property value greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:716 +#, fuzzy +msgid "Volume status must be available/in-use." +msgstr "Lo stato del volume deve essere disponibile" + +#: cinder/volume/api.py:719 +msgid "Volume status is in-use." +msgstr "" + +#: cinder/volume/driver.py:94 cinder/volume/drivers/netapp/nfs.py:253 +#, python-format +msgid "Recovering from a failed execute. Try number %s" +msgstr "" + +#: cinder/volume/driver.py:221 cinder/volume/drivers/emc/emc_smis_iscsi.py:119 +msgid "ISCSI provider_location not stored, using discovery" +msgstr "" + +#: cinder/volume/driver.py:269 +#, fuzzy, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "Impossibile localizzare il volume %s" + +#: cinder/volume/driver.py:273 cinder/volume/drivers/emc/emc_smis_iscsi.py:162 +#, python-format +msgid "ISCSI Discovery: Found %s" +msgstr "" + +#: cinder/volume/driver.py:361 +#, python-format +msgid "copy_image_to_volume %s." +msgstr "" + +#: cinder/volume/driver.py:378 +#, python-format +msgid "copy_volume_to_image %s." 
+msgstr "" + +#: cinder/volume/driver.py:437 +#, python-format +msgid "iSCSI device not found at %s" +msgstr "" + +#: cinder/volume/driver.py:439 +#, python-format +msgid "" +"ISCSI volume not yet found at: %(host_device)s. Will rescan & retry. Try" +" number: %(tries)s" +msgstr "" + +#: cinder/volume/driver.py:451 +#, python-format +msgid "Found iSCSI node %(host_device)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/volume/driver.py:469 cinder/volume/manager.py:689 +#: cinder/volume/drivers/lvm.py:560 cinder/volume/drivers/lvm.py:676 +#: cinder/volume/drivers/storwize_svc.py:1344 +#: cinder/volume/drivers/emc/emc_smis_common.py:855 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:241 +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1503 +#: cinder/volume/drivers/netapp/iscsi.py:1114 +#: cinder/volume/drivers/netapp/iscsi.py:1484 +#: cinder/volume/drivers/netapp/iscsi.py:2236 +#: cinder/volume/drivers/netapp/iscsi.py:2515 +#: cinder/volume/drivers/nexenta/volume.py:317 +msgid "Updating volume status" +msgstr "" + +#: cinder/volume/driver.py:544 +msgid "Driver must implement initialize_connection" +msgstr "" + +#: cinder/volume/manager.py:122 +#, python-format +msgid "Driver path %s is deprecated, update your configuration to the new path." +msgstr "" + +#: cinder/volume/manager.py:144 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "" + +#: cinder/volume/manager.py:149 +#, python-format +msgid "volume %s stuck in a downloading state" +msgstr "" + +#: cinder/volume/manager.py:154 +#, python-format +msgid "volume %s: skipping export" +msgstr "" + +#: cinder/volume/manager.py:156 +msgid "Resuming any in progress delete operations" +msgstr "" + +#: cinder/volume/manager.py:159 +#, python-format +msgid "Resuming delete on volume: %s" +msgstr "" + +#: cinder/volume/manager.py:219 +#, python-format +msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +msgstr "" + +#: cinder/volume/manager.py:228 +#, fuzzy, python-format +msgid "volume %s: creating from snapshot" +msgstr "volume %s: creazione in corso per l'esportazione" + +#: cinder/volume/manager.py:232 +#, fuzzy, python-format +msgid "volume %s: creating from existing volume" +msgstr "volume %s: creazione in corso per l'esportazione" + +#: cinder/volume/manager.py:236 +#, fuzzy, python-format +msgid "volume %s: creating from image" +msgstr "volume %s: creazione in corso" + +#: cinder/volume/manager.py:245 +#, python-format +msgid "volume %s: creating" +msgstr "volume %s: creazione in corso" + +#: cinder/volume/manager.py:277 +#, python-format +msgid "volume %s: creating export" +msgstr "volume %s: creazione in corso per l'esportazione" + +#: cinder/volume/manager.py:286 +#, fuzzy, python-format +msgid "volume %s: create failed" +msgstr "volume %s: creazione in corso" + +#: cinder/volume/manager.py:316 +#, python-format +msgid "volume %s: created successfully" +msgstr "volume %s: creato con successo" + +#: cinder/volume/manager.py:324 +#, python-format +msgid "Error: %s" +msgstr "" + +#: cinder/volume/manager.py:350 +#, python-format +msgid "volume %s: Error trying to reschedule create" +msgstr "" + +#: cinder/volume/manager.py:368 +msgid "Retry info not present, will not reschedule" +msgstr "" + +#: cinder/volume/manager.py:372 +msgid "No request spec, will not reschedule" +msgstr "" + +#: cinder/volume/manager.py:377 +#, python-format +msgid "volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d" +msgstr "" + +#: cinder/volume/manager.py:406 cinder/volume/manager.py:419 +#, python-format +msgid "volume %s: 
deleting" +msgstr "volume %s: rimuovendo" + +#: cinder/volume/manager.py:412 +#, fuzzy +msgid "volume is not local to this node" +msgstr "Volume ancora collegato" + +#: cinder/volume/manager.py:417 +#, python-format +msgid "volume %s: removing export" +msgstr "" + +#: cinder/volume/manager.py:422 +#, python-format +msgid "volume %s: volume is busy" +msgstr "" + +#: cinder/volume/manager.py:441 +msgid "Failed to update usages deleting volume" +msgstr "" + +#: cinder/volume/manager.py:445 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "volume %s: rimosso con successo" + +#: cinder/volume/manager.py:460 +#, python-format +msgid "snapshot %s: creating" +msgstr "" + +#: cinder/volume/manager.py:466 +#, python-format +msgid "snapshot %(snap_name)s: creating" +msgstr "" + +#: cinder/volume/manager.py:484 +#, python-format +msgid "snapshot %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:492 cinder/volume/manager.py:502 +#, python-format +msgid "snapshot %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:505 +#, fuzzy, python-format +msgid "snapshot %s: snapshot is busy" +msgstr "istanza %s: creazione snapshot in corso" + +#: cinder/volume/manager.py:530 +msgid "Failed to update usages deleting snapshot" +msgstr "" + +#: cinder/volume/manager.py:533 +#, python-format +msgid "snapshot %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:551 +msgid "being attached by another instance" +msgstr "" + +#: cinder/volume/manager.py:608 +#, python-format +msgid "Downloaded image %(image_id)s to %(volume_id)s successfully" +msgstr "" + +#: cinder/volume/manager.py:626 +#, python-format +msgid "Uploaded volume %(volume_id)s to image (%(image_id)s) successfully" +msgstr "" + +#: cinder/volume/manager.py:702 +msgid "Clear capabilities" +msgstr "" + +#: cinder/volume/manager.py:706 +#, python-format +msgid "Notification {%s} received" +msgstr "" + +#: cinder/volume/volume_types.py:41 +#, python-format +msgid "DB error: %s" +msgstr "" + +#: cinder/volume/volume_types.py:50 cinder/volume/volume_types.py:97 +msgid "id cannot be None" +msgstr "" + +#: cinder/volume/volume_types.py:109 +msgid "name cannot be None" +msgstr "" + +#: cinder/volume/volume_types.py:128 +#, python-format +msgid "" +"Default volume type is not found, please check default_volume_type " +"config: %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:91 +msgid "Running with CoraidDriver for ESM EtherCLoud" +msgstr "" + +#: cinder/volume/drivers/coraid.py:102 +#, python-format +msgid "Update session cookie %(session)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:108 cinder/volume/drivers/coraid.py:183 +#, python-format +msgid "Message : %(message)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:125 +#, python-format +msgid "Error while trying to set group: %(message)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:128 +#, fuzzy, python-format +msgid "Unable to find group: %(group)s" +msgstr "Impossibile smontare il volume %s" + +#: cinder/volume/drivers/coraid.py:161 +msgid "ESM urlOpen error" +msgstr "" + +#: cinder/volume/drivers/coraid.py:166 +msgid "JSON Error" +msgstr "" + +#: cinder/volume/drivers/coraid.py:170 +msgid "Request without URL" +msgstr "" + +#: cinder/volume/drivers/coraid.py:175 +#, python-format +msgid "Configure data : %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:177 +#, fuzzy, python-format +msgid "Configure response : %s" +msgstr "risposta %s" + +#: cinder/volume/drivers/coraid.py:199 +#, python-format +msgid "Unable to retrive volume infos for 
volume %(volname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:289 +msgid "Cannot login on Coraid ESM" +msgstr "" + +#: cinder/volume/drivers/coraid.py:308 +#, fuzzy, python-format +msgid "Fail to create volume %(volname)s" +msgstr "Impossibile localizzare il volume %s" + +#: cinder/volume/drivers/coraid.py:321 +#, fuzzy, python-format +msgid "Failed to delete volume %(volname)s" +msgstr "Impossibile localizzare il volume %s" + +#: cinder/volume/drivers/coraid.py:335 +#, fuzzy, python-format +msgid "Failed to Create Snapshot %(snapname)s" +msgstr "Impossibile riavviare l'istanza" + +#: cinder/volume/drivers/coraid.py:347 +#, python-format +msgid "Failed to Delete Snapshot %(snapname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:362 +#, python-format +msgid "Failed to Create Volume from Snapshot %(snapname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:383 +#, python-format +msgid "" +"Failed to Initialize Connection. Volume Name: %(volname)s Shelf: " +"%(shelf)s, Lun: %(lun)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:65 +#, python-format +msgid "There's no Gluster config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:70 +#, python-format +msgid "Gluster config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:80 +msgid "mount.glusterfs is not installed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:98 cinder/volume/drivers/nfs.py:168 +#, python-format +msgid "casted to %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:108 cinder/volume/drivers/nfs.py:178 +#, python-format +msgid "Volume %s does not have provider_location specified, skipping" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:168 cinder/volume/drivers/nfs.py:237 +#, python-format +msgid "Exception during mounting %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:247 cinder/volume/drivers/nfs.py:321 +#, python-format +msgid "%s is already mounted" +msgstr "" + +#: cinder/volume/drivers/lvm.py:82 +#, python-format +msgid "volume group %s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/lvm.py:197 +#, python-format +msgid "Size for volume: %s not found, skipping secure delete." +msgstr "" + +#: cinder/volume/drivers/lvm.py:204 +#, python-format +msgid "Performing secure delete on volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:218 +#, python-format +msgid "Error unrecognized volume_clear option: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:238 +#, python-format +msgid "snapshot: %s not found, skipping delete operations" +msgstr "" + +#: cinder/volume/drivers/lvm.py:268 cinder/volume/drivers/lvm.py:656 +#, python-format +msgid "Creating clone of volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:345 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provision for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:364 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:378 +msgid "Detected inconsistency in provider_location id" +msgstr "" + +#: cinder/volume/drivers/lvm.py:430 +#, python-format +msgid "Symbolic link %s not found" +msgstr "" + +#: cinder/volume/drivers/lvm.py:510 cinder/volume/drivers/lvm.py:524 +#, python-format +msgid "Skipping remove_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:542 +#, python-format +msgid "" +"Skipping remove_export. 
No iscsi_target is presently exported for volume:" +" %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:583 +msgid "Error retrieving volume status: " +msgstr "" + +#: cinder/volume/drivers/nfs.py:141 +#, python-format +msgid "There's no NFS config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/nfs.py:146 +#, python-format +msgid "NFS config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/rbd.py:64 +#, python-format +msgid "rbd has no pool %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:87 cinder/volume/drivers/sheepdog.py:134 +msgid "error refreshing volume stats" +msgstr "" + +#: cinder/volume/drivers/rbd.py:220 +msgid "Not stored in rbd" +msgstr "" + +#: cinder/volume/drivers/rbd.py:224 +msgid "Blank components" +msgstr "" + +#: cinder/volume/drivers/rbd.py:227 +#, fuzzy +msgid "Not an rbd snapshot" +msgstr "istanza %s: creazione snapshot in corso" + +#: cinder/volume/drivers/rbd.py:242 +#, python-format +msgid "%s is in a different ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:253 +#, fuzzy, python-format +msgid "Unable to read image %s" +msgstr "Impossibile localizzare il volume %s" + +#: cinder/volume/drivers/scality.py:63 +msgid "Value required for 'scality_sofs_config'" +msgstr "" + +#: cinder/volume/drivers/scality.py:74 +#, python-format +msgid "Cannot access 'scality_sofs_config': %s" +msgstr "" + +#: cinder/volume/drivers/scality.py:80 +msgid "Cannot execute /sbin/mount.sofs" +msgstr "" + +#: cinder/volume/drivers/scality.py:101 +msgid "Cannot mount Scality SOFS, check syslog for errors" +msgstr "" + +#: cinder/volume/drivers/scality.py:135 +#, python-format +msgid "Cannot find volume dir for Scality SOFS at '%s'" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:47 +#, python-format +msgid "Sheepdog is not working: %s" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:52 +msgid "Sheepdog is not working" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:129 +#, python-format +msgid "Payload for SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:146 +#, python-format +msgid "Call to json.loads() raised an exception: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:152 +#, python-format +msgid "Results of SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:156 +#, python-format +msgid "Clone operation encountered: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:158 +#, python-format +msgid "Waiting for outstanding operation before retrying snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:164 +#, python-format +msgid "Detected xDBVersionMismatch, retry %s of 5" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:171 +#, fuzzy, python-format +msgid "API response: %s" +msgstr "risposta %s" + +#: cinder/volume/drivers/solidfire.py:191 +#, python-format +msgid "Found solidfire account: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:220 +#, python-format +msgid "solidfire account: %s does not exist, create it..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:281 +#, python-format +msgid "Failed to retrieve volume SolidFire-ID: %s in get_by_account!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:338 +msgid "Failed to get model update from clone" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:364 +#, python-format +msgid "More than one valid preset was detected, using %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:397 +#, python-format +msgid "Mapped SolidFire volumeID %(sfid)s to cinder ID %(uuid)s." 
+msgstr "" + +#: cinder/volume/drivers/solidfire.py:406 +#, python-format +msgid "Volume %s, not found on SF Cluster." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:409 +#, python-format +msgid "Found %(count)s volumes mapped to id: %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:476 +msgid "Enter SolidFire delete_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:480 +#, python-format +msgid "Account for Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:482 +msgid "This usually means the volume was never succesfully created." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:497 +#, python-format +msgid "Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:500 +msgid "Leaving SolidFire delete_volume" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:504 +msgid "Executing SolidFire ensure_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:509 +msgid "Executing SolidFire create_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:558 +msgid "Updating cluster status info" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:566 +#, fuzzy +msgid "Failed to get updated stats" +msgstr "Impossibile sospendere l'istanza" + +#: cinder/volume/drivers/storwize_svc.py:173 +#, python-format +msgid "WWPN on node %(node)s: %(wwpn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:179 +msgid "enter: do_setup" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:190 +#, python-format +msgid "pool %s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:206 +msgid "Failed to get license information." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:265 +msgid "do_setup: No configured nodes" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:267 +msgid "leave: do_setup" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:289 +msgid "enter: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:295 +#: cinder/volume/drivers/netapp/iscsi.py:169 +#: cinder/volume/drivers/netapp/iscsi.py:1182 +#: cinder/volume/drivers/netapp/iscsi.py:1535 +#: cinder/volume/drivers/netapp/nfs.py:109 +#: cinder/volume/drivers/netapp/nfs.py:328 +#: cinder/volume/drivers/netapp/nfs.py:379 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:118 +#, python-format +msgid "%s is not set" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:301 +msgid "" +"Password or SSH private key is required for authentication: set either " +"san_password or san_private_key option" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:309 +#, python-format +msgid "" +"Illegal value %d specified for storwize_svc_flashcopy_timeout: valid " +"values are between 0 and 600" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:317 +msgid "leave: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:331 +#, python-format +msgid "ensure_export: Volume %s not found on storage" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:356 +#, python-format +msgid "enter: _get_chap_secret_for_host: host name %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:392 +#, python-format +msgid "" +"leave: _get_chap_secret_for_host: host name %(host_name)s with secret " +"%(chap_secret)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:415 +msgid "_create_host: Cannot clean host name. 
Host name is not unicode or string" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:483 +#, python-format +msgid "enter: _get_host_from_connector: prefix %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:511 +#, python-format +msgid "leave: _get_host_from_connector: host %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:523 +#, python-format +msgid "enter: _create_host: host %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:538 +msgid "_create_host: No connector ports" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:551 +#, python-format +msgid "leave: _create_host: host %(host)s - %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:574 +#, python-format +msgid "enter: _map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:609 +msgid "" +"storwize_svc_multihostmap_enabled is set to Flase, Not allow multi host " +"mapping" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:619 +#, python-format +msgid "volume %s mapping to multi host" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:625 +#, python-format +msgid "" +"leave: _map_vol_to_host: LUN %(result_lun)s, volume %(volume_name)s, host" +" %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:635 +#, python-format +msgid "enter: _delete_host: host %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:643 +#, python-format +msgid "leave: _delete_host: host %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:675 +#, python-format +msgid "enter: initialize_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:691 +msgid "_create_host failed to return the host name." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:702 +#, python-format +msgid "initialize_connection: Failed to get attributes for volume %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:709 +#, python-format +msgid "Did not find expected column name in lsvdisk: %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:711 +#, python-format +msgid "initialize_connection: Missing volume attribute for volume %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:728 +#, python-format +msgid "" +"initialize_connection: No node found in I/O group %(gid)s for volume " +"%(vol)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:736 +#, python-format +msgid "initialize_connection: Did not find a preferred node for volume %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:769 +#, python-format +msgid "" +"initialize_connection: Failed to collect return properties for volume " +"%(vol)s and connector %(conn)s.\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:774 +#, python-format +msgid "" +"leave: initialize_connection:\n" +" volume: %(vol)s\n" +" connector %(conn)s\n" +" properties: %(prop)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:792 +#, python-format +msgid "enter: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:802 +msgid "_get_host_from_connector failed to return the host name for connector" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:817 +#, python-format +msgid "" +"terminate_connection: No mapping of volume %(vol_name)s to host " +"%(host_name)s found" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:825 +#, python-format +msgid "leave: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: 
cinder/volume/drivers/storwize_svc.py:883 +msgid "protocol must be specified as ' iSCSI' or ' FC'" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:907 +#, python-format +msgid "enter: _create_vdisk: vdisk %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:945 +#, python-format +msgid "" +"_create_vdisk %(name)s - did not find success message in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:950 +#, python-format +msgid "leave: _create_vdisk: volume %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:962 +#: cinder/volume/drivers/storwize_svc.py:976 +#, python-format +msgid "" +"create FC mapping from %(source)s to %(target)s - did not find success " +"message in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:988 +#: cinder/volume/drivers/storwize_svc.py:998 +#, python-format +msgid "" +"create FC mapping from %(source)s to %(target)s - did not find mapping id" +" in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1012 +#, python-format +msgid "" +"_prepare_fc_map: Failed to prepare FlashCopy from %(source)s to " +"%(target)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1039 +#, python-format +msgid "" +"Unexecpted mapping status %(status)s for mapping %(id)s. Attributes: " +"%(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1050 +#, python-format +msgid "" +"Mapping %(id)s prepare failed to complete within the allotted %(to)d " +"seconds timeout. Terminating." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1055 +#, python-format +msgid "" +"_prepare_fc_map: Failed to start FlashCopy from %(source)s to %(target)s " +"with exception %(ex)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1062 +#, python-format +msgid "_prepare_fc_map: %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1069 +#, python-format +msgid "" +"_start_fc_map: Failed to start FlashCopy from %(source)s to %(target)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1080 +#, python-format +msgid "" +"enter: _run_flashcopy: execute FlashCopy from source %(source)s to target" +" %(target)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1092 +#, python-format +msgid "leave: _run_flashcopy: FlashCopy started from %(source)s to %(target)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1100 +#, python-format +msgid "enter: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1107 +#, python-format +msgid "_create_copy: Source vdisk %s does not exist" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1119 +#, python-format +msgid "" +"_create_copy: cannot get source vdisk %(src)s capacity from vdisk " +"attributes %(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1129 +#, python-format +msgid "leave: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1134 +#, python-format +msgid "enter: _get_flashcopy_mapping_attributes: mapping %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1156 +#, python-format +msgid "" +"leave: _get_flashcopy_mapping_attributes: mapping %(fc_map_id)s, " +"attributes %(attributes)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1164 +#, python-format +msgid 
"enter: _is_vdisk_defined: vdisk %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1166 +#, python-format +msgid "leave: _is_vdisk_defined: vdisk %(vol)s with %(str)s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1194 +#, python-format +msgid "enter: _delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1199 +#, python-format +msgid "warning: Tried to delete vdisk %s but it does not exist." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1225 +#, python-format +msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1258 +#, python-format +msgid "leave: _delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1283 +msgid "create_volume_from_snapshot: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1297 +msgid "create_cloned_volume: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1361 +msgid "_update_volume_status: Could not get system name" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1373 +msgid "Could not get pool data from the storage" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1374 +msgid "_update_volume_status: Could not get storage pool data" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1412 +msgid "" +"Illegal value specified for storwize_svc_vol_rsize: set to either a " +"percentage (0-100) or -1" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1418 +msgid "" +"Illegal value specified for storwize_svc_vol_warning: set to a percentage" +" (0-100)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1425 +msgid "" +"Illegal value specified for storwize_svc_vol_grainsize: set to either 32," +" 64, 128, or 256" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1432 +msgid "System does not support compression" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1437 +msgid "If compression is set to True, rsize must also be set (not equal to -1)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1443 +#, python-format +msgid "" +"Illegal value %(prot)s specified for storwize_svc_connection_protocol: " +"valid values are %(enabled)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1452 +msgid "" +"Multipath is currently only supported for FC connections and not iSCSI. 
" +"(This is a Nova limitation.)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1464 +#, python-format +msgid "enter: _execute_command_and_parse_attributes: command %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1471 +#, fuzzy, python-format +msgid "" +"CLI Exception output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" +"%(description)s\n" +"Comando: %(cmd)s\n" +"Exit code: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" + +#: cinder/volume/drivers/storwize_svc.py:1488 +#, python-format +msgid "" +"leave: _execute_command_and_parse_attributes:\n" +"command: %(cmd)s\n" +"attributes: %(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1507 +#, python-format +msgid "" +"_get_hdr_dic: attribute headers and values do not match.\n" +" Headers: %(header)s\n" +" Values: %(row)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1517 +#, python-format +msgid "" +"%(fun)s: Failed with unexpected CLI output.\n" +" Command: %(cmd)s\n" +"stdout: %(out)s\n" +"stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1531 +#, python-format +msgid "" +"%(fun)s: Failed with unexpected CLI output.\n" +" Command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1539 +#, python-format +msgid "Did not find expected column in %(fun)s: %(hdr)s" +msgstr "" + +#: cinder/volume/drivers/windows.py:142 +#, python-format +msgid "Creating folder %s " +msgstr "" + +#: cinder/volume/drivers/windows.py:207 +#: cinder/volume/drivers/nexenta/volume.py:203 +#, python-format +msgid "Ignored target creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/windows.py:213 +#, python-format +msgid "Disk not found: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:218 +#, python-format +msgid "Sending %(method)s to %(url)s. Body \"%(body)s\"" +msgstr "" + +#: cinder/volume/drivers/zadara.py:240 +#, python-format +msgid "Operation completed. %(data)s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:350 +#, python-format +msgid "Volume %(name)s could not be found. It might be already deleted" +msgstr "" + +#: cinder/volume/drivers/zadara.py:447 +#, python-format +msgid "Attach properties: %(properties)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:42 +msgid "" +"Module PyWBEM not installed. Install PyWBEM using the python-pywbem " +"package." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:78 +msgid "Entering create_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:82 +#, python-format +msgid "Create Volume: %(volume)s Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:90 +#, python-format +msgid "Create Volume: %(volume)s Storage type: %(storage_type)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:97 +#, python-format +msgid "" +"Create Volume: %(volume)s Pool: %(pool)s Storage System: " +"%(storage_system)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:106 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Storage Configuration Service not " +"found for pool %(storage_type)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:114 +#, python-format +msgid "" +"Create Volume: %(name)s Method: CreateOrModifyElementFromStoragePool " +"ConfigServicie: %(service)s ElementName: %(name)s InPool: %(pool)s " +"ElementType: 5 Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:129 +#, python-format +msgid "Create Volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:136 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:143 +#, python-format +msgid "Leaving create_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:151 +msgid "Entering create_volume_from_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:156 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:166 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Snapshot Instance: %(snapshotinstance)s Storage " +"System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:176 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Create Volume from Snapshot is NOT supported on VMAX." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:187 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Cannot find Replication Service to create volume from " +"snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:196 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Method: CreateElementReplica ReplicationService: " +"%(service)s ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:217 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s " +"Snapshot:%(snapshotname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:229 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Successfully clone volume from snapshot. Finding the " +"clone relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:240 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Remove the clone relationship. Method: " +"ModifyReplicaSynchronization ReplicationService: %(service)s Operation: " +"8 Synchronization: %(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:256 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:265 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. 
Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:277 +#, python-format +msgid "" +"Leaving create_volume_from_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:286 +msgid "Entering create_cloned_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:291 +#, python-format +msgid "" +"Create a Clone from Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:301 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Source Instance: %(src_instance)s Storage System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:311 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Cannot find Replication Service to create cloned volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:320 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Method: CreateElementReplica ReplicationService: %(service)s " +"ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:341 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source " +"Volume:%(srcname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:353 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Successfully cloned volume from source volume. Finding the clone " +"relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:364 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Remove the clone relationship. Method: ModifyReplicaSynchronization " +"ReplicationService: %(service)s Operation: 8 Synchronization: " +"%(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:380 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:389 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:401 +#, python-format +msgid "" +"Leaving create_cloned_volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:410 +msgid "Entering delete_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:412 +#, python-format +msgid "Delete Volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:419 +#, python-format +msgid "Volume %(name)s not found on the array. No volume to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:429 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Storage Configuration Service not " +"found." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:437 +#, python-format +msgid "Delete Volume: %(name)s DeviceID: %(deviceid)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:441 +#, python-format +msgid "" +"Delete Volume: %(name)s Method: EMCReturnToStoragePool ConfigServic: " +"%(service)s TheElement: %(vol_instance)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:454 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:463 +#, python-format +msgid "Leaving delete_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:470 +msgid "Entering create_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:474 +#, python-format +msgid "Create snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:486 +#, python-format +msgid "Device ID: %(deviceid)s: Storage System: %(storagesystem)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:493 +#: cinder/volume/drivers/emc/emc_smis_common.py:495 +#: cinder/volume/drivers/emc/emc_smis_common.py:565 +#, python-format +msgid "Cannot find Replication Service to create snapshot for volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:500 +#, python-format +msgid "" +"Create Snapshot: Method: CreateElementReplica: Target: %(snapshot)s " +"Source: %(volume)s Replication Service: %(service)s ElementName: " +"%(elementname)s Sync Type: 7 SourceElement: %(sourceelement)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:516 +#, python-format +msgid "" +"Create Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:525 +#, python-format +msgid "Error Create Snapshot: (snapshot)s Volume: %(volume)s Error: %(errordesc)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:533 +#, python-format +msgid "" +"Leaving create_snapshot: Snapshot: %(snapshot)s Volume: %(volume)s " +"Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:539 +msgid "Entering delete_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:543 +#, python-format +msgid "Delete Snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:549 +#, python-format +msgid "" +"Delete Snapshot: %(snapshot)s: volume: %(volume)s. Finding " +"StorageSychronization_SV_SV." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:557 +#, python-format +msgid "" +"Snapshot: %(snapshot)s: volume: %(volume)s not found on the array. No " +"snapshot to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:572 +#, python-format +msgid "" +"Delete Snapshot: Target: %(snapshot)s Source: %(volume)s. Method: " +"ModifyReplicaSynchronization: Replication Service: %(service)s " +"Operation: 19 Synchronization: %(sync_name)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:588 +#, python-format +msgid "" +"Delete Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:597 +#, python-format +msgid "" +"Error Delete Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. 
Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:609 +#, python-format +msgid "" +"Leaving delete_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:619 +#, python-format +msgid "Create export: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:624 +#, python-format +msgid "create_export: Volume: %(volume)s Device ID: %(device_id)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:646 +#, python-format +msgid "" +"ExposePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(lun_name)s" +" InitiatorPortIDs: %(initiator)s DeviceAccesses: 2" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:661 +#, python-format +msgid "ExposePaths parameter LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:672 +#, python-format +msgid "Error mapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:676 +#, fuzzy, python-format +msgid "ExposePaths for volume %s completed successfully." +msgstr "volume %s: rimosso con successo" + +#: cinder/volume/drivers/emc/emc_smis_common.py:692 +#, python-format +msgid "" +"HidePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(device_id)s " +"LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:705 +#, python-format +msgid "Error unmapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:709 +#, fuzzy, python-format +msgid "HidePaths for volume %s completed successfully." +msgstr "volume %s: rimosso con successo" + +#: cinder/volume/drivers/emc/emc_smis_common.py:722 +#, python-format +msgid "" +"AddMembers: ConfigServicie: %(service)s MaskingGroup: %(masking_group)s" +" Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:735 +#, python-format +msgid "Error mapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:740 +#, fuzzy, python-format +msgid "AddMembers for volume %s completed successfully." +msgstr "volume %s: rimosso con successo" + +#: cinder/volume/drivers/emc/emc_smis_common.py:753 +#, python-format +msgid "" +"RemoveMembers: ConfigServicie: %(service)s MaskingGroup: " +"%(masking_group)s Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:766 +#, python-format +msgid "Error unmapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:771 +#, fuzzy, python-format +msgid "RemoveMembers for volume %s completed successfully." +msgstr "volume %s: rimosso con successo" + +#: cinder/volume/drivers/emc/emc_smis_common.py:777 +#, python-format +msgid "Map volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:786 +#: cinder/volume/drivers/emc/emc_smis_common.py:816 +#, python-format +msgid "Cannot find Controller Configuration Service for storage system %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:800 +#, python-format +msgid "Unmap volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:806 +#, python-format +msgid "Volume %s is not mapped. No volume to unmap." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:830 +#, python-format +msgid "Initialize connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:836 +#, python-format +msgid "Volume %s is already mapped." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:848 +#, python-format +msgid "Terminate connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:880 +#, python-format +msgid "Found Storage Type: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:883 +msgid "Storage type not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:899 +#, python-format +msgid "Found Masking View: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:902 +msgid "Masking View not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:924 +msgid "Ecom user not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:944 +#, python-format +msgid "Ecom IP: %(ecomIp)s Port: %(ecomPort)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:947 +msgid "Ecom server not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:954 +#, fuzzy +msgid "Cannot connect to ECOM server" +msgstr "Riconnesso alla coda" + +#: cinder/volume/drivers/emc/emc_smis_common.py:966 +#, python-format +msgid "Found Replication Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:979 +#, python-format +msgid "Found Storage Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:992 +#, python-format +msgid "Found Controller Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1005 +#, python-format +msgid "Found Storage Hardware ID Management Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1049 +#, python-format +msgid "Pool %(storage_type)s is not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1055 +#, python-format +msgid "Storage system not found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1061 +#, python-format +msgid "Pool: %(pool)s SystemName: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1077 +#, python-format +msgid "Pool name: %(poolname)s System name: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1109 +#, python-format +msgid "Volume %(volumename)s not found on the array." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1112 +#, python-format +msgid "Volume name: %(volumename)s Volume instance: %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1125 +#, python-format +msgid "Source: %(volumename)s Target: %(snapshotname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1148 +#, python-format +msgid "" +"Source: %(volumename)s Target: %(snapshotname)s. Storage Synchronized " +"not found. " +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1153 +#, python-format +msgid "" +"Storage system: %(storage_system)s Storage Synchronized instance: " +"%(sync)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1179 +#, python-format +msgid "Error finding %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1183 +#, python-format +msgid "Found %(name)s: %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1243 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage_system)s " +"and initiator %(initiator)s is %(ctrl)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1282 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage volume %(vol)s and initiator" +" %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1295 +#, python-format +msgid "" +"Volume %(name)s not found on the array. Cannot determine if there are " +"volumes mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1307 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage)s and " +"%(connector)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1319 +#, python-format +msgid "" +"Found %(numVolumesMapped)d volumes on storage system %(storage)s mapped " +"to %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1354 +#, python-format +msgid "Available device number on %(storage)s: %(device)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1397 +#, python-format +msgid "Device number not found for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1402 +#, python-format +msgid "Found device number %(device)d for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1412 +#, python-format +msgid "Device info: %(data)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1434 +#, python-format +msgid "Masking view: %(view)s DeviceMaskingGroup: %(masking)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1456 +#, python-format +msgid "Found Storage Processor System: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1484 +#, python-format +msgid "" +"iSCSIProtocolEndpoint for storage system %(storage_system)s and SP %(sp)s" +" is %(endpoint)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1513 +msgid "Error finding Storage Hardware ID Service." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1519 +#, python-format +msgid "" +"EMCGetTargetEndpoints: Service: %(service)s Storage HardwareIDs: " +"%(hardwareids)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1531 +msgid "Error finding Target WWNs." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1541 +#, python-format +msgid "Add target WWN: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1543 +#, python-format +msgid "Target WWNs: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1559 +#, python-format +msgid "Storage Hardware IDs for %(wwpns)s is %(foundInstances)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:158 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:167 +#, fuzzy, python-format +msgid "Cannot find device number for volume %s" +msgstr "Impossibile localizzare il volume %s" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:197 +#, python-format +msgid "Found iSCSI endpoint: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:204 +#, python-format +msgid "ISCSI endpoint not found for SP %(sp)s on storage system %(storage)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:221 +#, python-format +msgid "ISCSI properties: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:113 +msgid "read timed out" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:138 +msgid "do_setup." 
+msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:144 +msgid "check_for_setup_error." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:149 +msgid "check_for_setup_error: Can not get device type." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:153 +#, python-format +msgid "check_for_setup_error: Device type is:%(type)s, version is:%(version)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:160 +msgid "" +"check_for_setup_error: Product version not right. Please make sure the " +"product version is V1." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:171 +msgid "_get_device_type: Storage Pool must be configured." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:180 +#, python-format +msgid "create_volume:volume name: %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:194 +#, python-format +msgid "delete_volume: volume name: %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:201 +#, python-format +msgid "delete_volume:No need to delete volume. Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:210 +#, python-format +msgid "create_export: volume name:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:214 +#, python-format +msgid "create_export:Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:234 +#, python-format +msgid "initialize_connection: volume name: %(volume)s. initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:249 +#, python-format +msgid "" +"initialize_connection:Failed to find target ip for " +"initiator:%(initiatorname)s, please check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:259 +#, python-format +msgid "initialize_connection:Failed to find target iSCSI iqn. Target IP:%(ip)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:297 +#, python-format +msgid "" +"initialize_connection:host name: %(host)s, initiator name: %(ini)s, " +"hostport name: %(port)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:307 +#, python-format +msgid "" +"initialize_connection:Failed to find the given volume. volume " +"name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:359 +#, python-format +msgid "terminate_connection:volume name: %(volume)s, initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:368 +#, python-format +msgid "terminate_connection:Host does not exist. Host name:%(host)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:377 +#, python-format +msgid "terminate_connection:volume does not exist. volume name:%(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:396 +#, python-format +msgid "" +"terminate_connection:No map between host and volume. Host " +"name:%(hostname)s, volume name:%(volumename)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:414 +#, python-format +msgid "" +"terminate_connection:No initiator is added to the host. Host " +"name:%(hostname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:427 +#, python-format +msgid "create_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:434 +msgid "create_snapshot:Device does not support snapshot." 
+msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:440 +msgid "create_snapshot:Resource pool needs 1GB valid size at least." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:447 +#, python-format +msgid "create_snapshot:Volume does not exist. Volume name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:456 +#, python-format +msgid "create_snapshot:Snapshot does not exist. Snapshot name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:468 +#, python-format +msgid "delete_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:475 +msgid "delete_snapshot:Device does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:484 +#, python-format +msgid "delete_snapshot:Snapshot does not exist. snapshot name:%(snap)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:497 +#, python-format +msgid "" +"create_volume_from_snapshot:snapshot name:%(snapshot)s, volume " +"name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:505 +#, python-format +msgid "" +"create_volume_from_snapshot:Device does not support create volume from " +"snapshot. Volume name:%(volume)s, snapshot name:%(snapshot)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:516 +#, python-format +msgid "" +"create_volume_from_snapshot:Snapshot does not exist. Snapshot " +"name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:567 +msgid "Config file is wrong. Controler IP, UserName and UserPassword must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:573 +#, python-format +msgid "_check_conf_file: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:584 +#, python-format +msgid "_read_xml:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:612 +#, python-format +msgid "Write login information to xml error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:616 +#, python-format +msgid "_get_login_info error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:638 +#, python-format +msgid "" +"Config file is wrong. LUNType must be \"Thin\" or \"Thick\". " +"LUNType:%(type)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:665 +msgid "_get_lun_set_info:Use default prefetch type. Prefetch type:Intelligent." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:676 +msgid "" +"_get_lun_set_info:No available pools! Please check whether storage pool " +"is created." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:686 +#, python-format +msgid "_get_lun_set_info:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:736 +msgid "" +"_get_maximum_pool:maxpoolid is None. Please check config file and make " +"sure the \"Name\" in \"StoragePool\" is right." 
+msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:754 +#, python-format +msgid "_get_iscsi_info:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:764 +#, python-format +msgid "CLI command:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:831 +#, python-format +msgid "_execute_cli:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:838 +#, python-format +msgid "" +"_name_translate:Name in cinder: %(old)s, new name in storage system: " +"%(new)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:872 +#, python-format +msgid "" +"_create_hostgroup:Failed to Create hostgroup. Hostgroup name: %(name)s. " +"out:%(out)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:901 +#, python-format +msgid "" +"_add_host:Failed to add host to hostgroup. host name:%(host)s hostgroup " +"id:%(hostgroup)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:927 +#, python-format +msgid "" +"_add_initiator:Failed to add initiator. initiator name:%(name)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:941 +#, python-format +msgid "" +"_delete_initiator:ERROE:Failed to delete initiator. initiator " +"name:%(name)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:991 +#, python-format +msgid "" +"_add_hostport:Failed to add hostport. port name:%(port)s port " +"information:%(info)s host id:%(host)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1009 +#, python-format +msgid "_delete_hostport:Failed to delete host port. port id:%(portid)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1016 +#, python-format +msgid "_get_tgt_iqn:iSCSI IP is %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1049 +#, python-format +msgid "_get_tgt_iqn:iSCSI target iqn is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1086 +#, python-format +msgid "" +"_map_lun:Failed to add hostmap. hostid:%(host)s lunid:%(lun)s " +"hostlunid:%(hostlunid)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1122 +#, python-format +msgid "" +"_delete_map:There are IOs accessing the system. Retry to delete host map." +" map id:%(mapid)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1128 +#, python-format +msgid "_delete_map:Failed to delete host map. mapid:%(mapid)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1142 +#, python-format +msgid "_delete_host: Failed delete host. host id:%(hostid)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1223 +#, python-format +msgid "" +"_active_snapshot:Failed to active snapshot. snapshot id:%(name)s. " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1237 +#, python-format +msgid "" +"_disable_snapshot:Failed to disable snapshot. snapshot id:%(id)s. " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1251 +#, python-format +msgid "" +"_delete_snapshot:Failed to delete snapshot. snapshot id:%(id)s. " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1308 +#, python-format +msgid "_create_volume:Failed to Create volume. volume name:%(name)s. out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1321 +#, python-format +msgid "_delete_volume:Failed to delete volume. 
Volume name:%(name)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1338 +#, python-format +msgid "" +"_create_luncopy:Failed to Create LUNcopy. LUNcopy name:%(name)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1352 +#, python-format +msgid "" +"_start_luncopy:Failed to start LUNcopy. LUNcopy id:%(luncopyid)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1382 +#, python-format +msgid "" +"_wait_for_luncopy:LUNcopy status is not normal. LUNcopy " +"name:%(luncopyname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1415 +#, python-format +msgid "" +"_delete_luncopy:Failed to delete LUNcopy. LUNcopy id:%(luncopyid)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1430 +#, python-format +msgid "" +"_create_snapshot:Failed to Create snapshot. Snapshot name:%(name)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1468 +#, python-format +msgid "" +"_change_lun_controller:Failed to change lun owning controller. lun " +"id:%(lunid)s. new controller:%(controller)s. out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1484 +msgid "_is_resource_pool_enough:Resource pool for snapshot not be added." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:123 +#, python-format +msgid "API %(name)s failed: %(reason)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:134 +#: cinder/volume/drivers/netapp/iscsi.py:1167 +#, python-format +msgid "Using WSDL: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:144 +#, python-format +msgid "Using DFM server: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:149 +#, python-format +msgid "Using storage service: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:154 +#, python-format +msgid "Using storage service prefix: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:160 +#, python-format +msgid "Using vfiler: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:173 +msgid "Either netapp_storage_service or netapp_storage_service_prefix must be set" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:204 +#, fuzzy +msgid "Connected to DFM server" +msgstr "Riconnesso alla coda" + +#: cinder/volume/drivers/netapp/iscsi.py:282 +#, python-format +msgid "Discovered %(dataset_count)s datasets and %(lun_count)s LUNs" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:317 +#, python-format +msgid "Job failed: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:346 +msgid "" +"Attempt to use volume_type without specifying " +"netapp_storage_service_prefix flag." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:350 +msgid "" +"You must set the netapp_storage_service flag in order to create volumes " +"with no volume_type." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:420 +msgid "Failed to provision dataset member" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:435 +msgid "No LUN was created by the provision job" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:463 +#: cinder/volume/drivers/netapp/iscsi.py:1250 +#, python-format +msgid "No entry in LUN table for volume %(name)s." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:481 +msgid "Failed to remove and delete dataset LUN member" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:497 +msgid "Failed to remove and delete dataset Qtree member" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:538 +#, python-format +msgid "No entry in LUN table for volume %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:558 +#, python-format +msgid "Failed to get LUN details for LUN ID %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:575 +#, python-format +msgid "Failed to get host details for host ID %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:812 +#: cinder/volume/drivers/netapp/iscsi.py:859 +#, fuzzy, python-format +msgid "No LUN ID for volume %s" +msgstr "Impossibile localizzare il volume %s" + +#: cinder/volume/drivers/netapp/iscsi.py:821 +#, python-format +msgid "Failed to get target portal for filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:826 +#, python-format +msgid "Failed to get target IQN for filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:930 +msgid "" +"Failed to get LUN list. Is the DFM host time-synchronized with Cinder " +"host?" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1042 +#: cinder/volume/drivers/netapp/iscsi.py:1381 +#: cinder/volume/drivers/netapp/iscsi.py:1699 +#: cinder/volume/drivers/netapp/nfs.py:74 +#, python-format +msgid "" +"Cannot create volume of size %(vol_size)s from snapshot of size " +"%(snap_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1054 +#, python-format +msgid "" +"Cannot create volume of type %(new_type)s from snapshot of type " +"%(old_type)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1075 +#: cinder/volume/drivers/netapp/nfs.py:273 +#, python-format +msgid "" +"Cannot create clone of size %(vol_size)s from volume of size " +"%(src_vol_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1086 +#, python-format +msgid "Cannot create clone of type %(new_type)s from volume of type %(old_type)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1143 +#, python-format +msgid "No metadata property %(prop)s defined for the LUN %(name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1216 +#: cinder/volume/drivers/netapp/iscsi.py:1561 +msgid "Success getting LUN list from server" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1238 +#: cinder/volume/drivers/netapp/iscsi.py:1576 +#, python-format +msgid "Created LUN with name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1254 +#: cinder/volume/drivers/netapp/iscsi.py:1369 +#: cinder/volume/drivers/netapp/iscsi.py:1593 +#, python-format +msgid "Destroyed LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1291 +#, python-format +msgid "Mapped LUN %(handle)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1298 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(handle)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1303 +#: cinder/volume/drivers/netapp/iscsi.py:1637 +#, python-format +msgid "Failed to get LUN target details for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1307 +#: cinder/volume/drivers/netapp/iscsi.py:1648 +#, python-format +msgid "Failed to get target portal for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1311 +#: cinder/volume/drivers/netapp/iscsi.py:1651 +#, python-format +msgid "Failed 
to get target IQN for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1344 +#, python-format +msgid "Unmapped LUN %(handle)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1365 +#, python-format +msgid "No entry in LUN table for snapshot %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1409 +#: cinder/volume/drivers/netapp/iscsi.py:1907 +msgid "Object is not a NetApp LUN." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1419 +#: cinder/volume/drivers/netapp/iscsi.py:2188 +#, python-format +msgid "Cloned LUN with new name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1439 +#, fuzzy, python-format +msgid "Could not find handle for LUN named %s" +msgstr "Impossibile localizzare il volume %s" + +#: cinder/volume/drivers/netapp/iscsi.py:1458 +#: cinder/volume/drivers/netapp/iscsi.py:1936 +#, python-format +msgid "" +"Cannot clone volume of size %(vol_size)s from src volume of size " +"%(src_vol_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1518 +#, python-format +msgid "Using NetApp filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1585 +#, python-format +msgid "No entry in LUN table for volume/snapshot %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1628 +#, python-format +msgid "Mapped LUN %(name)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1632 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(name)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1688 +#, fuzzy, python-format +msgid "Snapshot %s deletion successful" +msgstr "volume %s: rimosso con successo" + +#: cinder/volume/drivers/netapp/iscsi.py:1717 +#, python-format +msgid "Unmapped LUN %(name)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1735 +#, fuzzy, python-format +msgid "Failed to get vol with required size for volume: %s" +msgstr "Impossibile localizzare il volume %s" + +#: cinder/volume/drivers/netapp/iscsi.py:1811 +#, python-format +msgid "Error mapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1829 +#, python-format +msgid "Error unmapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1923 +#, python-format +msgid "Could not find attribute for LUN named %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2057 +#, python-format +msgid "No iscsi service found for vserver %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2192 +#, python-format +msgid "No clonned lun named %s found on the filer" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2377 +#, python-format +msgid "Error finding luns for volume %(vol)s. Verify volume exists." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2481 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s completed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2484 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s failed" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:312 +#, python-format +msgid "" +"Cloning with params ip %(host_ip)s, exp_path\n" +" %(export_path)s, vol %(volume_name)s,\n" +" clone_name %(clone_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:461 +#, python-format +msgid "No interface found on cluster for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:487 +#, python-format +msgid "" +"No volume on cluster with vserver\n" +" %(vserver)s and junction path " +"%(junction)s\n" +" " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:493 +#, python-format +msgid "" +"Cloning with params volume %(volume)s,src %(src_path)s,\n" +" dest %(dest_path)s, vserver %(vserver)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:547 +#, python-format +msgid "No storage path found for export path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:555 +#, python-format +msgid "Cloning with src %(src_path)s, dest %(dest_path)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/__init__.py:27 +msgid "Nexenta SA returned the error" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:64 +#, python-format +msgid "Sending JSON data: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:69 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:75 +msgid "No headers in server response" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:76 +msgid "Bad response from server" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:79 +#, fuzzy, python-format +msgid "Got response: %s" +msgstr "risposta %s" + +#: cinder/volume/drivers/nexenta/volume.py:99 +#, python-format +msgid "Volume %s does not exist in Nexenta SA" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:211 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:220 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:228 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:238 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:274 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:282 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:74 +#, python-format +msgid "CLIQ command returned %s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:80 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:88 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. 
Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:118 +#, python-format +msgid "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. " +"Result=%(_xml)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:171 +#, python-format +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:235 +msgid "local_path not supported" +msgstr "" + +#: cinder/volume/drivers/san/san.py:148 +#, fuzzy, python-format +msgid "Error running SSH command: %s" +msgstr "Si e' verificato un errore inatteso durante l'esecuzione del comando." + +#: cinder/volume/drivers/san/san.py:169 +msgid "Specify san_password or san_private_key" +msgstr "" + +#: cinder/volume/drivers/san/san.py:173 +msgid "san_ip must be set" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:80 +#, python-format +msgid "Cannot parse list-view output: %s" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:174 +#, python-format +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:257 +#, python-format +msgid "SSH Command failed after '%(total_attempts)r' attempts: '%(command)s'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:261 +#, fuzzy, python-format +msgid "Error running ssh command: %s" +msgstr "Si e' verificato un errore inatteso durante l'esecuzione del comando." + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:446 +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:104 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:103 +#, python-format +msgid "CPG (%s) doesn't exist on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:493 +#, python-format +msgid "Must specify a valid persona %(valid)s, value '%(persona)s' is invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:532 +#, python-format +msgid "" +"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " +"invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:569 +#, python-format +msgid "Volume (%s) already exists on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:622 +#, fuzzy, python-format +msgid "Unexpected state while cloning %s" +msgstr "Si e' verificato un errore inatteso durante l'esecuzione del comando." + +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:97 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:96 +msgid "Login to 3PAR array invalid" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:218 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:237 +#, python-format +msgid "The hostname must be called '%s'" +msgstr "" + diff --git a/cinder/locale/ja/LC_MESSAGES/cinder.po b/cinder/locale/ja/LC_MESSAGES/cinder.po new file mode 100644 index 0000000000..e5c816d40e --- /dev/null +++ b/cinder/locale/ja/LC_MESSAGES/cinder.po @@ -0,0 +1,5606 @@ +# Japanese translation for cinder +# Copyright (c) 2011 Rosetta Contributors and Canonical Ltd 2011 +# This file is distributed under the same license as the cinder package. +# FIRST AUTHOR , 2011. 
+# +msgid "" +msgstr "" +"Project-Id-Version: cinder\n" +"Report-Msgid-Bugs-To: FULL NAME \n" +"POT-Creation-Date: 2013-05-21 18:10+0000\n" +"PO-Revision-Date: 2011-08-23 11:22+0000\n" +"Last-Translator: Thierry Carrez \n" +"Language-Team: \n" +"Plural-Forms: nplurals=1; plural=0\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 0.9.6\n" + +#: cinder/context.py:61 +#, python-format +msgid "Arguments dropped when creating context: %s" +msgstr "" + +#: cinder/context.py:92 +#, python-format +msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" +msgstr "" + +#: cinder/exception.py:63 +msgid "Unexpected error while running command." +msgstr "コマンド実行において予期しないエラーが発生しました。" + +#: cinder/exception.py:66 +#, python-format +msgid "" +"%(description)s\n" +"Command: %(cmd)s\n" +"Exit code: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" +msgstr "" +"%(description)s\n" +"コマンド: %(cmd)s\n" +"終了コード: %(exit_code)s\n" +"標準出力: %(stdout)r\n" +"標準エラー出力: %(stderr)r" + +#: cinder/exception.py:90 +msgid "DB exception wrapped." +msgstr "" + +#: cinder/exception.py:104 +msgid "An unknown exception occurred." +msgstr "" + +#: cinder/exception.py:125 cinder/openstack/common/rpc/common.py:86 +msgid "Exception in string format operation" +msgstr "" + +#: cinder/exception.py:138 +msgid "Connection to glance failed" +msgstr "" + +#: cinder/exception.py:142 +msgid "Not authorized." +msgstr "" + +#: cinder/exception.py:147 +msgid "User does not have admin privileges" +msgstr "" + +#: cinder/exception.py:151 +#, python-format +msgid "Policy doesn't allow %(action)s to be performed." +msgstr "" + +#: cinder/exception.py:155 +#, fuzzy, python-format +msgid "Not authorized for image %(image_id)s." +msgstr "メッセージ %s に対するメソッドが存在しません。" + +#: cinder/exception.py:159 +msgid "Unacceptable parameters." +msgstr "" + +#: cinder/exception.py:164 +msgid "Invalid snapshot" +msgstr "" + +#: cinder/exception.py:168 +#, python-format +msgid "Volume %(volume_id)s is still attached, detach volume first." +msgstr "" + +#: cinder/exception.py:172 +msgid "Failed to load data into json format" +msgstr "" + +#: cinder/exception.py:176 +msgid "The request is invalid." +msgstr "" + +#: cinder/exception.py:180 +msgid "The results are invalid." +msgstr "" + +#: cinder/exception.py:184 +msgid "Invalid input received" +msgstr "" + +#: cinder/exception.py:188 +msgid "Invalid volume type" +msgstr "" + +#: cinder/exception.py:192 +msgid "Invalid volume" +msgstr "" + +#: cinder/exception.py:196 +#, python-format +msgid "Invalid content type %(content_type)s." +msgstr "" + +#: cinder/exception.py:200 +msgid "Invalid Parameter: Unicode is not supported by the current database." +msgstr "" + +#: cinder/exception.py:207 +#, python-format +msgid "%(err)s" +msgstr "" + +#: cinder/exception.py:211 +msgid "Service is unavailable at this time." +msgstr "" + +#: cinder/exception.py:215 +#, python-format +msgid "Image %(image_id)s is unacceptable: %(reason)s" +msgstr "" + +#: cinder/exception.py:219 +msgid "Expected a uuid but received %(uuid)." +msgstr "" + +#: cinder/exception.py:223 +msgid "Resource could not be found." +msgstr "" + +#: cinder/exception.py:229 +#, python-format +msgid "Volume %(volume_id)s persistence file could not be found." +msgstr "" + +#: cinder/exception.py:233 +#, python-format +msgid "Volume %(volume_id)s could not be found." 
+msgstr "" + +#: cinder/exception.py:237 +#, python-format +msgid "Unable to locate account %(account_name)s on Solidfire device" +msgstr "" + +#: cinder/exception.py:242 +#, python-format +msgid "Volume not found for instance %(instance_id)s." +msgstr "" + +#: cinder/exception.py:246 +#, python-format +msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:251 cinder/exception.py:264 +msgid "Invalid metadata" +msgstr "" + +#: cinder/exception.py:255 cinder/exception.py:268 +msgid "Invalid metadata size" +msgstr "" + +#: cinder/exception.py:259 +#, python-format +msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:272 +#, python-format +msgid "Volume type %(volume_type_id)s could not be found." +msgstr "" + +#: cinder/exception.py:276 +#, python-format +msgid "Volume type with name %(volume_type_name)s could not be found." +msgstr "" + +#: cinder/exception.py:281 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s has no extra specs with key " +"%(extra_specs_key)s." +msgstr "" + +#: cinder/exception.py:286 +#, python-format +msgid "Snapshot %(snapshot_id)s could not be found." +msgstr "" + +#: cinder/exception.py:290 +#, python-format +msgid "deleting volume %(volume_name)s that has snapshot" +msgstr "" + +#: cinder/exception.py:294 +#, python-format +msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" +msgstr "" + +#: cinder/exception.py:299 +#, python-format +msgid "No target id found for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:303 +#, fuzzy, python-format +msgid "Failed to create iscsi target for volume %(volume_id)s." +msgstr "存在しないコンソール %(console_id)s を削除しようとしました" + +#: cinder/exception.py:307 +#, python-format +msgid "Failed to attach iSCSI target for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:311 +#, fuzzy, python-format +msgid "Failed to remove iscsi target for volume %(volume_id)s." +msgstr "存在しないコンソール %(console_id)s を削除しようとしました" + +#: cinder/exception.py:315 +#, python-format +msgid "No disk at %(location)s" +msgstr "" + +#: cinder/exception.py:319 +#, python-format +msgid "Invalid image href %(image_href)s." +msgstr "" + +#: cinder/exception.py:323 +#, python-format +msgid "Image %(image_id)s could not be found." +msgstr "" + +#: cinder/exception.py:327 +#, python-format +msgid "Service %(service_id)s could not be found." +msgstr "" + +#: cinder/exception.py:331 +#, python-format +msgid "Host %(host)s could not be found." +msgstr "" + +#: cinder/exception.py:335 +#, python-format +msgid "Scheduler Host Filter %(filter_name)s could not be found." +msgstr "" + +#: cinder/exception.py:339 +#, python-format +msgid "Scheduler Host Weigher %(weigher_name)s could not be found." +msgstr "" + +#: cinder/exception.py:343 +#, python-format +msgid "Could not find binary %(binary)s on host %(host)s." +msgstr "" + +#: cinder/exception.py:347 +#, python-format +msgid "Invalid reservation expiration %(expire)s." +msgstr "" + +#: cinder/exception.py:351 +#, python-format +msgid "" +"Change would make usage less than 0 for the following resources: " +"%(unders)s" +msgstr "" + +#: cinder/exception.py:356 +msgid "Quota could not be found" +msgstr "" + +#: cinder/exception.py:360 +#, python-format +msgid "Unknown quota resources %(unknown)s." +msgstr "" + +#: cinder/exception.py:364 +#, python-format +msgid "Quota for project %(project_id)s could not be found." 
+msgstr "" + +#: cinder/exception.py:368 +#, python-format +msgid "Quota class %(class_name)s could not be found." +msgstr "" + +#: cinder/exception.py:372 +#, python-format +msgid "Quota usage for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:376 +#, python-format +msgid "Quota reservation %(uuid)s could not be found." +msgstr "" + +#: cinder/exception.py:380 +#, python-format +msgid "Quota exceeded for resources: %(overs)s" +msgstr "" + +#: cinder/exception.py:384 +#, python-format +msgid "Migration %(migration_id)s could not be found." +msgstr "" + +#: cinder/exception.py:388 +#, python-format +msgid "Migration not found for instance %(instance_id)s with status %(status)s." +msgstr "" + +#: cinder/exception.py:393 +#, python-format +msgid "File %(file_path)s could not be found." +msgstr "" + +#: cinder/exception.py:397 +#, python-format +msgid "Class %(class_name)s could not be found: %(exception)s" +msgstr "" + +#: cinder/exception.py:401 +msgid "Action not allowed." +msgstr "" + +#: cinder/exception.py:410 +#, python-format +msgid "Key pair %(key_name)s already exists." +msgstr "" + +#: cinder/exception.py:414 +#, fuzzy, python-format +msgid "Volume Type %(id)s already exists." +msgstr "グループ %s は既に存在しています。" + +#: cinder/exception.py:418 +msgid "Migration error" +msgstr "" + +#: cinder/exception.py:422 +#, python-format +msgid "Malformed message body: %(reason)s" +msgstr "" + +#: cinder/exception.py:426 +#, python-format +msgid "Could not find config at %(path)s" +msgstr "" + +#: cinder/exception.py:430 +#, python-format +msgid "Could not load paste app '%(name)s' from %(path)s" +msgstr "" + +#: cinder/exception.py:434 +#, python-format +msgid "No valid host was found. %(reason)s" +msgstr "" + +#: cinder/exception.py:438 +#, python-format +msgid "Host %(host)s is not up or doesn't exist." +msgstr "" + +#: cinder/exception.py:442 +msgid "Quota exceeded" +msgstr "" + +#: cinder/exception.py:449 +msgid "Requested volume or snapshot exceeds allowed Gigabytes quota" +msgstr "" + +#: cinder/exception.py:454 +msgid "Maximum volume/snapshot size exceeded" +msgstr "" + +#: cinder/exception.py:458 +#, python-format +msgid "Maximum number of volumes allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:462 +#, python-format +msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:466 +#, python-format +msgid "Detected more than one volume with name %(vol_name)s" +msgstr "" + +#: cinder/exception.py:470 +#, python-format +msgid "3PAR Host already exists: %(err)s. %(info)s" +msgstr "" + +#: cinder/exception.py:474 +#, python-format +msgid "Invalid 3PAR Domain: %(err)s" +msgstr "" + +#: cinder/exception.py:478 +#, python-format +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +msgstr "" + +#: cinder/exception.py:483 +msgid "Bad response from SolidFire API" +msgstr "" + +#: cinder/exception.py:487 +#, python-format +msgid "Error in SolidFire API response: data=%(data)s" +msgstr "" + +#: cinder/exception.py:491 +#, python-format +msgid "Unknown or unsupported command %(cmd)s" +msgstr "" + +#: cinder/exception.py:495 +#, python-format +msgid "Malformed response to command %(cmd)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:499 +#, python-format +msgid "Bad HTTP response status %(status)s" +msgstr "" + +#: cinder/exception.py:503 +#, python-format +msgid "Operation failed with status=%(status)s. 
Full dump: %(data)s" +msgstr "" + +#: cinder/exception.py:507 +#, fuzzy, python-format +msgid "Unable to create server object for initiator %(name)s" +msgstr "インスタンス %(instance_name)s 用のSR %(sr_ref)s における VDI を作成できません" + +#: cinder/exception.py:511 +#, python-format +msgid "Unable to find server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:515 +msgid "Unable to find any active VPSA controller" +msgstr "" + +#: cinder/exception.py:519 +#, python-format +msgid "Failed to retrieve attachments for volume %(name)s" +msgstr "" + +#: cinder/exception.py:523 +#, python-format +msgid "Invalid attachment info for volume %(name)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:527 +#, python-format +msgid "Instance %(instance_id)s could not be found." +msgstr "" + +#: cinder/exception.py:531 +#, python-format +msgid "Bad or unexpected response from the storage volume backend API: %(data)s" +msgstr "" + +#: cinder/exception.py:536 +msgid "Unknown NFS exception" +msgstr "" + +#: cinder/exception.py:540 +msgid "No mounted NFS shares found" +msgstr "" + +#: cinder/exception.py:544 cinder/exception.py:556 +#, python-format +msgid "There is no share which can host %(volume_size)sG" +msgstr "" + +#: cinder/exception.py:548 +msgid "Unknown Gluster exception" +msgstr "" + +#: cinder/exception.py:552 +msgid "No mounted Gluster shares found" +msgstr "" + +#: cinder/exception.py:560 +#, python-format +msgid "" +"Glance metadata cannot be updated, key %(key)s exists for volume id " +"%(volume_id)s" +msgstr "" + +#: cinder/exception.py:565 +msgid "Failed to copy image to volume" +msgstr "" + +#: cinder/exception.py:569 +#, python-format +msgid "Backup %(backup_id)s could not be found." +msgstr "" + +#: cinder/exception.py:573 +#, fuzzy, python-format +msgid "Invalid backup: %(reason)s" +msgstr "不正なバックエンドです: %s" + +#: cinder/exception.py:577 +#, fuzzy +msgid "Connection to swift failed" +msgstr "libvirtへの接続が切れています。" + +#: cinder/flags.py:145 +msgid "Deploy v1 of the Cinder API. " +msgstr "" + +#: cinder/flags.py:148 +msgid "Deploy v2 of the Cinder API. " +msgstr "" + +#: cinder/manager.py:158 +#, python-format +msgid "Skipping %(full_task_name)s, %(ticks_to_skip)s ticks left until next run" +msgstr "" + +#: cinder/manager.py:164 +#, python-format +msgid "Running periodic task %(full_task_name)s" +msgstr "" + +#: cinder/manager.py:171 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "" + +#: cinder/manager.py:216 +msgid "Notifying Schedulers of capabilities ..." 
+msgstr "" + +#: cinder/policy.py:30 +msgid "JSON file representing policy" +msgstr "" + +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" +msgstr "" + +#: cinder/quota.py:704 +#, python-format +msgid "Created reservations %(reservations)s" +msgstr "" + +#: cinder/quota.py:726 +#, python-format +msgid "Failed to commit reservations %(reservations)s" +msgstr "" + +#: cinder/quota.py:747 +#, python-format +msgid "Failed to roll back reservations %(reservations)s" +msgstr "" + +#: cinder/service.py:124 +msgid "SIGTERM received" +msgstr "" + +#: cinder/service.py:174 cinder/openstack/common/service.py:164 +msgid "Parent process has died unexpectedly, exiting" +msgstr "" + +#: cinder/service.py:213 cinder/openstack/common/service.py:200 +msgid "Forking too fast, sleeping" +msgstr "" + +#: cinder/service.py:231 cinder/openstack/common/service.py:121 +#: cinder/openstack/common/service.py:218 +#, python-format +msgid "Caught %s, exiting" +msgstr "" + +#: cinder/service.py:236 cinder/openstack/common/service.py:223 +#, fuzzy +msgid "Unhandled exception" +msgstr "内側で発生した例外: %s" + +#: cinder/service.py:243 cinder/openstack/common/service.py:230 +#, python-format +msgid "Started child %d" +msgstr "" + +#: cinder/service.py:253 cinder/openstack/common/service.py:240 +#, fuzzy, python-format +msgid "Starting %d workers" +msgstr "開始アドレス" + +#: cinder/service.py:272 cinder/openstack/common/service.py:257 +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "" + +#: cinder/service.py:275 +#, python-format +msgid "Child %(pid)d exited with status %(code)d" +msgstr "" + +#: cinder/service.py:278 cinder/openstack/common/service.py:265 +#, python-format +msgid "pid %d not in child list" +msgstr "" + +#: cinder/service.py:286 +#, python-format +msgid "_wait_child %d" +msgstr "" + +#: cinder/service.py:302 +#, fuzzy, python-format +msgid "wait wrap.failed %s" +msgstr "NotFound 発生: %s" + +#: cinder/service.py:310 cinder/openstack/common/service.py:293 +#, python-format +msgid "Caught %s, stopping children" +msgstr "" + +#: cinder/service.py:321 cinder/openstack/common/service.py:304 +#, python-format +msgid "Waiting on %d children to exit" +msgstr "" + +#: cinder/service.py:353 +#, fuzzy, python-format +msgid "Starting %(topic)s node (version %(version_string)s)" +msgstr "%(topic)s ノードを開始しています (バージョン %(vcs_string)s)" + +#: cinder/service.py:367 cinder/openstack/common/rpc/service.py:47 +#, python-format +msgid "Creating Consumer connection for Service %s" +msgstr "" + +#: cinder/service.py:458 +msgid "Service killed that has no database entry" +msgstr "データベースにエントリの存在しないサービスを終了します。" + +#: cinder/service.py:495 +msgid "The service database object disappeared, Recreating it." +msgstr "サービスデータベースオブジェクトが消滅しました。再作成します。" + +#: cinder/service.py:510 +msgid "Recovered model server connection!" 
+msgstr "モデルサーバへの接続を復旧しました。" + +#: cinder/service.py:516 +msgid "model server went away" +msgstr "モデルサーバが消滅しました。" + +#: cinder/service.py:608 +msgid "Full set of FLAGS:" +msgstr "FLAGSの一覧:" + +#: cinder/service.py:615 +#, python-format +msgid "%(flag)s : FLAG SET " +msgstr "" + +#: cinder/utils.py:96 +#, python-format +msgid "Fetching %s" +msgstr "ファイルをフェッチ: %s" + +#: cinder/utils.py:147 cinder/openstack/common/processutils.py:122 +#, python-format +msgid "Got unknown keyword args to utils.execute: %r" +msgstr "" + +#: cinder/utils.py:153 +msgid "" +"The root_helper option (which lets you specify a root wrapper different " +"from cinder-rootwrap, and defaults to using sudo) is now deprecated. You " +"should use the rootwrap_config option instead." +msgstr "" + +#: cinder/utils.py:169 cinder/openstack/common/processutils.py:137 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "コマンド実行(subprocess): %s" + +#: cinder/utils.py:186 cinder/utils.py:267 +#: cinder/openstack/common/processutils.py:162 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:224 +#, python-format +msgid "Result was %s" +msgstr "コマンド実行結果: %s" + +#: cinder/utils.py:199 cinder/openstack/common/processutils.py:174 +#, python-format +msgid "%r failed. Retrying." +msgstr "" + +#: cinder/utils.py:241 cinder/volume/drivers/san/hp/hp_3par_common.py:197 +#, python-format +msgid "Running cmd (SSH): %s" +msgstr "コマンド(SSH)を実行: %s" + +#: cinder/utils.py:243 +msgid "Environment not supported over SSH" +msgstr "" + +#: cinder/utils.py:247 +msgid "process_input not supported over SSH" +msgstr "" + +#: cinder/utils.py:316 cinder/volume/drivers/huawei/huawei_iscsi.py:91 +msgid "Specify a password or private_key" +msgstr "" + +#: cinder/utils.py:332 cinder/volume/drivers/huawei/huawei_iscsi.py:99 +#, fuzzy, python-format +msgid "Error connecting via ssh: %s" +msgstr "libvirt %s へ接続します。" + +#: cinder/utils.py:373 +#, python-format +msgid "debug in callback: %s" +msgstr "コールバック中のデバッグ: %s" + +#: cinder/utils.py:532 +#, python-format +msgid "Link Local address is not found.:%s" +msgstr "リンクローカルアドレスが見つかりません: %s" + +#: cinder/utils.py:535 +#, python-format +msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +msgstr "%(interface)s のローカルIPアドレスのリンクが取得できません:%(ex)s" + +#: cinder/utils.py:569 +#, python-format +msgid "Invalid backend: %s" +msgstr "不正なバックエンドです: %s" + +#: cinder/utils.py:580 +#, python-format +msgid "backend %s" +msgstr "バックエンドは %s です。" + +#: cinder/utils.py:630 +msgid "in looping call" +msgstr "" + +#: cinder/utils.py:813 +#, python-format +msgid "Expected object of type: %s" +msgstr "" + +#: cinder/utils.py:903 +#, python-format +msgid "timefunc: '%(name)s' took %(total_time).2f secs" +msgstr "" + +#: cinder/utils.py:1105 +#, python-format +msgid "Could not remove tmpdir: %s" +msgstr "" + +#: cinder/utils.py:1213 +#, python-format +msgid "Unknown byte multiplier: %s" +msgstr "" + +#: cinder/wsgi.py:121 +#, fuzzy, python-format +msgid "Unable to find cert_file : %s" +msgstr "VBD %s から SRを取得できません。" + +#: cinder/wsgi.py:124 +#, fuzzy, python-format +msgid "Unable to find ca_file : %s" +msgstr "VBD %s から SRを取得できません。" + +#: cinder/wsgi.py:127 +#, fuzzy, python-format +msgid "Unable to find key_file : %s" +msgstr "VBD %s から SRを取得できません。" + +#: cinder/wsgi.py:130 +msgid "" +"When running server in SSL mode, you must specify both a cert_file and " +"key_file option value in your configuration file" +msgstr "" + +#: cinder/wsgi.py:163 +#, python-format +msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" 
+msgstr "" + +#: cinder/wsgi.py:207 +#, python-format +msgid "Started %(name)s on %(_host)s:%(_port)s" +msgstr "" + +#: cinder/wsgi.py:226 +msgid "Stopping WSGI server." +msgstr "" + +#: cinder/wsgi.py:240 +msgid "WSGI server has stopped." +msgstr "" + +#: cinder/wsgi.py:309 +msgid "You must implement __call__" +msgstr "__call__ を実装しなければなりません" + +#: cinder/api/auth.py:26 +msgid "" +"cinder.api.auth:CinderKeystoneContext is deprecated. Please use " +"cinder.api.middleware.auth:CinderKeystoneContext instead." +msgstr "" + +#: cinder/api/auth.py:34 +msgid "" +"cinder.api.auth:pipeline_factory is deprecated. Please use " +"cinder.api.middleware.auth:pipeline_factory instead." +msgstr "" + +#: cinder/api/common.py:63 cinder/api/common.py:97 cinder/volume/api.py:359 +msgid "limit param must be an integer" +msgstr "" + +#: cinder/api/common.py:66 cinder/api/common.py:101 cinder/volume/api.py:356 +msgid "limit param must be positive" +msgstr "" + +#: cinder/api/common.py:91 +msgid "offset param must be an integer" +msgstr "" + +#: cinder/api/common.py:105 +msgid "offset param must be positive" +msgstr "" + +#: cinder/api/common.py:133 +#, python-format +msgid "marker [%s] not found" +msgstr "" + +#: cinder/api/common.py:160 +#, python-format +msgid "href %s does not contain version" +msgstr "" + +#: cinder/api/extensions.py:184 +msgid "Initializing extension manager." +msgstr "" + +#: cinder/api/extensions.py:199 +#, python-format +msgid "Loaded extension: %s" +msgstr "" + +#: cinder/api/extensions.py:237 +#, python-format +msgid "Ext name: %s" +msgstr "" + +#: cinder/api/extensions.py:238 +#, python-format +msgid "Ext alias: %s" +msgstr "" + +#: cinder/api/extensions.py:239 +#, python-format +msgid "Ext description: %s" +msgstr "" + +#: cinder/api/extensions.py:241 +#, python-format +msgid "Ext namespace: %s" +msgstr "" + +#: cinder/api/extensions.py:242 +#, python-format +msgid "Ext updated: %s" +msgstr "" + +#: cinder/api/extensions.py:244 +#, python-format +msgid "Exception loading extension: %s" +msgstr "" + +#: cinder/api/extensions.py:258 +#, python-format +msgid "Loading extension %s" +msgstr "" + +#: cinder/api/extensions.py:264 +#, python-format +msgid "Calling extension factory %s" +msgstr "" + +#: cinder/api/extensions.py:278 +#, python-format +msgid "osapi_volume_extension is set to deprecated path: %s" +msgstr "" + +#: cinder/api/extensions.py:280 +#, python-format +msgid "" +"Please set your flag or cinder.conf settings for osapi_volume_extension " +"to: %s" +msgstr "" + +#: cinder/api/extensions.py:289 +#, python-format +msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:357 +#, python-format +msgid "Failed to load extension %(classpath)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:381 +#, python-format +msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgstr "" + +#: cinder/api/sizelimit.py:25 +msgid "" +"cinder.api.sizelimit:RequestBodySizeLimiter is deprecated. Please use " +"cinder.api.middleware.sizelimit:RequestBodySizeLimiter instead" +msgstr "" + +#: cinder/api/xmlutil.py:268 +msgid "element is not a child" +msgstr "" + +#: cinder/api/xmlutil.py:417 +msgid "root element selecting a list" +msgstr "" + +#: cinder/api/xmlutil.py:742 +#, python-format +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgstr "" + +#: cinder/api/xmlutil.py:861 +msgid "subclasses must implement construct()!" 
+msgstr "" + +#: cinder/api/contrib/admin_actions.py:80 +#, python-format +msgid "Updating %(resource)s '%(id)s' with '%(update)r'" +msgstr "" + +#: cinder/api/contrib/backups.py:128 +#, python-format +msgid "show called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:140 +#, fuzzy, python-format +msgid "delete called for member %s" +msgstr "Secret Key change: ユーザ %s のシークレットキーを更新します。" + +#: cinder/api/contrib/backups.py:143 +#, python-format +msgid "Delete backup with id: %s" +msgstr "" + +#: cinder/api/contrib/backups.py:185 +#, python-format +msgid "Creating new backup %s" +msgstr "" + +#: cinder/api/contrib/backups.py:195 cinder/api/contrib/backups.py:230 +msgid "Incorrect request body format" +msgstr "" + +#: cinder/api/contrib/backups.py:201 +#, python-format +msgid "Creating backup of volume %(volume_id)s in container %(container)s" +msgstr "" + +#: cinder/api/contrib/backups.py:221 +#, python-format +msgid "Restoring backup %(backup_id)s (%(body)s)" +msgstr "" + +#: cinder/api/contrib/backups.py:234 +#, python-format +msgid "Restoring backup %(backup_id)s to volume %(volume_id)s" +msgstr "" + +#: cinder/api/contrib/extended_snapshot_attributes.py:61 +msgid "Snapshot not found." +msgstr "" + +#: cinder/api/contrib/hosts.py:83 cinder/api/openstack/wsgi.py:158 +msgid "cannot understand XML" +msgstr "" + +#: cinder/api/contrib/hosts.py:133 +#, python-format +msgid "Host '%s' could not be found." +msgstr "" + +#: cinder/api/contrib/hosts.py:162 +#, python-format +msgid "Invalid status: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:165 +#, python-format +msgid "Invalid update setting: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:177 +#, python-format +msgid "Setting host %(host)s to %(state)s." +msgstr "" + +#: cinder/api/contrib/hosts.py:202 +msgid "Describe-resource is admin only functionality" +msgstr "" + +#: cinder/api/contrib/hosts.py:210 +msgid "Host not found" +msgstr "" + +#: cinder/api/contrib/quotas.py:63 +msgid "Quota limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:102 +msgid "Request body empty" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:106 +#: cinder/api/v1/snapshot_metadata.py:77 cinder/api/v1/volume_metadata.py:77 +#: cinder/api/v2/snapshot_metadata.py:77 +msgid "Request body and URI mismatch" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:109 +#: cinder/api/v1/snapshot_metadata.py:81 cinder/api/v1/volume_metadata.py:81 +#: cinder/api/v2/snapshot_metadata.py:81 +msgid "Request body contains too many items" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:159 +msgid "Invalid request body" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:163 +msgid "No image_name was specified in request." +msgstr "" + +#: cinder/api/middleware/fault.py:45 +#, python-format +msgid "Caught error: %s" +msgstr "エラー %s をキャッチしました。" + +#: cinder/api/middleware/fault.py:54 cinder/api/openstack/wsgi.py:886 +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "" + +#: cinder/api/middleware/sizelimit.py:54 cinder/api/middleware/sizelimit.py:63 +#: cinder/api/middleware/sizelimit.py:77 +msgid "Request is too large." 
+msgstr "" + +#: cinder/api/openstack/__init__.py:74 +msgid "Must specify an ExtensionManager class" +msgstr "" + +#: cinder/api/openstack/__init__.py:85 +#, python-format +msgid "Extended resource: %s" +msgstr "" + +#: cinder/api/openstack/__init__.py:110 +#, python-format +msgid "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" +msgstr "" + +#: cinder/api/openstack/__init__.py:115 +#, python-format +msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgstr "" + +#: cinder/api/openstack/__init__.py:128 +msgid "" +"cinder.api.openstack:FaultWrapper is deprecated. Please use " +"cinder.api.middleware.fault:FaultWrapper instead." +msgstr "" + +#: cinder/api/openstack/urlmap.py:25 +msgid "" +"cinder.api.openstack.urlmap:urlmap_factory is deprecated. Please use " +"cinder.api.urlmap:urlmap_factory instead." +msgstr "" + +#: cinder/api/openstack/wsgi.py:134 cinder/api/openstack/wsgi.py:537 +msgid "cannot understand JSON" +msgstr "" + +#: cinder/api/openstack/wsgi.py:542 +msgid "too many body keys" +msgstr "" + +#: cinder/api/openstack/wsgi.py:581 +#, python-format +msgid "Exception handling resource: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:586 +#, python-format +msgid "Fault thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:589 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:697 +msgid "Unrecognized Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:701 +msgid "No Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:705 +msgid "Empty body provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:816 +#, python-format +msgid "There is no such action: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:819 cinder/api/openstack/wsgi.py:832 +#: cinder/api/v1/snapshot_metadata.py:55 cinder/api/v1/snapshot_metadata.py:73 +#: cinder/api/v1/snapshot_metadata.py:98 cinder/api/v1/snapshot_metadata.py:123 +#: cinder/api/v1/volume_metadata.py:55 cinder/api/v1/volume_metadata.py:73 +#: cinder/api/v1/volume_metadata.py:98 cinder/api/v1/volume_metadata.py:123 +#: cinder/api/v2/snapshot_metadata.py:55 cinder/api/v2/snapshot_metadata.py:73 +#: cinder/api/v2/snapshot_metadata.py:98 cinder/api/v2/snapshot_metadata.py:123 +msgid "Malformed request body" +msgstr "" + +#: cinder/api/openstack/wsgi.py:829 +msgid "Unsupported Content-Type" +msgstr "" + +#: cinder/api/openstack/wsgi.py:841 +msgid "Malformed request url" +msgstr "" + +#: cinder/api/openstack/wsgi.py:889 +#, python-format +msgid "%(url)s returned a fault: %(e)s" +msgstr "" + +#: cinder/api/openstack/volume/__init__.py:25 +msgid "" +"cinder.api.openstack.volume:APIRouter is deprecated. Please use " +"cinder.api.v1.router:APIRouter instead." +msgstr "" + +#: cinder/api/openstack/volume/versions.py:26 +msgid "" +"cinder.api.openstack.volume.versions.Versions is deprecated. Please use " +"cinder.api.versions.Versions instead." +msgstr "" + +#: cinder/api/v1/limits.py:144 cinder/api/v2/limits.py:144 +#, python-format +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." +msgstr "" + +#: cinder/api/v1/limits.py:270 cinder/api/v2/limits.py:270 +msgid "This request was rate-limited." 
+msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:39 cinder/api/v1/snapshot_metadata.py:119 +#: cinder/api/v1/snapshot_metadata.py:158 cinder/api/v2/snapshot_metadata.py:39 +#: cinder/api/v2/snapshot_metadata.py:119 +#: cinder/api/v2/snapshot_metadata.py:158 +#, fuzzy +msgid "snapshot does not exist" +msgstr "snapshotting: インスタンス %s のスナップショットを取得中" + +#: cinder/api/v1/snapshot_metadata.py:141 +#: cinder/api/v1/snapshot_metadata.py:151 cinder/api/v1/volume_metadata.py:141 +#: cinder/api/v1/volume_metadata.py:151 cinder/api/v2/snapshot_metadata.py:141 +#: cinder/api/v2/snapshot_metadata.py:151 +msgid "Metadata item was not found" +msgstr "" + +#: cinder/api/v1/snapshots.py:123 cinder/api/v2/snapshots.py:123 +#, python-format +msgid "Delete snapshot with id: %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:173 cinder/api/v2/snapshots.py:179 +#, python-format +msgid "Create snapshot from volume %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:177 cinder/api/v2/snapshots.py:188 +#, python-format +msgid "Invalid value '%s' for force. " +msgstr "" + +#: cinder/api/v1/volume_metadata.py:39 cinder/api/v1/volume_metadata.py:119 +#: cinder/api/v1/volume_metadata.py:158 +#, fuzzy +msgid "volume does not exist" +msgstr "ボリュームグループ%sが存在しません。" + +#: cinder/api/v1/volumes.py:106 +#, python-format +msgid "vol=%s" +msgstr "" + +#: cinder/api/v1/volumes.py:236 cinder/api/v2/volumes.py:153 +#, python-format +msgid "Delete volume with id: %s" +msgstr "" + +#: cinder/api/v1/volumes.py:278 cinder/api/v1/volumes.py:282 +#: cinder/api/v2/volumes.py:209 cinder/api/v2/volumes.py:213 +msgid "Invalid imageRef provided." +msgstr "" + +#: cinder/api/v1/volumes.py:339 cinder/api/v2/volumes.py:272 +#, python-format +msgid "Create volume of %s GB" +msgstr "Create volume: %s GBのボリュームを作成します。" + +#: cinder/api/v1/volumes.py:418 +#, python-format +msgid "Removing options '%(bad_options)s' from query" +msgstr "" + +#: cinder/api/v2/volumes.py:359 +#, python-format +msgid "Removing options '%s' from query" +msgstr "" + +#: cinder/backup/api.py:53 +#, fuzzy +msgid "Backup status must be available or error" +msgstr "ボリュームのステータス(status)は available でなければなりません。" + +#: cinder/backup/api.py:78 +#, fuzzy +msgid "Volume to be backed up must be available" +msgstr "ボリュームのステータス(status)は available でなければなりません。" + +#: cinder/backup/api.py:112 +#, fuzzy +msgid "Backup status must be available" +msgstr "ボリュームのステータス(status)は available でなければなりません。" + +#: cinder/backup/api.py:117 +msgid "Backup to be restored has invalid size" +msgstr "" + +#: cinder/backup/api.py:126 +#, python-format +msgid "Creating volume of %(size)s GB for restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/api.py:140 +#, python-format +msgid "" +"volume size %(volume_size)d is too small to restore backup of size " +"%(size)d." 
+msgstr "" + +#: cinder/backup/api.py:145 +#, fuzzy +msgid "Volume to be restored to must be available" +msgstr "ボリュームのステータス(status)は available でなければなりません。" + +#: cinder/backup/api.py:151 +msgid "Volume to be restored to is smaller than the backup to be restored" +msgstr "" + +#: cinder/backup/api.py:155 +#, python-format +msgid "Overwriting volume %(volume_id)s with restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/manager.py:79 +msgid "Cleaning up incomplete backup operations" +msgstr "" + +#: cinder/backup/manager.py:83 +#, python-format +msgid "Resetting volume %s to available (was backing-up)" +msgstr "" + +#: cinder/backup/manager.py:87 +#, python-format +msgid "Resetting volume %s to error_restoring (was restoring-backup)" +msgstr "" + +#: cinder/backup/manager.py:98 +#, python-format +msgid "Resetting backup %s to error (was creating)" +msgstr "" + +#: cinder/backup/manager.py:104 +#, python-format +msgid "Resetting backup %s to available (was restoring)" +msgstr "" + +#: cinder/backup/manager.py:109 +#, python-format +msgid "Resuming delete on backup: %s" +msgstr "" + +#: cinder/backup/manager.py:119 +#, python-format +msgid "create_backup started, backup: %(backup_id)s for volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:128 +#, python-format +msgid "" +"create_backup aborted, expected volume status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:137 +#, python-format +msgid "" +"create_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:160 +#, python-format +msgid "create_backup finished. backup: %s" +msgstr "" + +#: cinder/backup/manager.py:166 +#, python-format +msgid "" +"restore_backup started, restoring backup: %(backup_id)s to volume: " +"%(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:175 +#, python-format +msgid "" +"restore_backup aborted, expected volume status %(expected_status)s but " +"got %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:183 +#, python-format +msgid "" +"restore_backup aborted, expected backup status %(expected_status)s but " +"got %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:199 +#, python-format +msgid "" +"restore_backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]" +msgstr "" + +#: cinder/backup/manager.py:220 +#, python-format +msgid "" +"restore_backup finished, backup: %(backup_id)s restored to volume: " +"%(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:228 +#, python-format +msgid "delete_backup started, backup: %s" +msgstr "" + +#: cinder/backup/manager.py:234 +#, python-format +msgid "" +"delete_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:244 +#, python-format +msgid "" +"delete_backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]" +msgstr "" + +#: cinder/backup/manager.py:264 +#, python-format +msgid "delete_backup finished, backup %s deleted" +msgstr "" + +#: cinder/backup/services/swift.py:96 +#, python-format +msgid "unsupported compression algorithm: %s" +msgstr "" + +#: cinder/backup/services/swift.py:117 +#, python-format +msgid "_check_container_exists: container: %s" +msgstr "" + +#: 
+#, fuzzy, python-format
+msgid "container %s does not exist"
+msgstr "ボリュームグループ%sが存在しません。"
+
+#: cinder/backup/services/swift.py:127
+#, python-format
+msgid "container %s exists"
+msgstr ""
+
+#: cinder/backup/services/swift.py:133
+#, python-format
+msgid "_create_container started, container: %(container)s,backup: %(backup_id)s"
+msgstr ""
+
+#: cinder/backup/services/swift.py:148
+#, python-format
+msgid "_generate_swift_object_name_prefix: %s"
+msgstr ""
+
+#: cinder/backup/services/swift.py:159
+#, python-format
+msgid "generated object list: %s"
+msgstr ""
+
+#: cinder/backup/services/swift.py:169
+#, python-format
+msgid ""
+"_write_metadata started, container name: %(container)s, metadata "
+"filename: %(filename)s"
+msgstr ""
+
+#: cinder/backup/services/swift.py:184
+#, python-format
+msgid ""
+"error writing metadata file to swift, MD5 of metadata file in swift "
+"[%(etag)s] is not the same as MD5 of metadata file sent to swift "
+"[%(md5)s]"
+msgstr ""
+
+#: cinder/backup/services/swift.py:188
+msgid "_write_metadata finished"
+msgstr ""
+
+#: cinder/backup/services/swift.py:193
+#, python-format
+msgid ""
+"_read_metadata started, container name: %(container)s, metadata filename:"
+" %(filename)s"
+msgstr ""
+
+#: cinder/backup/services/swift.py:197
+#, python-format
+msgid "_read_metadata finished (%s)"
+msgstr ""
+
+#: cinder/backup/services/swift.py:207
+#, python-format
+msgid "volume size %d is invalid."
+msgstr ""
+
+#: cinder/backup/services/swift.py:221
+#, python-format
+msgid ""
+"starting backup of volume: %(volume_id)s to swift, volume size: "
+"%(volume_size_bytes)d, swift object names prefix %(object_prefix)s, "
+"availability zone: %(availability_zone)s"
+msgstr ""
+
+#: cinder/backup/services/swift.py:237
+msgid "reading chunk of data from volume"
+msgstr ""
+
+#: cinder/backup/services/swift.py:244
+#, python-format
+msgid ""
+"compressed %(data_size_bytes)d bytes of data to %(comp_size_bytes)d bytes"
+" using %(algorithm)s"
+msgstr ""
+
+#: cinder/backup/services/swift.py:248
+msgid "not compressing data"
+msgstr ""
+
+#: cinder/backup/services/swift.py:252
+msgid "About to put_object"
+msgstr ""
+
+#: cinder/backup/services/swift.py:257
+#, python-format
+msgid "swift MD5 for %(object_name)s: %(etag)s"
+msgstr ""
+
+#: cinder/backup/services/swift.py:260
+#, python-format
+msgid "backup MD5 for %(object_name)s: %(md5)s"
+msgstr ""
+
+#: cinder/backup/services/swift.py:262
+#, python-format
+msgid ""
+"error writing object to swift, MD5 of object in swift %(etag)s is not the"
+" same as MD5 of object sent to swift %(md5)s"
+msgstr ""
+
+#: cinder/backup/services/swift.py:268
+msgid "Calling eventlet.sleep(0)"
+msgstr ""
+
+#: cinder/backup/services/swift.py:276
+#, python-format
+msgid "backup %s finished."
+msgstr ""
+
+#: cinder/backup/services/swift.py:281
+#, python-format
+msgid "v1 swift volume backup restore of %s started"
+msgstr ""
+
+#: cinder/backup/services/swift.py:287
+#, python-format
+msgid "metadata_object_names = %s"
+msgstr ""
+
+#: cinder/backup/services/swift.py:293
+msgid ""
+"restore_backup aborted, actual swift object list in swift does not match "
+"object list stored in metadata"
+msgstr ""
+
+#: cinder/backup/services/swift.py:299
+#, python-format
+msgid ""
+"restoring object from swift. backup: %(backup_id)s, container: "
backup: %(backup_id)s, container: " +"%(container)s, swift object name: %(object_name)s, volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:309 +#, python-format +msgid "decompressing data using %s algorithm" +msgstr "" + +#: cinder/backup/services/swift.py:323 +#, python-format +msgid "v1 swift volume backup restore of %s finished" +msgstr "" + +#: cinder/backup/services/swift.py:331 +#, python-format +msgid "" +"starting restore of backup %(object_prefix)s from swift container: " +"%(container)s, to volume %(volume_id)s, backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:339 +#, python-format +msgid "Restoring swift backup version %s" +msgstr "" + +#: cinder/backup/services/swift.py:344 +#, python-format +msgid "No support to restore swift backup version %s" +msgstr "" + +#: cinder/backup/services/swift.py:348 +#, python-format +msgid "restore %(backup_id)s to %(volume_id)s finished." +msgstr "" + +#: cinder/backup/services/swift.py:362 +msgid "swift error while listing objects, continuing with delete" +msgstr "" + +#: cinder/backup/services/swift.py:371 +#, python-format +msgid "swift error while deleting object %s, continuing with delete" +msgstr "" + +#: cinder/backup/services/swift.py:374 +#, python-format +msgid "deleted swift object: %(swift_object_name)s in container: %(container)s" +msgstr "" + +#: cinder/backup/services/swift.py:380 +#, python-format +msgid "delete %s finished" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:152 +#, python-format +msgid "Creating iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:171 cinder/brick/iscsi/iscsi.py:264 +#: cinder/brick/iscsi/iscsi.py:400 cinder/brick/iscsi/iscsi.py:409 +#, python-format +msgid "Failed to create iscsi target for volume id:%(vol_id)s." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:181 +#, python-format +msgid "" +"Failed to create iscsi target for volume id:%(vol_id)s. Please ensure " +"your tgtd config file contains 'include %(volumes_dir)s/*'" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:192 +#, python-format +msgid "Removing iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:209 cinder/brick/iscsi/iscsi.py:426 +#, fuzzy, python-format +msgid "Failed to remove iscsi target for volume id:%(vol_id)s." 
+msgstr "存在しないコンソール %(console_id)s を削除しようとしました" + +#: cinder/brick/iscsi/iscsi.py:218 cinder/brick/iscsi/iscsi.py:434 +msgid "valid iqn needed for show_target" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:270 +#, python-format +msgid "Removing iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:356 +msgid "rtstool is not installed correctly" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:376 +#, python-format +msgid "Creating iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:416 +#, fuzzy, python-format +msgid "Removing iscsi_target: %s" +msgstr "Rebooting instance: インスタンス %s を再起動します。" + +#: cinder/brick/iscsi/iscsi.py:455 +#, python-format +msgid "Failed to add initiator iqn %s to target" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:35 +#, fuzzy, python-format +msgid "Unable to find Volume Group: %s" +msgstr "ボリューム %s を切断(detach)できません" + +#: cinder/brick/local_dev/lvm.py:41 +#, fuzzy, python-format +msgid "Failed to create Volume Group: %s" +msgstr "ボリューム %s の存在が確認できません。" + +#: cinder/brick/local_dev/lvm.py:75 +msgid "Error creating Volume Group" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:76 +#, python-format +msgid "Cmd :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:77 +#, python-format +msgid "StdOut :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:78 +#, python-format +msgid "StdErr :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:82 +#, fuzzy, python-format +msgid "Unable to locate Volume Group %s" +msgstr "ボリューム %s の存在が確認できません。" + +#: cinder/brick/local_dev/lvm.py:265 +#, fuzzy, python-format +msgid "Unable to find VG: %s" +msgstr "VBD %s から SRを取得できません。" + +#: cinder/brick/local_dev/lvm.py:284 +msgid "" +"Requested to setup thin provisioning, however current LVM version does " +"not support it." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:337 +#, fuzzy, python-format +msgid "Unable to find LV: %s" +msgstr "VBD %s から SRを取得できません。" + +#: cinder/common/sqlalchemyutils.py:68 +msgid "Id not in sort_keys; is sort_keys unique?" +msgstr "" + +#: cinder/common/sqlalchemyutils.py:116 +msgid "Unknown sort direction, must be 'desc' or 'asc'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:51 +msgid "Use of empty request context is deprecated" +msgstr "Request context を空とすることは非推奨です。" + +#: cinder/db/sqlalchemy/api.py:178 +#, python-format +msgid "Unrecognized read_deleted value '%s'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:786 +#, python-format +msgid "Change will make usage less than 0 for the following resources: %(unders)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1797 cinder/db/sqlalchemy/api.py:1823 +#, python-format +msgid "No backend config with id %(sm_backend_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1880 +#, python-format +msgid "No sm_flavor called %(sm_flavor)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1924 +#, python-format +msgid "No sm_volume with id %(volume_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1987 +#, python-format +msgid "No backup with id %(backup_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:74 +msgid "version should be an integer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:101 +msgid "Upgrade DB using Essex release first." +msgstr "" + +#: cinder/db/sqlalchemy/session.py:71 +#, python-format +msgid "Got mysql server has gone away: %s" +msgstr "" + +#: cinder/db/sqlalchemy/session.py:132 +#, python-format +msgid "SQL connection failed. %s attempts left." 
+msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:243 +msgid "Exception while creating table." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:272 +msgid "Downgrade from initial Cinder install is unsupported." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:85 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:123 +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:82 +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:47 +#, python-format +msgid "Table |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:135 +msgid "quota_classes table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:142 +msgid "quota_usages table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:149 +msgid "reservations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:62 +msgid "Exception while creating table 'volume_glance_metedata'" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:77 +msgid "volume_glance_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:94 +msgid "backups table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:60 +msgid "snapshot_metadata table not dropped" +msgstr "" + +#: cinder/image/glance.py:151 +#, python-format +msgid "" +"Error contacting glance server '%(host)s:%(port)s' for '%(method)s', " +"%(extra)s." +msgstr "" + +#: cinder/image/image_utils.py:116 +msgid "Snapshot list encountered but no header found!" +msgstr "" + +#: cinder/image/image_utils.py:224 +msgid "'qemu-img info' parsing failed." +msgstr "" + +#: cinder/image/image_utils.py:231 +#, python-format +msgid "fmt=%(fmt)s backed by:%(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:248 +#, python-format +msgid "Converted to raw, but format is now %s" +msgstr "" + +#: cinder/image/image_utils.py:278 +#, python-format +msgid "Converted to %(f1)s, but format is now %(f2)s" +msgstr "" + +#: cinder/openstack/common/exception.py:104 +#, fuzzy +msgid "Uncaught exception" +msgstr "例外 %s が発生しました。" + +#: cinder/openstack/common/excutils.py:48 +#, python-format +msgid "Original exception being dropped: %s" +msgstr "" + +#: cinder/openstack/common/lockutils.py:102 +#, python-format +msgid "Could not release the acquired lock `%s`" +msgstr "" + +#: cinder/openstack/common/lockutils.py:188 +#, python-format +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:199 +#, python-format +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:226 +#, python-format +msgid "Got file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:234 +#, python-format +msgid "Released file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." 
+msgstr "" + +#: cinder/openstack/common/log.py:226 +#, fuzzy, python-format +msgid "Deprecated: %s" +msgstr "受信: %s" + +#: cinder/openstack/common/log.py:329 +#, python-format +msgid "Error loading logging config %(log_config)s: %(err_msg)s" +msgstr "" + +#: cinder/openstack/common/log.py:379 +#, python-format +msgid "syslog facility must be one of: %s" +msgstr "" + +#: cinder/openstack/common/log.py:537 +#, python-format +msgid "Fatal call to deprecated config: %(msg)s" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:84 +#, python-format +msgid "task run outlasted interval by %s sec" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:91 +msgid "in fixed duration looping call" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:131 +#, python-format +msgid "Dynamic looping call sleeping for %.02f seconds" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:138 +msgid "in dynamic looping call" +msgstr "" + +#: cinder/openstack/common/policy.py:149 +#, python-format +msgid "" +"Inheritance-based rules are deprecated; use the default brain instead of " +"%s." +msgstr "" + +#: cinder/openstack/common/policy.py:163 +#, python-format +msgid "Failed to understand rule %(match)r" +msgstr "" + +#: cinder/openstack/common/policy.py:173 +#, python-format +msgid "Inheritance-based rules are deprecated; update _check_%s" +msgstr "" + +#: cinder/openstack/common/policy.py:180 +#, python-format +msgid "No handler for matches of kind %s" +msgstr "" + +#: cinder/openstack/common/service.py:112 +#: cinder/openstack/common/service.py:275 +#, fuzzy +msgid "Full set of CONF:" +msgstr "FLAGSの一覧:" + +#: cinder/openstack/common/service.py:261 +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "" + +#: cinder/openstack/common/strutils.py:72 +#, python-format +msgid "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:125 +#, python-format +msgid "%s not in valid priorities" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:141 +#, python-format +msgid "" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:171 +#, python-format +msgid "Failed to load notifier %s. These notifications will not be sent." +msgstr "" + +#: cinder/openstack/common/notifier/rabbit_notifier.py:27 +msgid "The rabbit_notifier is now deprecated. Please use rpc_notifier instead." +msgstr "" + +#: cinder/openstack/common/notifier/rpc_notifier.py:45 +#: cinder/openstack/common/notifier/rpc_notifier2.py:51 +#, python-format +msgid "Could not send notification to %(topic)s. Payload=%(message)s" +msgstr "" + +#: cinder/openstack/common/rpc/__init__.py:106 +#, python-format +msgid "" +"A RPC is being made while holding a lock. The locks currently held are " +"%(locks)s. This is probably a bug. Please report it. Include the " +"following: [%(stack)s]." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:75 +msgid "Pool creating new connection" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:200 +#, python-format +msgid "no calling threads waiting for msg_id : %s, message : %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:208 +#, python-format +msgid "" +"Number of call waiters is greater than warning threshhold: %d. There " +"could be a MulticallProxyWaiter leak." 
+msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:291 +#, python-format +msgid "unpacked context: %s" +msgstr "context %s をアンパックしました。" + +#: cinder/openstack/common/rpc/amqp.py:337 +#, python-format +msgid "UNIQUE_ID is %s." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:406 +#, python-format +msgid "received %s" +msgstr "受信: %s" + +#: cinder/openstack/common/rpc/amqp.py:414 +#, python-format +msgid "no method for message: %s" +msgstr "メッセージ %s に対するメソッドが存在しません。" + +#: cinder/openstack/common/rpc/amqp.py:415 +#, python-format +msgid "No method for message: %s" +msgstr "メッセージ %s に対するメソッドが存在しません。" + +#: cinder/openstack/common/rpc/amqp.py:443 +#: cinder/openstack/common/rpc/impl_zmq.py:286 +#, python-format +msgid "Expected exception during message handling (%s)" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:451 +#: cinder/openstack/common/rpc/impl_zmq.py:292 +msgid "Exception during message handling" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:586 +#, python-format +msgid "Making synchronous call on %s ..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:589 +#, python-format +msgid "MSG_ID is %s" +msgstr "MSG_IDは %s です。" + +#: cinder/openstack/common/rpc/amqp.py:623 +#, python-format +msgid "Making asynchronous cast on %s..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:632 +msgid "Making asynchronous fanout cast..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:660 +#, python-format +msgid "Sending %(event_type)s on %(topic)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:74 +msgid "An unknown RPC related exception occurred." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:104 +#, python-format +msgid "" +"Remote error: %(exc_type)s %(value)s\n" +"%(traceback)s." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:121 +#, python-format +msgid "" +"Timeout while waiting on RPC response - topic: \"%(topic)s\", RPC method:" +" \"%(method)s\" info: \"%(info)s\"" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:137 +#: cinder/openstack/common/rpc/common.py:138 +#: cinder/openstack/common/rpc/common.py:139 +msgid "" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:143 +#, python-format +msgid "Found duplicate message(%(msg_id)s). Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:147 +msgid "Invalid reuse of an RPC connection." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:151 +#, python-format +msgid "Specified RPC version, %(version)s, not supported by this endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:156 +#, python-format +msgid "" +"Specified RPC envelope version, %(version)s, not supported by this " +"endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:280 +#, python-format +msgid "Failed to sanitize %(item)s. Key error %(err)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:302 +#, python-format +msgid "Returning exception %s to caller" +msgstr "呼び出し元に 例外 %s を返却します。" + +#: cinder/openstack/common/rpc/impl_kombu.py:169 +#: cinder/openstack/common/rpc/impl_qpid.py:133 +msgid "Failed to process message... skipping it." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:480 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:502 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:539 +#, python-format +msgid "" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:555 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:609 +#: cinder/openstack/common/rpc/impl_qpid.py:403 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:627 +#: cinder/openstack/common/rpc/impl_qpid.py:418 +#, python-format +msgid "Timed out waiting for RPC response: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:631 +#: cinder/openstack/common/rpc/impl_qpid.py:422 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:670 +#: cinder/openstack/common/rpc/impl_qpid.py:457 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:351 +#, python-format +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:357 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:370 +msgid "Re-established AMQP queues" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:430 +msgid "Error processing message. Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:97 +msgid "JSON serialization failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:104 +#, python-format +msgid "Deserializing: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:140 +#, python-format +msgid "Connecting to %(addr)s with %(type)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:141 +#, python-format +msgid "-> Subscribed to %(subscribe)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:142 +#, python-format +msgid "-> bind: %(bind)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:150 +msgid "Could not open socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:162 +#, python-format +msgid "Subscribing to %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:204 +msgid "You cannot recv on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:209 +msgid "You cannot send on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:273 +#, fuzzy, python-format +msgid "Running func with context: %s" +msgstr "context %s をアンパックしました。" + +#: cinder/openstack/common/rpc/impl_zmq.py:311 +msgid "Sending reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:345 +msgid "RPC message did not include method." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:380 +#, fuzzy +msgid "Registering reactor" +msgstr "De-registering image: イメージ %s を登録解除します。" + +#: cinder/openstack/common/rpc/impl_zmq.py:392 +msgid "In reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:407 +msgid "Out reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:411 +msgid "Consuming socket" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:453 +#, python-format +msgid "CONSUMER GOT %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:465 +#, python-format +msgid "Creating proxy for topic: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:471 +msgid "Topic contained dangerous characters." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:496 +#, python-format +msgid "ROUTER RELAY-OUT SUCCEEDED %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:505 +msgid "Topic socket file creation failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:510 +#, python-format +msgid "ROUTER RELAY-OUT QUEUED %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:513 +#, python-format +msgid "Local per-topic backlog buffer full for topic %(topic)s. Dropping message." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:532 +#, python-format +msgid "Could not create IPC directory %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:542 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:576 +#, python-format +msgid "CONSUMER RECEIVED DATA: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:578 +#, python-format +msgid "ROUTER RELAY-OUT %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:600 +msgid "ZMQ Envelope version unsupported or unknown." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:628 +msgid "Skipping topic registration. Already registered." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:635 +#, python-format +msgid "Consumer is a zmq.%s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:687 +msgid "Creating payload" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:700 +msgid "Creating queue socket for reply waiter" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:713 +msgid "Sending cast" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:716 +msgid "Cast sent; Waiting reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:719 +#, fuzzy, python-format +msgid "Received message: %s" +msgstr "受信: %s" + +#: cinder/openstack/common/rpc/impl_zmq.py:720 +msgid "Unpacking response" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:729 +msgid "Unsupported or unknown ZMQ envelope returned." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:736 +msgid "RPC Message Invalid." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:760 +#, python-format +msgid "%(msg)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:763 +#, python-format +msgid "Sending message(s) to: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:767 +msgid "No matchmaker results. Not casting." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:770 +msgid "No match from matchmaker." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:53 +msgid "Match not found by MatchMaker." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:89 +#, fuzzy +msgid "Matchmaker does not implement registration or heartbeat." 
+msgstr "xenapi.fake には %s が実装されていません。" + +#: cinder/openstack/common/rpc/matchmaker.py:239 +#, python-format +msgid "Matchmaker unregistered: %s, %s" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:250 +msgid "Register before starting heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:343 +#: cinder/openstack/common/rpc/matchmaker.py:361 +#, python-format +msgid "No key defining hosts for topic '%s', see ringfile" +msgstr "" + +#: cinder/scheduler/chance.py:51 cinder/scheduler/simple.py:90 +msgid "Is the appropriate service running?" +msgstr "" + +#: cinder/scheduler/chance.py:56 +msgid "Could not find another host" +msgstr "" + +#: cinder/scheduler/driver.py:89 +msgid "Must implement a fallback schedule" +msgstr "予備の(fallback)スケジューラを実装する必要があります。" + +#: cinder/scheduler/driver.py:93 +#, fuzzy +msgid "Must implement schedule_create_volume" +msgstr "予備の(fallback)スケジューラを実装する必要があります。" + +#: cinder/scheduler/filter_scheduler.py:113 +msgid "Invalid value for 'scheduler_max_attempts', must be >=1" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:131 +#, python-format +msgid "" +"Error scheduling %(volume_id)s from last vol-service: %(last_host)s : " +"%(exc)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:160 +#, python-format +msgid "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:208 +#, python-format +msgid "Filtered %(hosts)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:214 +#, python-format +msgid "Choosing %(best_host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:240 +#, python-format +msgid "Ignoring %(service_name)s service update from %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:244 +#, python-format +msgid "Received %(service_name)s service update from %(host)s." +msgstr "" + +#: cinder/scheduler/host_manager.py:266 +msgid "service is down or disabled." +msgstr "" + +#: cinder/scheduler/manager.py:119 +#, python-format +msgid "Failed to schedule_%(method)s: %(ex)s" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:69 +#, python-format +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:78 +#, python-format +msgid "Could not decode scheduler options: '%(e)s'" +msgstr "" + +#: cinder/scheduler/simple.py:79 +msgid "Not enough allocatable volume gigabytes remaining" +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:37 +msgid "Free capacity not set: volume node info collection broken." +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:51 +#, python-format +msgid "" +"Insufficient free space for volume creation (requested / avail): " +"%(requested)s/%(available)s" +msgstr "" + +#: cinder/scheduler/filters/retry_filter.py:41 +#, python-format +msgid "Host %(host)s %(pass_msg)s. 
Previously tried hosts: %(hosts)s" +msgstr "" + +#: cinder/tests/fake_driver.py:45 cinder/volume/driver.py:506 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "偽のISCSI: %s" + +#: cinder/tests/fake_driver.py:93 cinder/tests/fake_driver.py:98 +#, python-format +msgid "LoggingVolumeDriver: %s" +msgstr "" + +#: cinder/tests/fake_utils.py:72 +#, python-format +msgid "Faking execution of cmd (subprocess): %s" +msgstr "" + +#: cinder/tests/fake_utils.py:80 +#, python-format +msgid "Faked command matched %s" +msgstr "" + +#: cinder/tests/fake_utils.py:96 +#, python-format +msgid "Faked command raised an exception %s" +msgstr "" + +#: cinder/tests/fake_utils.py:101 +#, python-format +msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" +msgstr "" + +#: cinder/tests/test_misc.py:59 +#, python-format +msgid "" +"The following migrations are missing a downgrade:\n" +"\t%s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:243 +#, python-format +msgid "unrecognized argument %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1217 +#, python-format +msgid "Run CLI command: %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1220 +#, python-format +msgid "" +"CLI output:\n" +" stdout: %(stdout)s\n" +" stderr: %(stderr)s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1225 +#, python-format +msgid "" +"CLI Exception output:\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/tests/test_volume_types.py:58 +#, python-format +msgid "Given data: %s" +msgstr "" + +#: cinder/tests/test_volume_types.py:59 +#, python-format +msgid "Result data: %s" +msgstr "" + +#: cinder/tests/api/contrib/test_backups.py:638 +msgid "Invalid input" +msgstr "" + +#: cinder/tests/integrated/test_login.py:31 +#, fuzzy, python-format +msgid "volume: %s" +msgstr "Detach volume: ボリューム %s をデタッチします" + +#: cinder/tests/integrated/api/client.py:37 +#, python-format +msgid "" +"%(message)s\n" +"Status Code: %(_status)s\n" +"Body: %(_body)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:46 +msgid "Authentication error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:54 +msgid "Authorization error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:62 +msgid "Item not found" +msgstr "" + +#: cinder/tests/integrated/api/client.py:104 +#, python-format +msgid "Doing %(method)s on %(relative_url)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:106 +#, python-format +msgid "Body: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:124 +#, python-format +msgid "%(auth_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:150 +#, python-format +msgid "%(relative_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:160 +msgid "Unexpected status code" +msgstr "" + +#: cinder/tests/integrated/api/client.py:167 +#, python-format +msgid "Decoding JSON: %s" +msgstr "" + +#: cinder/volume/api.py:96 +msgid "May specify only one of snapshot, imageRef or source volume" +msgstr "" + +#: cinder/volume/api.py:103 cinder/volume/api.py:449 +#: cinder/volume/manager.py:554 +msgid "status must be available" +msgstr "" + +#: cinder/volume/api.py:108 +msgid "Volume size cannot be lesser than the Snapshot size" +msgstr "" + +#: cinder/volume/api.py:117 +msgid "Unable to clone volumes that are in an error state" +msgstr "" + +#: cinder/volume/api.py:123 +msgid "Clones currently must be >= original volume size." 
+msgstr "" + +#: cinder/volume/api.py:140 +#, python-format +msgid "Volume size '%s' must be an integer and greater than 0" +msgstr "" + +#: cinder/volume/api.py:150 +msgid "Size of specified image is larger than volume size." +msgstr "" + +#: cinder/volume/api.py:154 +msgid "Image minDisk size is larger than the volume size." +msgstr "" + +#: cinder/volume/api.py:168 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/api.py:177 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create volume (%(d_consumed)d " +"volumes already consumed)" +msgstr "" + +#: cinder/volume/api.py:308 +msgid "Failed to update quota for deleting volume" +msgstr "" + +#: cinder/volume/api.py:316 +#, fuzzy +msgid "Volume status must be available or error" +msgstr "ボリュームのステータス(status)は available でなければなりません。" + +#: cinder/volume/api.py:321 +#, python-format +msgid "Volume still has %d dependent snapshots" +msgstr "" + +#: cinder/volume/api.py:374 cinder/volume/api.py:432 +#: cinder/volume/volume_types.py:65 +#, python-format +msgid "Searching by: %s" +msgstr "" + +#: cinder/volume/api.py:452 +msgid "already attached" +msgstr "" + +#: cinder/volume/api.py:459 +msgid "already detached" +msgstr "" + +#: cinder/volume/api.py:470 +#, fuzzy +msgid "Volume status must be available to reserve" +msgstr "ボリュームのステータス(status)は available でなければなりません。" + +#: cinder/volume/api.py:519 +msgid "must be available" +msgstr "" + +#: cinder/volume/api.py:537 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG snapshot " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/api.py:546 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create snapshot (%(d_consumed)d " +"snapshots already consumed)" +msgstr "" + +#: cinder/volume/api.py:595 +#, fuzzy +msgid "Volume Snapshot status must be available or error" +msgstr "ボリュームのステータス(status)は available でなければなりません。" + +#: cinder/volume/api.py:623 +msgid "Metadata property key blank" +msgstr "" + +#: cinder/volume/api.py:627 +msgid "Metadata property key greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:631 +msgid "Metadata property value greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:716 +#, fuzzy +msgid "Volume status must be available/in-use." +msgstr "ボリュームのステータス(status)は available でなければなりません。" + +#: cinder/volume/api.py:719 +msgid "Volume status is in-use." +msgstr "" + +#: cinder/volume/driver.py:94 cinder/volume/drivers/netapp/nfs.py:253 +#, python-format +msgid "Recovering from a failed execute. Try number %s" +msgstr "実行失敗からリカバリーします。%s 回目のトライ。" + +#: cinder/volume/driver.py:221 cinder/volume/drivers/emc/emc_smis_iscsi.py:119 +msgid "ISCSI provider_location not stored, using discovery" +msgstr "" + +#: cinder/volume/driver.py:269 +#, fuzzy, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "ボリューム %s 用の iSCSI エクスポートが見つかりません" + +#: cinder/volume/driver.py:273 cinder/volume/drivers/emc/emc_smis_iscsi.py:162 +#, python-format +msgid "ISCSI Discovery: Found %s" +msgstr "" + +#: cinder/volume/driver.py:361 +#, python-format +msgid "copy_image_to_volume %s." +msgstr "" + +#: cinder/volume/driver.py:378 +#, python-format +msgid "copy_volume_to_image %s." 
+msgstr "" + +#: cinder/volume/driver.py:437 +#, python-format +msgid "iSCSI device not found at %s" +msgstr "" + +#: cinder/volume/driver.py:439 +#, python-format +msgid "" +"ISCSI volume not yet found at: %(host_device)s. Will rescan & retry. Try" +" number: %(tries)s" +msgstr "" + +#: cinder/volume/driver.py:451 +#, python-format +msgid "Found iSCSI node %(host_device)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/volume/driver.py:469 cinder/volume/manager.py:689 +#: cinder/volume/drivers/lvm.py:560 cinder/volume/drivers/lvm.py:676 +#: cinder/volume/drivers/storwize_svc.py:1344 +#: cinder/volume/drivers/emc/emc_smis_common.py:855 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:241 +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1503 +#: cinder/volume/drivers/netapp/iscsi.py:1114 +#: cinder/volume/drivers/netapp/iscsi.py:1484 +#: cinder/volume/drivers/netapp/iscsi.py:2236 +#: cinder/volume/drivers/netapp/iscsi.py:2515 +#: cinder/volume/drivers/nexenta/volume.py:317 +msgid "Updating volume status" +msgstr "" + +#: cinder/volume/driver.py:544 +msgid "Driver must implement initialize_connection" +msgstr "" + +#: cinder/volume/manager.py:122 +#, python-format +msgid "Driver path %s is deprecated, update your configuration to the new path." +msgstr "" + +#: cinder/volume/manager.py:144 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "%s 個のボリュームを再エクスポートします。" + +#: cinder/volume/manager.py:149 +#, python-format +msgid "volume %s stuck in a downloading state" +msgstr "" + +#: cinder/volume/manager.py:154 +#, python-format +msgid "volume %s: skipping export" +msgstr "ボリューム %s のエキスポートをスキップします。" + +#: cinder/volume/manager.py:156 +msgid "Resuming any in progress delete operations" +msgstr "" + +#: cinder/volume/manager.py:159 +#, python-format +msgid "Resuming delete on volume: %s" +msgstr "" + +#: cinder/volume/manager.py:219 +#, python-format +msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +msgstr "ボリューム %(vol_name)s: サイズ %(vol_size)sG のlvを作成します。" + +#: cinder/volume/manager.py:228 +#, fuzzy, python-format +msgid "volume %s: creating from snapshot" +msgstr "ボリューム %s をエクスポートします。" + +#: cinder/volume/manager.py:232 +#, fuzzy, python-format +msgid "volume %s: creating from existing volume" +msgstr "ボリューム %s をエクスポートします。" + +#: cinder/volume/manager.py:236 +#, fuzzy, python-format +msgid "volume %s: creating from image" +msgstr "ボリューム%sを作成します。" + +#: cinder/volume/manager.py:245 +#, python-format +msgid "volume %s: creating" +msgstr "ボリューム%sを作成します。" + +#: cinder/volume/manager.py:277 +#, python-format +msgid "volume %s: creating export" +msgstr "ボリューム %s をエクスポートします。" + +#: cinder/volume/manager.py:286 +#, fuzzy, python-format +msgid "volume %s: create failed" +msgstr "ボリューム%sを作成します。" + +#: cinder/volume/manager.py:316 +#, python-format +msgid "volume %s: created successfully" +msgstr "ボリューム %s の作成に成功しました。" + +#: cinder/volume/manager.py:324 +#, fuzzy, python-format +msgid "Error: %s" +msgstr "エラー %s をキャッチしました。" + +#: cinder/volume/manager.py:350 +#, python-format +msgid "volume %s: Error trying to reschedule create" +msgstr "" + +#: cinder/volume/manager.py:368 +msgid "Retry info not present, will not reschedule" +msgstr "" + +#: cinder/volume/manager.py:372 +msgid "No request spec, will not reschedule" +msgstr "" + +#: cinder/volume/manager.py:377 +#, python-format +msgid "volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d" +msgstr "" + +#: cinder/volume/manager.py:406 cinder/volume/manager.py:419 +#, python-format +msgid "volume %s: deleting" +msgstr 
"ボリューム %s を削除します。" + +#: cinder/volume/manager.py:412 +#, fuzzy +msgid "volume is not local to this node" +msgstr "ボリュームはこのノードのローカルではありません。" + +#: cinder/volume/manager.py:417 +#, python-format +msgid "volume %s: removing export" +msgstr "ボリューム %s のエクスポートを解除します。" + +#: cinder/volume/manager.py:422 +#, python-format +msgid "volume %s: volume is busy" +msgstr "" + +#: cinder/volume/manager.py:441 +msgid "Failed to update usages deleting volume" +msgstr "" + +#: cinder/volume/manager.py:445 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "ボリューム %s の削除に成功しました。" + +#: cinder/volume/manager.py:460 +#, python-format +msgid "snapshot %s: creating" +msgstr "" + +#: cinder/volume/manager.py:466 +#, python-format +msgid "snapshot %(snap_name)s: creating" +msgstr "" + +#: cinder/volume/manager.py:484 +#, python-format +msgid "snapshot %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:492 cinder/volume/manager.py:502 +#, python-format +msgid "snapshot %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:505 +#, fuzzy, python-format +msgid "snapshot %s: snapshot is busy" +msgstr "snapshotting: インスタンス %s のスナップショットを取得中" + +#: cinder/volume/manager.py:530 +msgid "Failed to update usages deleting snapshot" +msgstr "" + +#: cinder/volume/manager.py:533 +#, python-format +msgid "snapshot %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:551 +msgid "being attached by another instance" +msgstr "" + +#: cinder/volume/manager.py:608 +#, python-format +msgid "Downloaded image %(image_id)s to %(volume_id)s successfully" +msgstr "" + +#: cinder/volume/manager.py:626 +#, fuzzy, python-format +msgid "Uploaded volume %(volume_id)s to image (%(image_id)s) successfully" +msgstr "ボリューム %(volume_id)s をインスタンス %(instance_id)s のデバイス %(device)s に接続" + +#: cinder/volume/manager.py:702 +msgid "Clear capabilities" +msgstr "" + +#: cinder/volume/manager.py:706 +#, python-format +msgid "Notification {%s} received" +msgstr "" + +#: cinder/volume/volume_types.py:41 +#, python-format +msgid "DB error: %s" +msgstr "" + +#: cinder/volume/volume_types.py:50 cinder/volume/volume_types.py:97 +msgid "id cannot be None" +msgstr "" + +#: cinder/volume/volume_types.py:109 +msgid "name cannot be None" +msgstr "" + +#: cinder/volume/volume_types.py:128 +#, python-format +msgid "" +"Default volume type is not found, please check default_volume_type " +"config: %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:91 +msgid "Running with CoraidDriver for ESM EtherCLoud" +msgstr "" + +#: cinder/volume/drivers/coraid.py:102 +#, python-format +msgid "Update session cookie %(session)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:108 cinder/volume/drivers/coraid.py:183 +#, python-format +msgid "Message : %(message)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:125 +#, python-format +msgid "Error while trying to set group: %(message)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:128 +#, fuzzy, python-format +msgid "Unable to find group: %(group)s" +msgstr "VBD %s から SRを取得できません。" + +#: cinder/volume/drivers/coraid.py:161 +msgid "ESM urlOpen error" +msgstr "" + +#: cinder/volume/drivers/coraid.py:166 +msgid "JSON Error" +msgstr "" + +#: cinder/volume/drivers/coraid.py:170 +msgid "Request without URL" +msgstr "" + +#: cinder/volume/drivers/coraid.py:175 +#, python-format +msgid "Configure data : %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:177 +#, fuzzy, python-format +msgid "Configure response : %s" +msgstr "応答 %s" + +#: cinder/volume/drivers/coraid.py:199 +#, python-format 
+msgid "Unable to retrive volume infos for volume %(volname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:289 +msgid "Cannot login on Coraid ESM" +msgstr "" + +#: cinder/volume/drivers/coraid.py:308 +#, fuzzy, python-format +msgid "Fail to create volume %(volname)s" +msgstr "ボリューム %s の存在が確認できません。" + +#: cinder/volume/drivers/coraid.py:321 +#, fuzzy, python-format +msgid "Failed to delete volume %(volname)s" +msgstr "ボリューム %s の存在が確認できません。" + +#: cinder/volume/drivers/coraid.py:335 +#, fuzzy, python-format +msgid "Failed to Create Snapshot %(snapname)s" +msgstr "インスタンス終了処理を開始します。" + +#: cinder/volume/drivers/coraid.py:347 +#, python-format +msgid "Failed to Delete Snapshot %(snapname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:362 +#, python-format +msgid "Failed to Create Volume from Snapshot %(snapname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:383 +#, python-format +msgid "" +"Failed to Initialize Connection. Volume Name: %(volname)s Shelf: " +"%(shelf)s, Lun: %(lun)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:65 +#, python-format +msgid "There's no Gluster config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:70 +#, python-format +msgid "Gluster config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:80 +msgid "mount.glusterfs is not installed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:98 cinder/volume/drivers/nfs.py:168 +#, fuzzy, python-format +msgid "casted to %s" +msgstr "ネストした戻り値: %s" + +#: cinder/volume/drivers/glusterfs.py:108 cinder/volume/drivers/nfs.py:178 +#, python-format +msgid "Volume %s does not have provider_location specified, skipping" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:168 cinder/volume/drivers/nfs.py:237 +#, python-format +msgid "Exception during mounting %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:247 cinder/volume/drivers/nfs.py:321 +#, fuzzy, python-format +msgid "%s is already mounted" +msgstr "グループ %s は既に存在しています。" + +#: cinder/volume/drivers/lvm.py:82 +#, python-format +msgid "volume group %s doesn't exist" +msgstr "ボリュームグループ%sが存在しません。" + +#: cinder/volume/drivers/lvm.py:197 +#, python-format +msgid "Size for volume: %s not found, skipping secure delete." +msgstr "" + +#: cinder/volume/drivers/lvm.py:204 +#, python-format +msgid "Performing secure delete on volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:218 +#, python-format +msgid "Error unrecognized volume_clear option: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:238 +#, python-format +msgid "snapshot: %s not found, skipping delete operations" +msgstr "" + +#: cinder/volume/drivers/lvm.py:268 cinder/volume/drivers/lvm.py:656 +#, fuzzy, python-format +msgid "Creating clone of volume: %s" +msgstr "Create volume: %s GBのボリュームを作成します。" + +#: cinder/volume/drivers/lvm.py:345 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provision for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:364 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:378 +msgid "Detected inconsistency in provider_location id" +msgstr "" + +#: cinder/volume/drivers/lvm.py:430 +#, python-format +msgid "Symbolic link %s not found" +msgstr "" + +#: cinder/volume/drivers/lvm.py:510 cinder/volume/drivers/lvm.py:524 +#, python-format +msgid "Skipping remove_export. 
No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:542 +#, python-format +msgid "" +"Skipping remove_export. No iscsi_target is presently exported for volume:" +" %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:583 +msgid "Error retrieving volume status: " +msgstr "" + +#: cinder/volume/drivers/nfs.py:141 +#, python-format +msgid "There's no NFS config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/nfs.py:146 +#, python-format +msgid "NFS config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/rbd.py:64 +#, python-format +msgid "rbd has no pool %s" +msgstr "rbd にプール %s がありません。" + +#: cinder/volume/drivers/rbd.py:87 cinder/volume/drivers/sheepdog.py:134 +msgid "error refreshing volume stats" +msgstr "" + +#: cinder/volume/drivers/rbd.py:220 +msgid "Not stored in rbd" +msgstr "" + +#: cinder/volume/drivers/rbd.py:224 +msgid "Blank components" +msgstr "" + +#: cinder/volume/drivers/rbd.py:227 +msgid "Not an rbd snapshot" +msgstr "rbd のスナップショットではありません。" + +#: cinder/volume/drivers/rbd.py:242 +#, python-format +msgid "%s is in a different ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:253 +#, python-format +msgid "Unable to read image %s" +msgstr "イメージ %s を読み込めません。" + +#: cinder/volume/drivers/scality.py:63 +msgid "Value required for 'scality_sofs_config'" +msgstr "" + +#: cinder/volume/drivers/scality.py:74 +#, python-format +msgid "Cannot access 'scality_sofs_config': %s" +msgstr "" + +#: cinder/volume/drivers/scality.py:80 +msgid "Cannot execute /sbin/mount.sofs" +msgstr "" + +#: cinder/volume/drivers/scality.py:101 +msgid "Cannot mount Scality SOFS, check syslog for errors" +msgstr "" + +#: cinder/volume/drivers/scality.py:135 +#, python-format +msgid "Cannot find volume dir for Scality SOFS at '%s'" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:47 +#, python-format +msgid "Sheepdog is not working: %s" +msgstr "Sheepdog が動作していません: %s" + +#: cinder/volume/drivers/sheepdog.py:52 +msgid "Sheepdog is not working" +msgstr "Sheepdog が機能していません" + +#: cinder/volume/drivers/solidfire.py:129 +#, python-format +msgid "Payload for SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:146 +#, python-format +msgid "Call to json.loads() raised an exception: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:152 +#, python-format +msgid "Results of SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:156 +#, python-format +msgid "Clone operation encountered: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:158 +#, python-format +msgid "Waiting for outstanding operation before retrying snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:164 +#, python-format +msgid "Detected xDBVersionMismatch, retry %s of 5" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:171 +#, python-format +msgid "API response: %s" +msgstr "API 応答: %s" + +#: cinder/volume/drivers/solidfire.py:191 +#, python-format +msgid "Found solidfire account: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:220 +#, python-format +msgid "solidfire account: %s does not exist, create it..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:281 +#, python-format +msgid "Failed to retrieve volume SolidFire-ID: %s in get_by_account!" 
+msgstr "" + +#: cinder/volume/drivers/solidfire.py:338 +#, fuzzy +msgid "Failed to get model update from clone" +msgstr "ip %s に対するメタデータの取得に失敗しました。" + +#: cinder/volume/drivers/solidfire.py:364 +#, python-format +msgid "More than one valid preset was detected, using %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:397 +#, python-format +msgid "Mapped SolidFire volumeID %(sfid)s to cinder ID %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:406 +#, python-format +msgid "Volume %s, not found on SF Cluster." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:409 +#, python-format +msgid "Found %(count)s volumes mapped to id: %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:476 +msgid "Enter SolidFire delete_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:480 +#, python-format +msgid "Account for Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:482 +msgid "This usually means the volume was never succesfully created." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:497 +#, python-format +msgid "Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:500 +msgid "Leaving SolidFire delete_volume" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:504 +msgid "Executing SolidFire ensure_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:509 +msgid "Executing SolidFire create_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:558 +msgid "Updating cluster status info" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:566 +#, fuzzy +msgid "Failed to get updated stats" +msgstr "インスタンス終了処理を開始します。" + +#: cinder/volume/drivers/storwize_svc.py:173 +#, python-format +msgid "WWPN on node %(node)s: %(wwpn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:179 +msgid "enter: do_setup" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:190 +#, fuzzy, python-format +msgid "pool %s doesn't exist" +msgstr "ボリュームグループ%sが存在しません。" + +#: cinder/volume/drivers/storwize_svc.py:206 +msgid "Failed to get license information." 
+msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:265 +msgid "do_setup: No configured nodes" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:267 +msgid "leave: do_setup" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:289 +msgid "enter: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:295 +#: cinder/volume/drivers/netapp/iscsi.py:169 +#: cinder/volume/drivers/netapp/iscsi.py:1182 +#: cinder/volume/drivers/netapp/iscsi.py:1535 +#: cinder/volume/drivers/netapp/nfs.py:109 +#: cinder/volume/drivers/netapp/nfs.py:328 +#: cinder/volume/drivers/netapp/nfs.py:379 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:118 +#, python-format +msgid "%s is not set" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:301 +msgid "" +"Password or SSH private key is required for authentication: set either " +"san_password or san_private_key option" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:309 +#, python-format +msgid "" +"Illegal value %d specified for storwize_svc_flashcopy_timeout: valid " +"values are between 0 and 600" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:317 +msgid "leave: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:331 +#, python-format +msgid "ensure_export: Volume %s not found on storage" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:356 +#, python-format +msgid "enter: _get_chap_secret_for_host: host name %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:392 +#, python-format +msgid "" +"leave: _get_chap_secret_for_host: host name %(host_name)s with secret " +"%(chap_secret)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:415 +msgid "_create_host: Cannot clean host name. Host name is not unicode or string" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:483 +#, python-format +msgid "enter: _get_host_from_connector: prefix %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:511 +#, python-format +msgid "leave: _get_host_from_connector: host %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:523 +#, python-format +msgid "enter: _create_host: host %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:538 +msgid "_create_host: No connector ports" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:551 +#, python-format +msgid "leave: _create_host: host %(host)s - %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:574 +#, python-format +msgid "enter: _map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:609 +msgid "" +"storwize_svc_multihostmap_enabled is set to Flase, Not allow multi host " +"mapping" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:619 +#, fuzzy, python-format +msgid "volume %s mapping to multi host" +msgstr "ボリューム %s のエキスポートをスキップします。" + +#: cinder/volume/drivers/storwize_svc.py:625 +#, python-format +msgid "" +"leave: _map_vol_to_host: LUN %(result_lun)s, volume %(volume_name)s, host" +" %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:635 +#, python-format +msgid "enter: _delete_host: host %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:643 +#, python-format +msgid "leave: _delete_host: host %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:675 +#, python-format +msgid "enter: initialize_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:691 +msgid "_create_host failed to return the host name." 
+msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:702 +#, python-format +msgid "initialize_connection: Failed to get attributes for volume %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:709 +#, python-format +msgid "Did not find expected column name in lsvdisk: %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:711 +#, python-format +msgid "initialize_connection: Missing volume attribute for volume %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:728 +#, python-format +msgid "" +"initialize_connection: No node found in I/O group %(gid)s for volume " +"%(vol)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:736 +#, python-format +msgid "initialize_connection: Did not find a preferred node for volume %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:769 +#, python-format +msgid "" +"initialize_connection: Failed to collect return properties for volume " +"%(vol)s and connector %(conn)s.\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:774 +#, python-format +msgid "" +"leave: initialize_connection:\n" +" volume: %(vol)s\n" +" connector %(conn)s\n" +" properties: %(prop)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:792 +#, python-format +msgid "enter: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:802 +msgid "_get_host_from_connector failed to return the host name for connector" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:817 +#, python-format +msgid "" +"terminate_connection: No mapping of volume %(vol_name)s to host " +"%(host_name)s found" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:825 +#, python-format +msgid "leave: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:883 +msgid "protocol must be specified as ' iSCSI' or ' FC'" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:907 +#, python-format +msgid "enter: _create_vdisk: vdisk %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:945 +#, python-format +msgid "" +"_create_vdisk %(name)s - did not find success message in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:950 +#, python-format +msgid "leave: _create_vdisk: volume %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:962 +#: cinder/volume/drivers/storwize_svc.py:976 +#, python-format +msgid "" +"create FC mapping from %(source)s to %(target)s - did not find success " +"message in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:988 +#: cinder/volume/drivers/storwize_svc.py:998 +#, python-format +msgid "" +"create FC mapping from %(source)s to %(target)s - did not find mapping id" +" in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1012 +#, python-format +msgid "" +"_prepare_fc_map: Failed to prepare FlashCopy from %(source)s to " +"%(target)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1039 +#, python-format +msgid "" +"Unexecpted mapping status %(status)s for mapping %(id)s. Attributes: " +"%(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1050 +#, python-format +msgid "" +"Mapping %(id)s prepare failed to complete within the allotted %(to)d " +"seconds timeout. Terminating." 
+msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1055 +#, python-format +msgid "" +"_prepare_fc_map: Failed to start FlashCopy from %(source)s to %(target)s " +"with exception %(ex)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1062 +#, python-format +msgid "_prepare_fc_map: %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1069 +#, python-format +msgid "" +"_start_fc_map: Failed to start FlashCopy from %(source)s to %(target)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1080 +#, python-format +msgid "" +"enter: _run_flashcopy: execute FlashCopy from source %(source)s to target" +" %(target)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1092 +#, python-format +msgid "leave: _run_flashcopy: FlashCopy started from %(source)s to %(target)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1100 +#, python-format +msgid "enter: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1107 +#, python-format +msgid "_create_copy: Source vdisk %s does not exist" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1119 +#, python-format +msgid "" +"_create_copy: cannot get source vdisk %(src)s capacity from vdisk " +"attributes %(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1129 +#, python-format +msgid "leave: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1134 +#, python-format +msgid "enter: _get_flashcopy_mapping_attributes: mapping %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1156 +#, python-format +msgid "" +"leave: _get_flashcopy_mapping_attributes: mapping %(fc_map_id)s, " +"attributes %(attributes)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1164 +#, python-format +msgid "enter: _is_vdisk_defined: vdisk %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1166 +#, python-format +msgid "leave: _is_vdisk_defined: vdisk %(vol)s with %(str)s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1194 +#, python-format +msgid "enter: _delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1199 +#, python-format +msgid "warning: Tried to delete vdisk %s but it does not exist." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1225 +#, python-format +msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1258 +#, python-format +msgid "leave: _delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1283 +msgid "create_volume_from_snapshot: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1297 +msgid "create_cloned_volume: Source and destination size differ." 
+msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1361 +msgid "_update_volume_status: Could not get system name" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1373 +msgid "Could not get pool data from the storage" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1374 +msgid "_update_volume_status: Could not get storage pool data" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1412 +msgid "" +"Illegal value specified for storwize_svc_vol_rsize: set to either a " +"percentage (0-100) or -1" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1418 +msgid "" +"Illegal value specified for storwize_svc_vol_warning: set to a percentage" +" (0-100)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1425 +msgid "" +"Illegal value specified for storwize_svc_vol_grainsize: set to either 32," +" 64, 128, or 256" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1432 +msgid "System does not support compression" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1437 +msgid "If compression is set to True, rsize must also be set (not equal to -1)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1443 +#, python-format +msgid "" +"Illegal value %(prot)s specified for storwize_svc_connection_protocol: " +"valid values are %(enabled)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1452 +msgid "" +"Multipath is currently only supported for FC connections and not iSCSI. " +"(This is a Nova limitation.)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1464 +#, python-format +msgid "enter: _execute_command_and_parse_attributes: command %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1471 +#, fuzzy, python-format +msgid "" +"CLI Exception output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" +"%(description)s\n" +"コマンド: %(cmd)s\n" +"終了コード: %(exit_code)s\n" +"標準出力: %(stdout)r\n" +"標準エラー出力: %(stderr)r" + +#: cinder/volume/drivers/storwize_svc.py:1488 +#, python-format +msgid "" +"leave: _execute_command_and_parse_attributes:\n" +"command: %(cmd)s\n" +"attributes: %(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1507 +#, python-format +msgid "" +"_get_hdr_dic: attribute headers and values do not match.\n" +" Headers: %(header)s\n" +" Values: %(row)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1517 +#, python-format +msgid "" +"%(fun)s: Failed with unexpected CLI output.\n" +" Command: %(cmd)s\n" +"stdout: %(out)s\n" +"stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1531 +#, python-format +msgid "" +"%(fun)s: Failed with unexpected CLI output.\n" +" Command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1539 +#, python-format +msgid "Did not find expected column in %(fun)s: %(hdr)s" +msgstr "" + +#: cinder/volume/drivers/windows.py:142 +#, fuzzy, python-format +msgid "Creating folder %s " +msgstr "Deleting user: ユーザ %s を削除します。" + +#: cinder/volume/drivers/windows.py:207 +#: cinder/volume/drivers/nexenta/volume.py:203 +#, python-format +msgid "Ignored target creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/windows.py:213 +#, fuzzy, python-format +msgid "Disk not found: %s" +msgstr "Sheepdog が動作していません: %s" + +#: cinder/volume/drivers/zadara.py:218 +#, python-format +msgid "Sending %(method)s to %(url)s. Body \"%(body)s\"" +msgstr "" + +#: cinder/volume/drivers/zadara.py:240 +#, python-format +msgid "Operation completed. 
%(data)s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:350 +#, python-format +msgid "Volume %(name)s could not be found. It might be already deleted" +msgstr "" + +#: cinder/volume/drivers/zadara.py:447 +#, python-format +msgid "Attach properties: %(properties)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:42 +msgid "" +"Module PyWBEM not installed. Install PyWBEM using the python-pywbem " +"package." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:78 +msgid "Entering create_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:82 +#, fuzzy, python-format +msgid "Create Volume: %(volume)s Size: %(size)lu" +msgstr "ボリューム %(vol_name)s: サイズ %(vol_size)sG のlvを作成します。" + +#: cinder/volume/drivers/emc/emc_smis_common.py:90 +#, python-format +msgid "Create Volume: %(volume)s Storage type: %(storage_type)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:97 +#, python-format +msgid "" +"Create Volume: %(volume)s Pool: %(pool)s Storage System: " +"%(storage_system)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:106 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Storage Configuration Service not " +"found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:114 +#, python-format +msgid "" +"Create Volume: %(name)s Method: CreateOrModifyElementFromStoragePool " +"ConfigServicie: %(service)s ElementName: %(name)s InPool: %(pool)s " +"ElementType: 5 Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:129 +#, python-format +msgid "Create Volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:136 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:143 +#, python-format +msgid "Leaving create_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:151 +msgid "Entering create_volume_from_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:156 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:166 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Snapshot Instance: %(snapshotinstance)s Storage " +"System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:176 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Create Volume from Snapshot is NOT supported on VMAX." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:187 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Cannot find Replication Service to create volume from " +"snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:196 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Method: CreateElementReplica ReplicationService: " +"%(service)s ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:217 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s " +"Snapshot:%(snapshotname)s. 
Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:229 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Successfully clone volume from snapshot. Finding the " +"clone relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:240 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Remove the clone relationship. Method: " +"ModifyReplicaSynchronization ReplicationService: %(service)s Operation: " +"8 Synchronization: %(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:256 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:265 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:277 +#, python-format +msgid "" +"Leaving create_volume_from_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:286 +msgid "Entering create_cloned_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:291 +#, python-format +msgid "" +"Create a Clone from Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:301 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Source Instance: %(src_instance)s Storage System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:311 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Cannot find Replication Service to create cloned volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:320 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Method: CreateElementReplica ReplicationService: %(service)s " +"ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:341 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source " +"Volume:%(srcname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:353 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Successfully cloned volume from source volume. Finding the clone " +"relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:364 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Remove the clone relationship. Method: ModifyReplicaSynchronization " +"ReplicationService: %(service)s Operation: 8 Synchronization: " +"%(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:380 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:389 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Return code: %(rc)lu. 
Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:401 +#, python-format +msgid "" +"Leaving create_cloned_volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:410 +msgid "Entering delete_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:412 +#, python-format +msgid "Delete Volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:419 +#, python-format +msgid "Volume %(name)s not found on the array. No volume to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:429 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Storage Configuration Service not " +"found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:437 +#, python-format +msgid "Delete Volume: %(name)s DeviceID: %(deviceid)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:441 +#, python-format +msgid "" +"Delete Volume: %(name)s Method: EMCReturnToStoragePool ConfigServic: " +"%(service)s TheElement: %(vol_instance)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:454 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:463 +#, python-format +msgid "Leaving delete_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:470 +msgid "Entering create_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:474 +#, python-format +msgid "Create snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:486 +#, python-format +msgid "Device ID: %(deviceid)s: Storage System: %(storagesystem)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:493 +#: cinder/volume/drivers/emc/emc_smis_common.py:495 +#: cinder/volume/drivers/emc/emc_smis_common.py:565 +#, python-format +msgid "Cannot find Replication Service to create snapshot for volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:500 +#, python-format +msgid "" +"Create Snapshot: Method: CreateElementReplica: Target: %(snapshot)s " +"Source: %(volume)s Replication Service: %(service)s ElementName: " +"%(elementname)s Sync Type: 7 SourceElement: %(sourceelement)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:516 +#, python-format +msgid "" +"Create Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:525 +#, python-format +msgid "Error Create Snapshot: (snapshot)s Volume: %(volume)s Error: %(errordesc)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:533 +#, python-format +msgid "" +"Leaving create_snapshot: Snapshot: %(snapshot)s Volume: %(volume)s " +"Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:539 +msgid "Entering delete_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:543 +#, python-format +msgid "Delete Snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:549 +#, python-format +msgid "" +"Delete Snapshot: %(snapshot)s: volume: %(volume)s. Finding " +"StorageSychronization_SV_SV." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:557 +#, python-format +msgid "" +"Snapshot: %(snapshot)s: volume: %(volume)s not found on the array. No " +"snapshot to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:572 +#, python-format +msgid "" +"Delete Snapshot: Target: %(snapshot)s Source: %(volume)s. Method: " +"ModifyReplicaSynchronization: Replication Service: %(service)s " +"Operation: 19 Synchronization: %(sync_name)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:588 +#, python-format +msgid "" +"Delete Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:597 +#, python-format +msgid "" +"Error Delete Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:609 +#, python-format +msgid "" +"Leaving delete_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:619 +#, fuzzy, python-format +msgid "Create export: %(volume)s" +msgstr "%s 個のボリュームを再エクスポートします。" + +#: cinder/volume/drivers/emc/emc_smis_common.py:624 +#, fuzzy, python-format +msgid "create_export: Volume: %(volume)s Device ID: %(device_id)s" +msgstr "ボリューム %(volume_id)s をインスタンス %(instance_id)s のデバイス %(device)s に接続" + +#: cinder/volume/drivers/emc/emc_smis_common.py:646 +#, python-format +msgid "" +"ExposePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(lun_name)s" +" InitiatorPortIDs: %(initiator)s DeviceAccesses: 2" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:661 +#, python-format +msgid "ExposePaths parameter LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:672 +#, fuzzy, python-format +msgid "Error mapping volume %s." +msgstr "xvp の開始中にエラー: %s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:676 +#, fuzzy, python-format +msgid "ExposePaths for volume %s completed successfully." +msgstr "ボリューム %s の削除に成功しました。" + +#: cinder/volume/drivers/emc/emc_smis_common.py:692 +#, python-format +msgid "" +"HidePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(device_id)s " +"LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:705 +#, python-format +msgid "Error unmapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:709 +#, fuzzy, python-format +msgid "HidePaths for volume %s completed successfully." +msgstr "ボリューム %s の削除に成功しました。" + +#: cinder/volume/drivers/emc/emc_smis_common.py:722 +#, python-format +msgid "" +"AddMembers: ConfigServicie: %(service)s MaskingGroup: %(masking_group)s" +" Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:735 +#, python-format +msgid "Error mapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:740 +#, fuzzy, python-format +msgid "AddMembers for volume %s completed successfully." +msgstr "ボリューム %s の削除に成功しました。" + +#: cinder/volume/drivers/emc/emc_smis_common.py:753 +#, python-format +msgid "" +"RemoveMembers: ConfigServicie: %(service)s MaskingGroup: " +"%(masking_group)s Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:766 +#, python-format +msgid "Error unmapping volume %(vol)s. 
%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:771 +#, fuzzy, python-format +msgid "RemoveMembers for volume %s completed successfully." +msgstr "ボリューム %s の削除に成功しました。" + +#: cinder/volume/drivers/emc/emc_smis_common.py:777 +#, python-format +msgid "Map volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:786 +#: cinder/volume/drivers/emc/emc_smis_common.py:816 +#, python-format +msgid "Cannot find Controller Configuration Service for storage system %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:800 +#, python-format +msgid "Unmap volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:806 +#, python-format +msgid "Volume %s is not mapped. No volume to unmap." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:830 +#, python-format +msgid "Initialize connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:836 +#, python-format +msgid "Volume %s is already mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:848 +#, python-format +msgid "Terminate connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:880 +#, python-format +msgid "Found Storage Type: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:883 +msgid "Storage type not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:899 +#, python-format +msgid "Found Masking View: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:902 +msgid "Masking View not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:924 +msgid "Ecom user not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:944 +#, python-format +msgid "Ecom IP: %(ecomIp)s Port: %(ecomPort)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:947 +msgid "Ecom server not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:954 +#, fuzzy +msgid "Cannot connect to ECOM server" +msgstr "キューに再接続しました。" + +#: cinder/volume/drivers/emc/emc_smis_common.py:966 +#, python-format +msgid "Found Replication Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:979 +#, python-format +msgid "Found Storage Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:992 +#, python-format +msgid "Found Controller Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1005 +#, python-format +msgid "Found Storage Hardware ID Management Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1049 +#, python-format +msgid "Pool %(storage_type)s is not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1055 +#, python-format +msgid "Storage system not found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1061 +#, python-format +msgid "Pool: %(pool)s SystemName: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1077 +#, python-format +msgid "Pool name: %(poolname)s System name: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1109 +#, python-format +msgid "Volume %(volumename)s not found on the array." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1112 +#, fuzzy, python-format +msgid "Volume name: %(volumename)s Volume instance: %(vol_instance)s." 
+msgstr "ボリューム %(volume_id)s をインスタンス %(instance_id)s のデバイス %(device)s に接続" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1125 +#, python-format +msgid "Source: %(volumename)s Target: %(snapshotname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1148 +#, python-format +msgid "" +"Source: %(volumename)s Target: %(snapshotname)s. Storage Synchronized " +"not found. " +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1153 +#, python-format +msgid "" +"Storage system: %(storage_system)s Storage Synchronized instance: " +"%(sync)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1179 +#, python-format +msgid "Error finding %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1183 +#, python-format +msgid "Found %(name)s: %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1243 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage_system)s " +"and initiator %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1282 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage volume %(vol)s and initiator" +" %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1295 +#, python-format +msgid "" +"Volume %(name)s not found on the array. Cannot determine if there are " +"volumes mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1307 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage)s and " +"%(connector)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1319 +#, python-format +msgid "" +"Found %(numVolumesMapped)d volumes on storage system %(storage)s mapped " +"to %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1354 +#, python-format +msgid "Available device number on %(storage)s: %(device)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1397 +#, python-format +msgid "Device number not found for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1402 +#, python-format +msgid "Found device number %(device)d for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1412 +#, python-format +msgid "Device info: %(data)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1434 +#, python-format +msgid "Masking view: %(view)s DeviceMaskingGroup: %(masking)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1456 +#, python-format +msgid "Found Storage Processor System: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1484 +#, python-format +msgid "" +"iSCSIProtocolEndpoint for storage system %(storage_system)s and SP %(sp)s" +" is %(endpoint)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1513 +msgid "Error finding Storage Hardware ID Service." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1519 +#, python-format +msgid "" +"EMCGetTargetEndpoints: Service: %(service)s Storage HardwareIDs: " +"%(hardwareids)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1531 +msgid "Error finding Target WWNs." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1541 +#, python-format +msgid "Add target WWN: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1543 +#, python-format +msgid "Target WWNs: %s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1559 +#, python-format +msgid "Storage Hardware IDs for %(wwpns)s is %(foundInstances)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:158 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "ボリューム %s 用の iSCSI エクスポートが見つかりません" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:167 +#, fuzzy, python-format +msgid "Cannot find device number for volume %s" +msgstr "ボリューム %s 用の iSCSI エクスポートが見つかりません" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:197 +#, fuzzy, python-format +msgid "Found iSCSI endpoint: %s" +msgstr "NotFound 発生: %s" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:204 +#, python-format +msgid "ISCSI endpoint not found for SP %(sp)s on storage system %(storage)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:221 +#, python-format +msgid "ISCSI properties: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:113 +msgid "read timed out" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:138 +msgid "do_setup." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:144 +msgid "check_for_setup_error." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:149 +msgid "check_for_setup_error: Can not get device type." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:153 +#, python-format +msgid "check_for_setup_error: Device type is:%(type)s, version is:%(version)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:160 +msgid "" +"check_for_setup_error: Product version not right. Please make sure the " +"product version is V1." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:171 +msgid "_get_device_type: Storage Pool must be configured." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:180 +#, python-format +msgid "create_volume:volume name: %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:194 +#, python-format +msgid "delete_volume: volume name: %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:201 +#, python-format +msgid "delete_volume:No need to delete volume. Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:210 +#, python-format +msgid "create_export: volume name:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:214 +#, python-format +msgid "create_export:Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:234 +#, python-format +msgid "initialize_connection: volume name: %(volume)s. initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:249 +#, python-format +msgid "" +"initialize_connection:Failed to find target ip for " +"initiator:%(initiatorname)s, please check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:259 +#, python-format +msgid "initialize_connection:Failed to find target iSCSI iqn. Target IP:%(ip)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:297 +#, python-format +msgid "" +"initialize_connection:host name: %(host)s, initiator name: %(ini)s, " +"hostport name: %(port)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:307 +#, python-format +msgid "" +"initialize_connection:Failed to find the given volume. volume " +"name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:359 +#, python-format +msgid "terminate_connection:volume name: %(volume)s, initiator name: %(ini)s." 
+msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:368 +#, python-format +msgid "terminate_connection:Host does not exist. Host name:%(host)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:377 +#, python-format +msgid "terminate_connection:volume does not exist. volume name:%(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:396 +#, python-format +msgid "" +"terminate_connection:No map between host and volume. Host " +"name:%(hostname)s, volume name:%(volumename)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:414 +#, python-format +msgid "" +"terminate_connection:No initiator is added to the host. Host " +"name:%(hostname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:427 +#, python-format +msgid "create_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:434 +msgid "create_snapshot:Device does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:440 +msgid "create_snapshot:Resource pool needs 1GB valid size at least." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:447 +#, python-format +msgid "create_snapshot:Volume does not exist. Volume name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:456 +#, python-format +msgid "create_snapshot:Snapshot does not exist. Snapshot name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:468 +#, python-format +msgid "delete_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:475 +msgid "delete_snapshot:Device does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:484 +#, python-format +msgid "delete_snapshot:Snapshot does not exist. snapshot name:%(snap)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:497 +#, python-format +msgid "" +"create_volume_from_snapshot:snapshot name:%(snapshot)s, volume " +"name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:505 +#, python-format +msgid "" +"create_volume_from_snapshot:Device does not support create volume from " +"snapshot. Volume name:%(volume)s, snapshot name:%(snapshot)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:516 +#, python-format +msgid "" +"create_volume_from_snapshot:Snapshot does not exist. Snapshot " +"name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:567 +msgid "Config file is wrong. Controler IP, UserName and UserPassword must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:573 +#, python-format +msgid "_check_conf_file: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:584 +#, python-format +msgid "_read_xml:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:612 +#, python-format +msgid "Write login information to xml error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:616 +#, python-format +msgid "_get_login_info error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:638 +#, python-format +msgid "" +"Config file is wrong. LUNType must be \"Thin\" or \"Thick\". " +"LUNType:%(type)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:665 +msgid "_get_lun_set_info:Use default prefetch type. Prefetch type:Intelligent." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:676 +msgid "" +"_get_lun_set_info:No available pools! 
Please check whether storage pool " +"is created." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:686 +#, python-format +msgid "_get_lun_set_info:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:736 +msgid "" +"_get_maximum_pool:maxpoolid is None. Please check config file and make " +"sure the \"Name\" in \"StoragePool\" is right." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:754 +#, python-format +msgid "_get_iscsi_info:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:764 +#, python-format +msgid "CLI command:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:831 +#, python-format +msgid "_execute_cli:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:838 +#, python-format +msgid "" +"_name_translate:Name in cinder: %(old)s, new name in storage system: " +"%(new)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:872 +#, python-format +msgid "" +"_create_hostgroup:Failed to Create hostgroup. Hostgroup name: %(name)s. " +"out:%(out)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:901 +#, python-format +msgid "" +"_add_host:Failed to add host to hostgroup. host name:%(host)s hostgroup " +"id:%(hostgroup)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:927 +#, python-format +msgid "" +"_add_initiator:Failed to add initiator. initiator name:%(name)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:941 +#, python-format +msgid "" +"_delete_initiator:ERROE:Failed to delete initiator. initiator " +"name:%(name)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:991 +#, python-format +msgid "" +"_add_hostport:Failed to add hostport. port name:%(port)s port " +"information:%(info)s host id:%(host)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1009 +#, python-format +msgid "_delete_hostport:Failed to delete host port. port id:%(portid)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1016 +#, python-format +msgid "_get_tgt_iqn:iSCSI IP is %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1049 +#, python-format +msgid "_get_tgt_iqn:iSCSI target iqn is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1086 +#, python-format +msgid "" +"_map_lun:Failed to add hostmap. hostid:%(host)s lunid:%(lun)s " +"hostlunid:%(hostlunid)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1122 +#, python-format +msgid "" +"_delete_map:There are IOs accessing the system. Retry to delete host map." +" map id:%(mapid)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1128 +#, python-format +msgid "_delete_map:Failed to delete host map. mapid:%(mapid)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1142 +#, python-format +msgid "_delete_host: Failed delete host. host id:%(hostid)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1223 +#, python-format +msgid "" +"_active_snapshot:Failed to active snapshot. snapshot id:%(name)s. " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1237 +#, python-format +msgid "" +"_disable_snapshot:Failed to disable snapshot. snapshot id:%(id)s. " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1251 +#, python-format +msgid "" +"_delete_snapshot:Failed to delete snapshot. snapshot id:%(id)s. 
" +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1308 +#, python-format +msgid "_create_volume:Failed to Create volume. volume name:%(name)s. out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1321 +#, python-format +msgid "_delete_volume:Failed to delete volume. Volume name:%(name)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1338 +#, python-format +msgid "" +"_create_luncopy:Failed to Create LUNcopy. LUNcopy name:%(name)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1352 +#, python-format +msgid "" +"_start_luncopy:Failed to start LUNcopy. LUNcopy id:%(luncopyid)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1382 +#, python-format +msgid "" +"_wait_for_luncopy:LUNcopy status is not normal. LUNcopy " +"name:%(luncopyname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1415 +#, python-format +msgid "" +"_delete_luncopy:Failed to delete LUNcopy. LUNcopy id:%(luncopyid)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1430 +#, python-format +msgid "" +"_create_snapshot:Failed to Create snapshot. Snapshot name:%(name)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1468 +#, python-format +msgid "" +"_change_lun_controller:Failed to change lun owning controller. lun " +"id:%(lunid)s. new controller:%(controller)s. out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1484 +msgid "_is_resource_pool_enough:Resource pool for snapshot not be added." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:123 +#, python-format +msgid "API %(name)s failed: %(reason)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:134 +#: cinder/volume/drivers/netapp/iscsi.py:1167 +#, python-format +msgid "Using WSDL: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:144 +#, python-format +msgid "Using DFM server: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:149 +#, fuzzy, python-format +msgid "Using storage service: %s" +msgstr "インスタンス %s は実行中です。" + +#: cinder/volume/drivers/netapp/iscsi.py:154 +#, python-format +msgid "Using storage service prefix: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:160 +#, fuzzy, python-format +msgid "Using vfiler: %s" +msgstr "NotFound 発生: %s" + +#: cinder/volume/drivers/netapp/iscsi.py:173 +msgid "Either netapp_storage_service or netapp_storage_service_prefix must be set" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:204 +#, fuzzy +msgid "Connected to DFM server" +msgstr "キューに再接続しました。" + +#: cinder/volume/drivers/netapp/iscsi.py:282 +#, python-format +msgid "Discovered %(dataset_count)s datasets and %(lun_count)s LUNs" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:317 +#, fuzzy, python-format +msgid "Job failed: %s" +msgstr "NotFound 発生: %s" + +#: cinder/volume/drivers/netapp/iscsi.py:346 +msgid "" +"Attempt to use volume_type without specifying " +"netapp_storage_service_prefix flag." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:350 +msgid "" +"You must set the netapp_storage_service flag in order to create volumes " +"with no volume_type." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:420 +msgid "Failed to provision dataset member" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:435 +msgid "No LUN was created by the provision job" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:463 +#: cinder/volume/drivers/netapp/iscsi.py:1250 +#, python-format +msgid "No entry in LUN table for volume %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:481 +msgid "Failed to remove and delete dataset LUN member" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:497 +msgid "Failed to remove and delete dataset Qtree member" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:538 +#, python-format +msgid "No entry in LUN table for volume %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:558 +#, fuzzy, python-format +msgid "Failed to get LUN details for LUN ID %s" +msgstr "ip %s に対するメタデータの取得に失敗しました。" + +#: cinder/volume/drivers/netapp/iscsi.py:575 +#, fuzzy, python-format +msgid "Failed to get host details for host ID %s" +msgstr "ip %s に対するメタデータの取得に失敗しました。" + +#: cinder/volume/drivers/netapp/iscsi.py:812 +#: cinder/volume/drivers/netapp/iscsi.py:859 +#, fuzzy, python-format +msgid "No LUN ID for volume %s" +msgstr "ボリューム %s 用の iSCSI エクスポートが見つかりません" + +#: cinder/volume/drivers/netapp/iscsi.py:821 +#, fuzzy, python-format +msgid "Failed to get target portal for filer: %s" +msgstr "ip %s に対するメタデータの取得に失敗しました。" + +#: cinder/volume/drivers/netapp/iscsi.py:826 +#, fuzzy, python-format +msgid "Failed to get target IQN for filer: %s" +msgstr "ip %s に対するメタデータの取得に失敗しました。" + +#: cinder/volume/drivers/netapp/iscsi.py:930 +msgid "" +"Failed to get LUN list. Is the DFM host time-synchronized with Cinder " +"host?" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1042 +#: cinder/volume/drivers/netapp/iscsi.py:1381 +#: cinder/volume/drivers/netapp/iscsi.py:1699 +#: cinder/volume/drivers/netapp/nfs.py:74 +#, python-format +msgid "" +"Cannot create volume of size %(vol_size)s from snapshot of size " +"%(snap_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1054 +#, python-format +msgid "" +"Cannot create volume of type %(new_type)s from snapshot of type " +"%(old_type)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1075 +#: cinder/volume/drivers/netapp/nfs.py:273 +#, python-format +msgid "" +"Cannot create clone of size %(vol_size)s from volume of size " +"%(src_vol_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1086 +#, python-format +msgid "Cannot create clone of type %(new_type)s from volume of type %(old_type)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1143 +#, python-format +msgid "No metadata property %(prop)s defined for the LUN %(name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1216 +#: cinder/volume/drivers/netapp/iscsi.py:1561 +msgid "Success getting LUN list from server" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1238 +#: cinder/volume/drivers/netapp/iscsi.py:1576 +#, python-format +msgid "Created LUN with name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1254 +#: cinder/volume/drivers/netapp/iscsi.py:1369 +#: cinder/volume/drivers/netapp/iscsi.py:1593 +#, fuzzy, python-format +msgid "Destroyed LUN %s" +msgstr "ネストした戻り値: %s" + +#: cinder/volume/drivers/netapp/iscsi.py:1291 +#, python-format +msgid "Mapped LUN %(handle)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1298 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(handle)s and 
initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1303 +#: cinder/volume/drivers/netapp/iscsi.py:1637 +#, fuzzy, python-format +msgid "Failed to get LUN target details for the LUN %s" +msgstr "ip %s に対するメタデータの取得に失敗しました。" + +#: cinder/volume/drivers/netapp/iscsi.py:1307 +#: cinder/volume/drivers/netapp/iscsi.py:1648 +#, fuzzy, python-format +msgid "Failed to get target portal for the LUN %s" +msgstr "ip %s に対するメタデータの取得に失敗しました。" + +#: cinder/volume/drivers/netapp/iscsi.py:1311 +#: cinder/volume/drivers/netapp/iscsi.py:1651 +#, fuzzy, python-format +msgid "Failed to get target IQN for the LUN %s" +msgstr "ip %s に対するメタデータの取得に失敗しました。" + +#: cinder/volume/drivers/netapp/iscsi.py:1344 +#, python-format +msgid "Unmapped LUN %(handle)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1365 +#, python-format +msgid "No entry in LUN table for snapshot %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1409 +#: cinder/volume/drivers/netapp/iscsi.py:1907 +msgid "Object is not a NetApp LUN." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1419 +#: cinder/volume/drivers/netapp/iscsi.py:2188 +#, python-format +msgid "Cloned LUN with new name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1439 +#, fuzzy, python-format +msgid "Could not find handle for LUN named %s" +msgstr "ボリューム %s 用の iSCSI エクスポートが見つかりません" + +#: cinder/volume/drivers/netapp/iscsi.py:1458 +#: cinder/volume/drivers/netapp/iscsi.py:1936 +#, python-format +msgid "" +"Cannot clone volume of size %(vol_size)s from src volume of size " +"%(src_vol_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1518 +#, fuzzy, python-format +msgid "Using NetApp filer: %s" +msgstr "インスタンス %s は実行中です。" + +#: cinder/volume/drivers/netapp/iscsi.py:1585 +#, python-format +msgid "No entry in LUN table for volume/snapshot %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1628 +#, python-format +msgid "Mapped LUN %(name)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1632 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(name)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1688 +#, fuzzy, python-format +msgid "Snapshot %s deletion successful" +msgstr "ボリューム %s の削除に成功しました。" + +#: cinder/volume/drivers/netapp/iscsi.py:1717 +#, python-format +msgid "Unmapped LUN %(name)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1735 +#, fuzzy, python-format +msgid "Failed to get vol with required size for volume: %s" +msgstr "ip %s に対するメタデータの取得に失敗しました。" + +#: cinder/volume/drivers/netapp/iscsi.py:1811 +#, python-format +msgid "Error mapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1829 +#, python-format +msgid "Error unmapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1923 +#, fuzzy, python-format +msgid "Could not find attribute for LUN named %s" +msgstr "ボリューム %s 用の iSCSI エクスポートが見つかりません" + +#: cinder/volume/drivers/netapp/iscsi.py:2057 +#, python-format +msgid "No iscsi service found for vserver %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2192 +#, python-format +msgid "No clonned lun named %s found on the filer" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2377 +#, python-format +msgid "Error finding luns for volume %(vol)s. 
Verify volume exists." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2481 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s completed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2484 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s failed" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:312 +#, python-format +msgid "" +"Cloning with params ip %(host_ip)s, exp_path\n" +" %(export_path)s, vol %(volume_name)s,\n" +" clone_name %(clone_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:461 +#, python-format +msgid "No interface found on cluster for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:487 +#, python-format +msgid "" +"No volume on cluster with vserver\n" +" %(vserver)s and junction path " +"%(junction)s\n" +" " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:493 +#, python-format +msgid "" +"Cloning with params volume %(volume)s,src %(src_path)s,\n" +" dest %(dest_path)s, vserver %(vserver)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:547 +#, python-format +msgid "No storage path found for export path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:555 +#, python-format +msgid "Cloning with src %(src_path)s, dest %(dest_path)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/__init__.py:27 +msgid "Nexenta SA returned the error" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:64 +#, python-format +msgid "Sending JSON data: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:69 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:75 +msgid "No headers in server response" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:76 +msgid "Bad response from server" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:79 +#, fuzzy, python-format +msgid "Got response: %s" +msgstr "応答 %s" + +#: cinder/volume/drivers/nexenta/volume.py:99 +#, fuzzy, python-format +msgid "Volume %s does not exist in Nexenta SA" +msgstr "ボリュームグループ%sが存在しません。" + +#: cinder/volume/drivers/nexenta/volume.py:211 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:220 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:228 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:238 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:274 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:282 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:74 +#, python-format +msgid "CLIQ command returned %s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:80 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:88 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. 
Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:118 +#, python-format +msgid "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. " +"Result=%(_xml)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:171 +#, python-format +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:235 +msgid "local_path not supported" +msgstr "" + +#: cinder/volume/drivers/san/san.py:148 +#, fuzzy, python-format +msgid "Error running SSH command: %s" +msgstr "コマンド実行において予期しないエラーが発生しました。" + +#: cinder/volume/drivers/san/san.py:169 +msgid "Specify san_password or san_private_key" +msgstr "" + +#: cinder/volume/drivers/san/san.py:173 +msgid "san_ip must be set" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:80 +#, python-format +msgid "Cannot parse list-view output: %s" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:174 +#, python-format +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:257 +#, python-format +msgid "SSH Command failed after '%(total_attempts)r' attempts: '%(command)s'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:261 +#, fuzzy, python-format +msgid "Error running ssh command: %s" +msgstr "コマンド実行において予期しないエラーが発生しました。" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:446 +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:104 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:103 +#, python-format +msgid "CPG (%s) doesn't exist on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:493 +#, python-format +msgid "Must specify a valid persona %(valid)s, value '%(persona)s' is invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:532 +#, python-format +msgid "" +"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " +"invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:569 +#, fuzzy, python-format +msgid "Volume (%s) already exists on array" +msgstr "グループ %s は既に存在しています。" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:622 +#, fuzzy, python-format +msgid "Unexpected state while cloning %s" +msgstr "コマンド実行において予期しないエラーが発生しました。" + +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:97 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:96 +msgid "Login to 3PAR array invalid" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:218 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:237 +#, python-format +msgid "The hostname must be called '%s'" +msgstr "" + diff --git a/cinder/locale/ko/LC_MESSAGES/cinder.po b/cinder/locale/ko/LC_MESSAGES/cinder.po new file mode 100644 index 0000000000..6731edc57f --- /dev/null +++ b/cinder/locale/ko/LC_MESSAGES/cinder.po @@ -0,0 +1,5593 @@ +# Korean translation for cinder +# Copyright (c) 2011 Rosetta Contributors and Canonical Ltd 2011 +# This file is distributed under the same license as the cinder package. +# FIRST AUTHOR , 2011. 
+#
+msgid ""
+msgstr ""
+"Project-Id-Version: cinder\n"
+"Report-Msgid-Bugs-To: FULL NAME <EMAIL@ADDRESS>\n"
+"POT-Creation-Date: 2013-05-21 18:10+0000\n"
+"PO-Revision-Date: 2011-12-16 04:42+0000\n"
+"Last-Translator: Zhongyue Luo \n"
+"Language-Team: Korean <ko@li.org>\n"
+"Plural-Forms: nplurals=1; plural=0\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=utf-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Generated-By: Babel 0.9.6\n"
+
+#: cinder/context.py:61
+#, python-format
+msgid "Arguments dropped when creating context: %s"
+msgstr ""
+
+#: cinder/context.py:92
+#, python-format
+msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r"
+msgstr ""
+
+#: cinder/exception.py:63
+msgid "Unexpected error while running command."
+msgstr "명령 실행도중 예측하지 못한 에러가 발생했습니다"
+
+#: cinder/exception.py:66
+#, python-format
+msgid ""
+"%(description)s\n"
+"Command: %(cmd)s\n"
+"Exit code: %(exit_code)s\n"
+"Stdout: %(stdout)r\n"
+"Stderr: %(stderr)r"
+msgstr ""
+"%(description)s\n"
+"Command: %(cmd)s\n"
+"Exit code: %(exit_code)s\n"
+"Stdout: %(stdout)r\n"
+"Stderr: %(stderr)r"
+
+#: cinder/exception.py:90
+msgid "DB exception wrapped."
+msgstr ""
+
+#: cinder/exception.py:104
+msgid "An unknown exception occurred."
+msgstr ""
+
+#: cinder/exception.py:125 cinder/openstack/common/rpc/common.py:86
+msgid "Exception in string format operation"
+msgstr ""
+
+#: cinder/exception.py:138
+msgid "Connection to glance failed"
+msgstr ""
+
+#: cinder/exception.py:142
+msgid "Not authorized."
+msgstr ""
+
+#: cinder/exception.py:147
+msgid "User does not have admin privileges"
+msgstr ""
+
+#: cinder/exception.py:151
+#, python-format
+msgid "Policy doesn't allow %(action)s to be performed."
+msgstr ""
+
+#: cinder/exception.py:155
+#, python-format
+msgid "Not authorized for image %(image_id)s."
+msgstr ""
+
+#: cinder/exception.py:159
+msgid "Unacceptable parameters."
+msgstr ""
+
+#: cinder/exception.py:164
+msgid "Invalid snapshot"
+msgstr ""
+
+#: cinder/exception.py:168
+#, python-format
+msgid "Volume %(volume_id)s is still attached, detach volume first."
+msgstr ""
+
+#: cinder/exception.py:172
+msgid "Failed to load data into json format"
+msgstr ""
+
+#: cinder/exception.py:176
+msgid "The request is invalid."
+msgstr ""
+
+#: cinder/exception.py:180
+msgid "The results are invalid."
+msgstr ""
+
+#: cinder/exception.py:184
+msgid "Invalid input received"
+msgstr ""
+
+#: cinder/exception.py:188
+msgid "Invalid volume type"
+msgstr ""
+
+#: cinder/exception.py:192
+msgid "Invalid volume"
+msgstr ""
+
+#: cinder/exception.py:196
+#, python-format
+msgid "Invalid content type %(content_type)s."
+msgstr ""
+
+#: cinder/exception.py:200
+msgid "Invalid Parameter: Unicode is not supported by the current database."
+msgstr ""
+
+#: cinder/exception.py:207
+#, python-format
+msgid "%(err)s"
+msgstr ""
+
+#: cinder/exception.py:211
+msgid "Service is unavailable at this time."
+msgstr ""
+
+#: cinder/exception.py:215
+#, python-format
+msgid "Image %(image_id)s is unacceptable: %(reason)s"
+msgstr ""
+
+#: cinder/exception.py:219
+msgid "Expected a uuid but received %(uuid)."
+msgstr ""
+
+#: cinder/exception.py:223
+msgid "Resource could not be found."
+msgstr ""
+
+#: cinder/exception.py:229
+#, python-format
+msgid "Volume %(volume_id)s persistence file could not be found."
+msgstr ""
+
+#: cinder/exception.py:233
+#, python-format
+msgid "Volume %(volume_id)s could not be found."
+msgstr "" + +#: cinder/exception.py:237 +#, python-format +msgid "Unable to locate account %(account_name)s on Solidfire device" +msgstr "" + +#: cinder/exception.py:242 +#, python-format +msgid "Volume not found for instance %(instance_id)s." +msgstr "" + +#: cinder/exception.py:246 +#, python-format +msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:251 cinder/exception.py:264 +msgid "Invalid metadata" +msgstr "" + +#: cinder/exception.py:255 cinder/exception.py:268 +msgid "Invalid metadata size" +msgstr "" + +#: cinder/exception.py:259 +#, python-format +msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:272 +#, python-format +msgid "Volume type %(volume_type_id)s could not be found." +msgstr "" + +#: cinder/exception.py:276 +#, python-format +msgid "Volume type with name %(volume_type_name)s could not be found." +msgstr "" + +#: cinder/exception.py:281 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s has no extra specs with key " +"%(extra_specs_key)s." +msgstr "" + +#: cinder/exception.py:286 +#, python-format +msgid "Snapshot %(snapshot_id)s could not be found." +msgstr "" + +#: cinder/exception.py:290 +#, python-format +msgid "deleting volume %(volume_name)s that has snapshot" +msgstr "" + +#: cinder/exception.py:294 +#, python-format +msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" +msgstr "" + +#: cinder/exception.py:299 +#, python-format +msgid "No target id found for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:303 +#, python-format +msgid "Failed to create iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:307 +#, python-format +msgid "Failed to attach iSCSI target for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:311 +#, python-format +msgid "Failed to remove iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:315 +#, python-format +msgid "No disk at %(location)s" +msgstr "" + +#: cinder/exception.py:319 +#, python-format +msgid "Invalid image href %(image_href)s." +msgstr "" + +#: cinder/exception.py:323 +#, python-format +msgid "Image %(image_id)s could not be found." +msgstr "" + +#: cinder/exception.py:327 +#, python-format +msgid "Service %(service_id)s could not be found." +msgstr "" + +#: cinder/exception.py:331 +#, python-format +msgid "Host %(host)s could not be found." +msgstr "" + +#: cinder/exception.py:335 +#, python-format +msgid "Scheduler Host Filter %(filter_name)s could not be found." +msgstr "" + +#: cinder/exception.py:339 +#, python-format +msgid "Scheduler Host Weigher %(weigher_name)s could not be found." +msgstr "" + +#: cinder/exception.py:343 +#, python-format +msgid "Could not find binary %(binary)s on host %(host)s." +msgstr "" + +#: cinder/exception.py:347 +#, python-format +msgid "Invalid reservation expiration %(expire)s." +msgstr "" + +#: cinder/exception.py:351 +#, python-format +msgid "" +"Change would make usage less than 0 for the following resources: " +"%(unders)s" +msgstr "" + +#: cinder/exception.py:356 +msgid "Quota could not be found" +msgstr "" + +#: cinder/exception.py:360 +#, python-format +msgid "Unknown quota resources %(unknown)s." +msgstr "" + +#: cinder/exception.py:364 +#, python-format +msgid "Quota for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:368 +#, python-format +msgid "Quota class %(class_name)s could not be found." 
+msgstr "" + +#: cinder/exception.py:372 +#, python-format +msgid "Quota usage for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:376 +#, python-format +msgid "Quota reservation %(uuid)s could not be found." +msgstr "" + +#: cinder/exception.py:380 +#, python-format +msgid "Quota exceeded for resources: %(overs)s" +msgstr "" + +#: cinder/exception.py:384 +#, python-format +msgid "Migration %(migration_id)s could not be found." +msgstr "" + +#: cinder/exception.py:388 +#, python-format +msgid "Migration not found for instance %(instance_id)s with status %(status)s." +msgstr "" + +#: cinder/exception.py:393 +#, python-format +msgid "File %(file_path)s could not be found." +msgstr "" + +#: cinder/exception.py:397 +#, python-format +msgid "Class %(class_name)s could not be found: %(exception)s" +msgstr "" + +#: cinder/exception.py:401 +msgid "Action not allowed." +msgstr "" + +#: cinder/exception.py:410 +#, python-format +msgid "Key pair %(key_name)s already exists." +msgstr "" + +#: cinder/exception.py:414 +#, python-format +msgid "Volume Type %(id)s already exists." +msgstr "" + +#: cinder/exception.py:418 +msgid "Migration error" +msgstr "" + +#: cinder/exception.py:422 +#, python-format +msgid "Malformed message body: %(reason)s" +msgstr "" + +#: cinder/exception.py:426 +#, python-format +msgid "Could not find config at %(path)s" +msgstr "" + +#: cinder/exception.py:430 +#, python-format +msgid "Could not load paste app '%(name)s' from %(path)s" +msgstr "" + +#: cinder/exception.py:434 +#, python-format +msgid "No valid host was found. %(reason)s" +msgstr "" + +#: cinder/exception.py:438 +#, python-format +msgid "Host %(host)s is not up or doesn't exist." +msgstr "" + +#: cinder/exception.py:442 +msgid "Quota exceeded" +msgstr "" + +#: cinder/exception.py:449 +msgid "Requested volume or snapshot exceeds allowed Gigabytes quota" +msgstr "" + +#: cinder/exception.py:454 +msgid "Maximum volume/snapshot size exceeded" +msgstr "" + +#: cinder/exception.py:458 +#, python-format +msgid "Maximum number of volumes allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:462 +#, python-format +msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:466 +#, python-format +msgid "Detected more than one volume with name %(vol_name)s" +msgstr "" + +#: cinder/exception.py:470 +#, python-format +msgid "3PAR Host already exists: %(err)s. %(info)s" +msgstr "" + +#: cinder/exception.py:474 +#, python-format +msgid "Invalid 3PAR Domain: %(err)s" +msgstr "" + +#: cinder/exception.py:478 +#, python-format +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +msgstr "" + +#: cinder/exception.py:483 +msgid "Bad response from SolidFire API" +msgstr "" + +#: cinder/exception.py:487 +#, python-format +msgid "Error in SolidFire API response: data=%(data)s" +msgstr "" + +#: cinder/exception.py:491 +#, python-format +msgid "Unknown or unsupported command %(cmd)s" +msgstr "" + +#: cinder/exception.py:495 +#, python-format +msgid "Malformed response to command %(cmd)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:499 +#, python-format +msgid "Bad HTTP response status %(status)s" +msgstr "" + +#: cinder/exception.py:503 +#, python-format +msgid "Operation failed with status=%(status)s. 
Full dump: %(data)s" +msgstr "" + +#: cinder/exception.py:507 +#, fuzzy, python-format +msgid "Unable to create server object for initiator %(name)s" +msgstr "%(instance_name)s 인스턴스의 %(sr_ref)s SR에 대한 VDI 생성이 실패했습니다" + +#: cinder/exception.py:511 +#, python-format +msgid "Unable to find server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:515 +msgid "Unable to find any active VPSA controller" +msgstr "" + +#: cinder/exception.py:519 +#, python-format +msgid "Failed to retrieve attachments for volume %(name)s" +msgstr "" + +#: cinder/exception.py:523 +#, python-format +msgid "Invalid attachment info for volume %(name)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:527 +#, python-format +msgid "Instance %(instance_id)s could not be found." +msgstr "" + +#: cinder/exception.py:531 +#, python-format +msgid "Bad or unexpected response from the storage volume backend API: %(data)s" +msgstr "" + +#: cinder/exception.py:536 +msgid "Unknown NFS exception" +msgstr "" + +#: cinder/exception.py:540 +msgid "No mounted NFS shares found" +msgstr "" + +#: cinder/exception.py:544 cinder/exception.py:556 +#, python-format +msgid "There is no share which can host %(volume_size)sG" +msgstr "" + +#: cinder/exception.py:548 +msgid "Unknown Gluster exception" +msgstr "" + +#: cinder/exception.py:552 +msgid "No mounted Gluster shares found" +msgstr "" + +#: cinder/exception.py:560 +#, python-format +msgid "" +"Glance metadata cannot be updated, key %(key)s exists for volume id " +"%(volume_id)s" +msgstr "" + +#: cinder/exception.py:565 +msgid "Failed to copy image to volume" +msgstr "" + +#: cinder/exception.py:569 +#, python-format +msgid "Backup %(backup_id)s could not be found." +msgstr "" + +#: cinder/exception.py:573 +#, python-format +msgid "Invalid backup: %(reason)s" +msgstr "" + +#: cinder/exception.py:577 +msgid "Connection to swift failed" +msgstr "" + +#: cinder/flags.py:145 +msgid "Deploy v1 of the Cinder API. " +msgstr "" + +#: cinder/flags.py:148 +msgid "Deploy v2 of the Cinder API. " +msgstr "" + +#: cinder/manager.py:158 +#, python-format +msgid "Skipping %(full_task_name)s, %(ticks_to_skip)s ticks left until next run" +msgstr "" + +#: cinder/manager.py:164 +#, python-format +msgid "Running periodic task %(full_task_name)s" +msgstr "" + +#: cinder/manager.py:171 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "" + +#: cinder/manager.py:216 +msgid "Notifying Schedulers of capabilities ..." 
+msgstr "" + +#: cinder/policy.py:30 +msgid "JSON file representing policy" +msgstr "" + +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" +msgstr "" + +#: cinder/quota.py:704 +#, python-format +msgid "Created reservations %(reservations)s" +msgstr "" + +#: cinder/quota.py:726 +#, python-format +msgid "Failed to commit reservations %(reservations)s" +msgstr "" + +#: cinder/quota.py:747 +#, python-format +msgid "Failed to roll back reservations %(reservations)s" +msgstr "" + +#: cinder/service.py:124 +msgid "SIGTERM received" +msgstr "" + +#: cinder/service.py:174 cinder/openstack/common/service.py:164 +msgid "Parent process has died unexpectedly, exiting" +msgstr "" + +#: cinder/service.py:213 cinder/openstack/common/service.py:200 +msgid "Forking too fast, sleeping" +msgstr "" + +#: cinder/service.py:231 cinder/openstack/common/service.py:121 +#: cinder/openstack/common/service.py:218 +#, python-format +msgid "Caught %s, exiting" +msgstr "" + +#: cinder/service.py:236 cinder/openstack/common/service.py:223 +msgid "Unhandled exception" +msgstr "" + +#: cinder/service.py:243 cinder/openstack/common/service.py:230 +#, python-format +msgid "Started child %d" +msgstr "" + +#: cinder/service.py:253 cinder/openstack/common/service.py:240 +#, python-format +msgid "Starting %d workers" +msgstr "" + +#: cinder/service.py:272 cinder/openstack/common/service.py:257 +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "" + +#: cinder/service.py:275 +#, python-format +msgid "Child %(pid)d exited with status %(code)d" +msgstr "" + +#: cinder/service.py:278 cinder/openstack/common/service.py:265 +#, python-format +msgid "pid %d not in child list" +msgstr "" + +#: cinder/service.py:286 +#, python-format +msgid "_wait_child %d" +msgstr "" + +#: cinder/service.py:302 +#, python-format +msgid "wait wrap.failed %s" +msgstr "" + +#: cinder/service.py:310 cinder/openstack/common/service.py:293 +#, python-format +msgid "Caught %s, stopping children" +msgstr "" + +#: cinder/service.py:321 cinder/openstack/common/service.py:304 +#, python-format +msgid "Waiting on %d children to exit" +msgstr "" + +#: cinder/service.py:353 +#, python-format +msgid "Starting %(topic)s node (version %(version_string)s)" +msgstr "" + +#: cinder/service.py:367 cinder/openstack/common/rpc/service.py:47 +#, python-format +msgid "Creating Consumer connection for Service %s" +msgstr "" + +#: cinder/service.py:458 +msgid "Service killed that has no database entry" +msgstr "" + +#: cinder/service.py:495 +msgid "The service database object disappeared, Recreating it." +msgstr "" + +#: cinder/service.py:510 +msgid "Recovered model server connection!" +msgstr "" + +#: cinder/service.py:516 +msgid "model server went away" +msgstr "" + +#: cinder/service.py:608 +msgid "Full set of FLAGS:" +msgstr "" + +#: cinder/service.py:615 +#, python-format +msgid "%(flag)s : FLAG SET " +msgstr "" + +#: cinder/utils.py:96 +#, python-format +msgid "Fetching %s" +msgstr "" + +#: cinder/utils.py:147 cinder/openstack/common/processutils.py:122 +#, python-format +msgid "Got unknown keyword args to utils.execute: %r" +msgstr "" + +#: cinder/utils.py:153 +msgid "" +"The root_helper option (which lets you specify a root wrapper different " +"from cinder-rootwrap, and defaults to using sudo) is now deprecated. You " +"should use the rootwrap_config option instead." 
+msgstr "" + +#: cinder/utils.py:169 cinder/openstack/common/processutils.py:137 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "" + +#: cinder/utils.py:186 cinder/utils.py:267 +#: cinder/openstack/common/processutils.py:162 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:224 +#, python-format +msgid "Result was %s" +msgstr "" + +#: cinder/utils.py:199 cinder/openstack/common/processutils.py:174 +#, python-format +msgid "%r failed. Retrying." +msgstr "" + +#: cinder/utils.py:241 cinder/volume/drivers/san/hp/hp_3par_common.py:197 +#, python-format +msgid "Running cmd (SSH): %s" +msgstr "" + +#: cinder/utils.py:243 +msgid "Environment not supported over SSH" +msgstr "" + +#: cinder/utils.py:247 +msgid "process_input not supported over SSH" +msgstr "" + +#: cinder/utils.py:316 cinder/volume/drivers/huawei/huawei_iscsi.py:91 +msgid "Specify a password or private_key" +msgstr "" + +#: cinder/utils.py:332 cinder/volume/drivers/huawei/huawei_iscsi.py:99 +#, python-format +msgid "Error connecting via ssh: %s" +msgstr "" + +#: cinder/utils.py:373 +#, python-format +msgid "debug in callback: %s" +msgstr "" + +#: cinder/utils.py:532 +#, python-format +msgid "Link Local address is not found.:%s" +msgstr "" + +#: cinder/utils.py:535 +#, python-format +msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +msgstr "" + +#: cinder/utils.py:569 +#, python-format +msgid "Invalid backend: %s" +msgstr "" + +#: cinder/utils.py:580 +#, python-format +msgid "backend %s" +msgstr "" + +#: cinder/utils.py:630 +msgid "in looping call" +msgstr "" + +#: cinder/utils.py:813 +#, python-format +msgid "Expected object of type: %s" +msgstr "" + +#: cinder/utils.py:903 +#, python-format +msgid "timefunc: '%(name)s' took %(total_time).2f secs" +msgstr "" + +#: cinder/utils.py:1105 +#, python-format +msgid "Could not remove tmpdir: %s" +msgstr "" + +#: cinder/utils.py:1213 +#, python-format +msgid "Unknown byte multiplier: %s" +msgstr "" + +#: cinder/wsgi.py:121 +#, python-format +msgid "Unable to find cert_file : %s" +msgstr "" + +#: cinder/wsgi.py:124 +#, fuzzy, python-format +msgid "Unable to find ca_file : %s" +msgstr "%s 볼륨을 찾을수 없습니다" + +#: cinder/wsgi.py:127 +#, python-format +msgid "Unable to find key_file : %s" +msgstr "" + +#: cinder/wsgi.py:130 +msgid "" +"When running server in SSL mode, you must specify both a cert_file and " +"key_file option value in your configuration file" +msgstr "" + +#: cinder/wsgi.py:163 +#, python-format +msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" +msgstr "" + +#: cinder/wsgi.py:207 +#, python-format +msgid "Started %(name)s on %(_host)s:%(_port)s" +msgstr "" + +#: cinder/wsgi.py:226 +msgid "Stopping WSGI server." +msgstr "" + +#: cinder/wsgi.py:240 +msgid "WSGI server has stopped." +msgstr "" + +#: cinder/wsgi.py:309 +msgid "You must implement __call__" +msgstr "" + +#: cinder/api/auth.py:26 +msgid "" +"cinder.api.auth:CinderKeystoneContext is deprecated. Please use " +"cinder.api.middleware.auth:CinderKeystoneContext instead." +msgstr "" + +#: cinder/api/auth.py:34 +msgid "" +"cinder.api.auth:pipeline_factory is deprecated. Please use " +"cinder.api.middleware.auth:pipeline_factory instead." 
+msgstr "" + +#: cinder/api/common.py:63 cinder/api/common.py:97 cinder/volume/api.py:359 +msgid "limit param must be an integer" +msgstr "" + +#: cinder/api/common.py:66 cinder/api/common.py:101 cinder/volume/api.py:356 +msgid "limit param must be positive" +msgstr "" + +#: cinder/api/common.py:91 +msgid "offset param must be an integer" +msgstr "" + +#: cinder/api/common.py:105 +msgid "offset param must be positive" +msgstr "" + +#: cinder/api/common.py:133 +#, python-format +msgid "marker [%s] not found" +msgstr "" + +#: cinder/api/common.py:160 +#, python-format +msgid "href %s does not contain version" +msgstr "" + +#: cinder/api/extensions.py:184 +msgid "Initializing extension manager." +msgstr "" + +#: cinder/api/extensions.py:199 +#, python-format +msgid "Loaded extension: %s" +msgstr "" + +#: cinder/api/extensions.py:237 +#, python-format +msgid "Ext name: %s" +msgstr "" + +#: cinder/api/extensions.py:238 +#, python-format +msgid "Ext alias: %s" +msgstr "" + +#: cinder/api/extensions.py:239 +#, python-format +msgid "Ext description: %s" +msgstr "" + +#: cinder/api/extensions.py:241 +#, python-format +msgid "Ext namespace: %s" +msgstr "" + +#: cinder/api/extensions.py:242 +#, python-format +msgid "Ext updated: %s" +msgstr "" + +#: cinder/api/extensions.py:244 +#, python-format +msgid "Exception loading extension: %s" +msgstr "" + +#: cinder/api/extensions.py:258 +#, python-format +msgid "Loading extension %s" +msgstr "" + +#: cinder/api/extensions.py:264 +#, python-format +msgid "Calling extension factory %s" +msgstr "" + +#: cinder/api/extensions.py:278 +#, python-format +msgid "osapi_volume_extension is set to deprecated path: %s" +msgstr "" + +#: cinder/api/extensions.py:280 +#, python-format +msgid "" +"Please set your flag or cinder.conf settings for osapi_volume_extension " +"to: %s" +msgstr "" + +#: cinder/api/extensions.py:289 +#, python-format +msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:357 +#, python-format +msgid "Failed to load extension %(classpath)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:381 +#, python-format +msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgstr "" + +#: cinder/api/sizelimit.py:25 +msgid "" +"cinder.api.sizelimit:RequestBodySizeLimiter is deprecated. Please use " +"cinder.api.middleware.sizelimit:RequestBodySizeLimiter instead" +msgstr "" + +#: cinder/api/xmlutil.py:268 +msgid "element is not a child" +msgstr "" + +#: cinder/api/xmlutil.py:417 +msgid "root element selecting a list" +msgstr "" + +#: cinder/api/xmlutil.py:742 +#, python-format +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgstr "" + +#: cinder/api/xmlutil.py:861 +msgid "subclasses must implement construct()!" 
+msgstr "" + +#: cinder/api/contrib/admin_actions.py:80 +#, python-format +msgid "Updating %(resource)s '%(id)s' with '%(update)r'" +msgstr "" + +#: cinder/api/contrib/backups.py:128 +#, python-format +msgid "show called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:140 +#, python-format +msgid "delete called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:143 +#, python-format +msgid "Delete backup with id: %s" +msgstr "" + +#: cinder/api/contrib/backups.py:185 +#, python-format +msgid "Creating new backup %s" +msgstr "" + +#: cinder/api/contrib/backups.py:195 cinder/api/contrib/backups.py:230 +msgid "Incorrect request body format" +msgstr "" + +#: cinder/api/contrib/backups.py:201 +#, python-format +msgid "Creating backup of volume %(volume_id)s in container %(container)s" +msgstr "" + +#: cinder/api/contrib/backups.py:221 +#, python-format +msgid "Restoring backup %(backup_id)s (%(body)s)" +msgstr "" + +#: cinder/api/contrib/backups.py:234 +#, python-format +msgid "Restoring backup %(backup_id)s to volume %(volume_id)s" +msgstr "" + +#: cinder/api/contrib/extended_snapshot_attributes.py:61 +msgid "Snapshot not found." +msgstr "" + +#: cinder/api/contrib/hosts.py:83 cinder/api/openstack/wsgi.py:158 +msgid "cannot understand XML" +msgstr "" + +#: cinder/api/contrib/hosts.py:133 +#, python-format +msgid "Host '%s' could not be found." +msgstr "" + +#: cinder/api/contrib/hosts.py:162 +#, python-format +msgid "Invalid status: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:165 +#, python-format +msgid "Invalid update setting: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:177 +#, python-format +msgid "Setting host %(host)s to %(state)s." +msgstr "" + +#: cinder/api/contrib/hosts.py:202 +msgid "Describe-resource is admin only functionality" +msgstr "" + +#: cinder/api/contrib/hosts.py:210 +msgid "Host not found" +msgstr "" + +#: cinder/api/contrib/quotas.py:63 +msgid "Quota limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:102 +msgid "Request body empty" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:106 +#: cinder/api/v1/snapshot_metadata.py:77 cinder/api/v1/volume_metadata.py:77 +#: cinder/api/v2/snapshot_metadata.py:77 +msgid "Request body and URI mismatch" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:109 +#: cinder/api/v1/snapshot_metadata.py:81 cinder/api/v1/volume_metadata.py:81 +#: cinder/api/v2/snapshot_metadata.py:81 +msgid "Request body contains too many items" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:159 +msgid "Invalid request body" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:163 +msgid "No image_name was specified in request." +msgstr "" + +#: cinder/api/middleware/fault.py:45 +#, python-format +msgid "Caught error: %s" +msgstr "" + +#: cinder/api/middleware/fault.py:54 cinder/api/openstack/wsgi.py:886 +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "" + +#: cinder/api/middleware/sizelimit.py:54 cinder/api/middleware/sizelimit.py:63 +#: cinder/api/middleware/sizelimit.py:77 +msgid "Request is too large." 
+msgstr "" + +#: cinder/api/openstack/__init__.py:74 +msgid "Must specify an ExtensionManager class" +msgstr "" + +#: cinder/api/openstack/__init__.py:85 +#, python-format +msgid "Extended resource: %s" +msgstr "" + +#: cinder/api/openstack/__init__.py:110 +#, python-format +msgid "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" +msgstr "" + +#: cinder/api/openstack/__init__.py:115 +#, python-format +msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgstr "" + +#: cinder/api/openstack/__init__.py:128 +msgid "" +"cinder.api.openstack:FaultWrapper is deprecated. Please use " +"cinder.api.middleware.fault:FaultWrapper instead." +msgstr "" + +#: cinder/api/openstack/urlmap.py:25 +msgid "" +"cinder.api.openstack.urlmap:urlmap_factory is deprecated. Please use " +"cinder.api.urlmap:urlmap_factory instead." +msgstr "" + +#: cinder/api/openstack/wsgi.py:134 cinder/api/openstack/wsgi.py:537 +msgid "cannot understand JSON" +msgstr "" + +#: cinder/api/openstack/wsgi.py:542 +msgid "too many body keys" +msgstr "" + +#: cinder/api/openstack/wsgi.py:581 +#, python-format +msgid "Exception handling resource: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:586 +#, python-format +msgid "Fault thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:589 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:697 +msgid "Unrecognized Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:701 +msgid "No Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:705 +msgid "Empty body provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:816 +#, python-format +msgid "There is no such action: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:819 cinder/api/openstack/wsgi.py:832 +#: cinder/api/v1/snapshot_metadata.py:55 cinder/api/v1/snapshot_metadata.py:73 +#: cinder/api/v1/snapshot_metadata.py:98 cinder/api/v1/snapshot_metadata.py:123 +#: cinder/api/v1/volume_metadata.py:55 cinder/api/v1/volume_metadata.py:73 +#: cinder/api/v1/volume_metadata.py:98 cinder/api/v1/volume_metadata.py:123 +#: cinder/api/v2/snapshot_metadata.py:55 cinder/api/v2/snapshot_metadata.py:73 +#: cinder/api/v2/snapshot_metadata.py:98 cinder/api/v2/snapshot_metadata.py:123 +msgid "Malformed request body" +msgstr "" + +#: cinder/api/openstack/wsgi.py:829 +msgid "Unsupported Content-Type" +msgstr "" + +#: cinder/api/openstack/wsgi.py:841 +msgid "Malformed request url" +msgstr "" + +#: cinder/api/openstack/wsgi.py:889 +#, python-format +msgid "%(url)s returned a fault: %(e)s" +msgstr "" + +#: cinder/api/openstack/volume/__init__.py:25 +msgid "" +"cinder.api.openstack.volume:APIRouter is deprecated. Please use " +"cinder.api.v1.router:APIRouter instead." +msgstr "" + +#: cinder/api/openstack/volume/versions.py:26 +msgid "" +"cinder.api.openstack.volume.versions.Versions is deprecated. Please use " +"cinder.api.versions.Versions instead." +msgstr "" + +#: cinder/api/v1/limits.py:144 cinder/api/v2/limits.py:144 +#, python-format +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." +msgstr "" + +#: cinder/api/v1/limits.py:270 cinder/api/v2/limits.py:270 +msgid "This request was rate-limited." 
+msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:39 cinder/api/v1/snapshot_metadata.py:119 +#: cinder/api/v1/snapshot_metadata.py:158 cinder/api/v2/snapshot_metadata.py:39 +#: cinder/api/v2/snapshot_metadata.py:119 +#: cinder/api/v2/snapshot_metadata.py:158 +#, fuzzy +msgid "snapshot does not exist" +msgstr "인스턴스 %s: 스냅샷 저장중" + +#: cinder/api/v1/snapshot_metadata.py:141 +#: cinder/api/v1/snapshot_metadata.py:151 cinder/api/v1/volume_metadata.py:141 +#: cinder/api/v1/volume_metadata.py:151 cinder/api/v2/snapshot_metadata.py:141 +#: cinder/api/v2/snapshot_metadata.py:151 +msgid "Metadata item was not found" +msgstr "" + +#: cinder/api/v1/snapshots.py:123 cinder/api/v2/snapshots.py:123 +#, python-format +msgid "Delete snapshot with id: %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:173 cinder/api/v2/snapshots.py:179 +#, python-format +msgid "Create snapshot from volume %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:177 cinder/api/v2/snapshots.py:188 +#, python-format +msgid "Invalid value '%s' for force. " +msgstr "" + +#: cinder/api/v1/volume_metadata.py:39 cinder/api/v1/volume_metadata.py:119 +#: cinder/api/v1/volume_metadata.py:158 +msgid "volume does not exist" +msgstr "" + +#: cinder/api/v1/volumes.py:106 +#, python-format +msgid "vol=%s" +msgstr "" + +#: cinder/api/v1/volumes.py:236 cinder/api/v2/volumes.py:153 +#, python-format +msgid "Delete volume with id: %s" +msgstr "" + +#: cinder/api/v1/volumes.py:278 cinder/api/v1/volumes.py:282 +#: cinder/api/v2/volumes.py:209 cinder/api/v2/volumes.py:213 +msgid "Invalid imageRef provided." +msgstr "" + +#: cinder/api/v1/volumes.py:339 cinder/api/v2/volumes.py:272 +#, python-format +msgid "Create volume of %s GB" +msgstr "" + +#: cinder/api/v1/volumes.py:418 +#, python-format +msgid "Removing options '%(bad_options)s' from query" +msgstr "" + +#: cinder/api/v2/volumes.py:359 +#, python-format +msgid "Removing options '%s' from query" +msgstr "" + +#: cinder/backup/api.py:53 +#, fuzzy +msgid "Backup status must be available or error" +msgstr "볼륨의 상태를 알 수 없습니다" + +#: cinder/backup/api.py:78 +#, fuzzy +msgid "Volume to be backed up must be available" +msgstr "볼륨의 상태를 알 수 없습니다" + +#: cinder/backup/api.py:112 +#, fuzzy +msgid "Backup status must be available" +msgstr "볼륨의 상태를 알 수 없습니다" + +#: cinder/backup/api.py:117 +msgid "Backup to be restored has invalid size" +msgstr "" + +#: cinder/backup/api.py:126 +#, python-format +msgid "Creating volume of %(size)s GB for restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/api.py:140 +#, python-format +msgid "" +"volume size %(volume_size)d is too small to restore backup of size " +"%(size)d." 
+msgstr "" + +#: cinder/backup/api.py:145 +#, fuzzy +msgid "Volume to be restored to must be available" +msgstr "볼륨의 상태를 알 수 없습니다" + +#: cinder/backup/api.py:151 +msgid "Volume to be restored to is smaller than the backup to be restored" +msgstr "" + +#: cinder/backup/api.py:155 +#, python-format +msgid "Overwriting volume %(volume_id)s with restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/manager.py:79 +msgid "Cleaning up incomplete backup operations" +msgstr "" + +#: cinder/backup/manager.py:83 +#, python-format +msgid "Resetting volume %s to available (was backing-up)" +msgstr "" + +#: cinder/backup/manager.py:87 +#, python-format +msgid "Resetting volume %s to error_restoring (was restoring-backup)" +msgstr "" + +#: cinder/backup/manager.py:98 +#, python-format +msgid "Resetting backup %s to error (was creating)" +msgstr "" + +#: cinder/backup/manager.py:104 +#, python-format +msgid "Resetting backup %s to available (was restoring)" +msgstr "" + +#: cinder/backup/manager.py:109 +#, python-format +msgid "Resuming delete on backup: %s" +msgstr "" + +#: cinder/backup/manager.py:119 +#, python-format +msgid "create_backup started, backup: %(backup_id)s for volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:128 +#, python-format +msgid "" +"create_backup aborted, expected volume status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:137 +#, python-format +msgid "" +"create_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:160 +#, python-format +msgid "create_backup finished. backup: %s" +msgstr "" + +#: cinder/backup/manager.py:166 +#, python-format +msgid "" +"restore_backup started, restoring backup: %(backup_id)s to volume: " +"%(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:175 +#, python-format +msgid "" +"restore_backup aborted, expected volume status %(expected_status)s but " +"got %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:183 +#, python-format +msgid "" +"restore_backup aborted, expected backup status %(expected_status)s but " +"got %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:199 +#, python-format +msgid "" +"restore_backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]" +msgstr "" + +#: cinder/backup/manager.py:220 +#, python-format +msgid "" +"restore_backup finished, backup: %(backup_id)s restored to volume: " +"%(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:228 +#, python-format +msgid "delete_backup started, backup: %s" +msgstr "" + +#: cinder/backup/manager.py:234 +#, python-format +msgid "" +"delete_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:244 +#, python-format +msgid "" +"delete_backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]" +msgstr "" + +#: cinder/backup/manager.py:264 +#, python-format +msgid "delete_backup finished, backup %s deleted" +msgstr "" + +#: cinder/backup/services/swift.py:96 +#, python-format +msgid "unsupported compression algorithm: %s" +msgstr "" + +#: cinder/backup/services/swift.py:117 +#, python-format +msgid "_check_container_exists: container: %s" +msgstr "" + +#: cinder/backup/services/swift.py:122 +#, python-format 
+msgid "container %s does not exist" +msgstr "" + +#: cinder/backup/services/swift.py:127 +#, python-format +msgid "container %s exists" +msgstr "" + +#: cinder/backup/services/swift.py:133 +#, python-format +msgid "_create_container started, container: %(container)s,backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:148 +#, python-format +msgid "_generate_swift_object_name_prefix: %s" +msgstr "" + +#: cinder/backup/services/swift.py:159 +#, python-format +msgid "generated object list: %s" +msgstr "" + +#: cinder/backup/services/swift.py:169 +#, python-format +msgid "" +"_write_metadata started, container name: %(container)s, metadata " +"filename: %(filename)s" +msgstr "" + +#: cinder/backup/services/swift.py:184 +#, python-format +msgid "" +"error writing metadata file to swift, MD5 of metadata file in swift " +"[%(etag)s] is not the same as MD5 of metadata file sent to swift " +"[%(md5)s]" +msgstr "" + +#: cinder/backup/services/swift.py:188 +msgid "_write_metadata finished" +msgstr "" + +#: cinder/backup/services/swift.py:193 +#, python-format +msgid "" +"_read_metadata started, container name: %(container)s, metadata filename:" +" %(filename)s" +msgstr "" + +#: cinder/backup/services/swift.py:197 +#, python-format +msgid "_read_metadata finished (%s)" +msgstr "" + +#: cinder/backup/services/swift.py:207 +#, python-format +msgid "volume size %d is invalid." +msgstr "" + +#: cinder/backup/services/swift.py:221 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to swift, volume size: " +"%(volume_size_bytes)d, swift object names prefix %(object_prefix)s, " +"availability zone: %(availability_zone)s" +msgstr "" + +#: cinder/backup/services/swift.py:237 +msgid "reading chunk of data from volume" +msgstr "" + +#: cinder/backup/services/swift.py:244 +#, python-format +msgid "" +"compressed %(data_size_bytes)d bytes of data to %(comp_size_bytes)d bytes" +" using %(algorithm)s" +msgstr "" + +#: cinder/backup/services/swift.py:248 +msgid "not compressing data" +msgstr "" + +#: cinder/backup/services/swift.py:252 +msgid "About to put_object" +msgstr "" + +#: cinder/backup/services/swift.py:257 +#, python-format +msgid "swift MD5 for %(object_name)s: %(etag)s" +msgstr "" + +#: cinder/backup/services/swift.py:260 +#, python-format +msgid "backup MD5 for %(object_name)s: %(md5)s" +msgstr "" + +#: cinder/backup/services/swift.py:262 +#, python-format +msgid "" +"error writing object to swift, MD5 of object in swift %(etag)s is not the" +" same as MD5 of object sent to swift %(md5)s" +msgstr "" + +#: cinder/backup/services/swift.py:268 +msgid "Calling eventlet.sleep(0)" +msgstr "" + +#: cinder/backup/services/swift.py:276 +#, python-format +msgid "backup %s finished." +msgstr "" + +#: cinder/backup/services/swift.py:281 +#, python-format +msgid "v1 swift volume backup restore of %s started" +msgstr "" + +#: cinder/backup/services/swift.py:287 +#, python-format +msgid "metadata_object_names = %s" +msgstr "" + +#: cinder/backup/services/swift.py:293 +msgid "" +"restore_backup aborted, actual swift object list in swift does not match " +"object list stored in metadata" +msgstr "" + +#: cinder/backup/services/swift.py:299 +#, python-format +msgid "" +"restoring object from swift. 
backup: %(backup_id)s, container: " +"%(container)s, swift object name: %(object_name)s, volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:309 +#, python-format +msgid "decompressing data using %s algorithm" +msgstr "" + +#: cinder/backup/services/swift.py:323 +#, python-format +msgid "v1 swift volume backup restore of %s finished" +msgstr "" + +#: cinder/backup/services/swift.py:331 +#, python-format +msgid "" +"starting restore of backup %(object_prefix)s from swift container: " +"%(container)s, to volume %(volume_id)s, backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:339 +#, python-format +msgid "Restoring swift backup version %s" +msgstr "" + +#: cinder/backup/services/swift.py:344 +#, python-format +msgid "No support to restore swift backup version %s" +msgstr "" + +#: cinder/backup/services/swift.py:348 +#, python-format +msgid "restore %(backup_id)s to %(volume_id)s finished." +msgstr "" + +#: cinder/backup/services/swift.py:362 +msgid "swift error while listing objects, continuing with delete" +msgstr "" + +#: cinder/backup/services/swift.py:371 +#, python-format +msgid "swift error while deleting object %s, continuing with delete" +msgstr "" + +#: cinder/backup/services/swift.py:374 +#, python-format +msgid "deleted swift object: %(swift_object_name)s in container: %(container)s" +msgstr "" + +#: cinder/backup/services/swift.py:380 +#, python-format +msgid "delete %s finished" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:152 +#, python-format +msgid "Creating iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:171 cinder/brick/iscsi/iscsi.py:264 +#: cinder/brick/iscsi/iscsi.py:400 cinder/brick/iscsi/iscsi.py:409 +#, python-format +msgid "Failed to create iscsi target for volume id:%(vol_id)s." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:181 +#, python-format +msgid "" +"Failed to create iscsi target for volume id:%(vol_id)s. Please ensure " +"your tgtd config file contains 'include %(volumes_dir)s/*'" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:192 +#, python-format +msgid "Removing iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:209 cinder/brick/iscsi/iscsi.py:426 +#, python-format +msgid "Failed to remove iscsi target for volume id:%(vol_id)s." 
+msgstr "" + +#: cinder/brick/iscsi/iscsi.py:218 cinder/brick/iscsi/iscsi.py:434 +msgid "valid iqn needed for show_target" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:270 +#, python-format +msgid "Removing iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:356 +msgid "rtstool is not installed correctly" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:376 +#, python-format +msgid "Creating iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:416 +#, fuzzy, python-format +msgid "Removing iscsi_target: %s" +msgstr "인스턴스 %s를 재부팅합니다" + +#: cinder/brick/iscsi/iscsi.py:455 +#, python-format +msgid "Failed to add initiator iqn %s to target" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:35 +#, fuzzy, python-format +msgid "Unable to find Volume Group: %s" +msgstr "%s 볼륨 탈착에 실패했습니다" + +#: cinder/brick/local_dev/lvm.py:41 +#, fuzzy, python-format +msgid "Failed to create Volume Group: %s" +msgstr "%s 볼륨을 찾을수 없습니다" + +#: cinder/brick/local_dev/lvm.py:75 +msgid "Error creating Volume Group" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:76 +#, python-format +msgid "Cmd :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:77 +#, python-format +msgid "StdOut :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:78 +#, python-format +msgid "StdErr :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:82 +#, fuzzy, python-format +msgid "Unable to locate Volume Group %s" +msgstr "%s 볼륨을 찾을수 없습니다" + +#: cinder/brick/local_dev/lvm.py:265 +#, fuzzy, python-format +msgid "Unable to find VG: %s" +msgstr "%s 볼륨 탈착에 실패했습니다" + +#: cinder/brick/local_dev/lvm.py:284 +msgid "" +"Requested to setup thin provisioning, however current LVM version does " +"not support it." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:337 +#, fuzzy, python-format +msgid "Unable to find LV: %s" +msgstr "%s 볼륨 탈착에 실패했습니다" + +#: cinder/common/sqlalchemyutils.py:68 +msgid "Id not in sort_keys; is sort_keys unique?" +msgstr "" + +#: cinder/common/sqlalchemyutils.py:116 +msgid "Unknown sort direction, must be 'desc' or 'asc'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:51 +msgid "Use of empty request context is deprecated" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:178 +#, python-format +msgid "Unrecognized read_deleted value '%s'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:786 +#, python-format +msgid "Change will make usage less than 0 for the following resources: %(unders)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1797 cinder/db/sqlalchemy/api.py:1823 +#, python-format +msgid "No backend config with id %(sm_backend_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1880 +#, python-format +msgid "No sm_flavor called %(sm_flavor)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1924 +#, python-format +msgid "No sm_volume with id %(volume_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1987 +#, python-format +msgid "No backup with id %(backup_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:74 +msgid "version should be an integer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:101 +msgid "Upgrade DB using Essex release first." +msgstr "" + +#: cinder/db/sqlalchemy/session.py:71 +#, python-format +msgid "Got mysql server has gone away: %s" +msgstr "" + +#: cinder/db/sqlalchemy/session.py:132 +#, python-format +msgid "SQL connection failed. %s attempts left." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:243 +msgid "Exception while creating table." 
+msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:272 +msgid "Downgrade from initial Cinder install is unsupported." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:85 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:123 +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:82 +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:47 +#, python-format +msgid "Table |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:135 +msgid "quota_classes table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:142 +msgid "quota_usages table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:149 +msgid "reservations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:62 +msgid "Exception while creating table 'volume_glance_metedata'" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:77 +msgid "volume_glance_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:94 +msgid "backups table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:60 +msgid "snapshot_metadata table not dropped" +msgstr "" + +#: cinder/image/glance.py:151 +#, python-format +msgid "" +"Error contacting glance server '%(host)s:%(port)s' for '%(method)s', " +"%(extra)s." +msgstr "" + +#: cinder/image/image_utils.py:116 +msgid "Snapshot list encountered but no header found!" +msgstr "" + +#: cinder/image/image_utils.py:224 +msgid "'qemu-img info' parsing failed." +msgstr "" + +#: cinder/image/image_utils.py:231 +#, python-format +msgid "fmt=%(fmt)s backed by:%(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:248 +#, python-format +msgid "Converted to raw, but format is now %s" +msgstr "" + +#: cinder/image/image_utils.py:278 +#, python-format +msgid "Converted to %(f1)s, but format is now %(f2)s" +msgstr "" + +#: cinder/openstack/common/exception.py:104 +msgid "Uncaught exception" +msgstr "" + +#: cinder/openstack/common/excutils.py:48 +#, python-format +msgid "Original exception being dropped: %s" +msgstr "" + +#: cinder/openstack/common/lockutils.py:102 +#, python-format +msgid "Could not release the acquired lock `%s`" +msgstr "" + +#: cinder/openstack/common/lockutils.py:188 +#, python-format +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:199 +#, python-format +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:226 +#, python-format +msgid "Got file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:234 +#, python-format +msgid "Released file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." 
+msgstr "" + +#: cinder/openstack/common/log.py:226 +#, python-format +msgid "Deprecated: %s" +msgstr "" + +#: cinder/openstack/common/log.py:329 +#, python-format +msgid "Error loading logging config %(log_config)s: %(err_msg)s" +msgstr "" + +#: cinder/openstack/common/log.py:379 +#, python-format +msgid "syslog facility must be one of: %s" +msgstr "" + +#: cinder/openstack/common/log.py:537 +#, python-format +msgid "Fatal call to deprecated config: %(msg)s" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:84 +#, python-format +msgid "task run outlasted interval by %s sec" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:91 +msgid "in fixed duration looping call" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:131 +#, python-format +msgid "Dynamic looping call sleeping for %.02f seconds" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:138 +msgid "in dynamic looping call" +msgstr "" + +#: cinder/openstack/common/policy.py:149 +#, python-format +msgid "" +"Inheritance-based rules are deprecated; use the default brain instead of " +"%s." +msgstr "" + +#: cinder/openstack/common/policy.py:163 +#, python-format +msgid "Failed to understand rule %(match)r" +msgstr "" + +#: cinder/openstack/common/policy.py:173 +#, python-format +msgid "Inheritance-based rules are deprecated; update _check_%s" +msgstr "" + +#: cinder/openstack/common/policy.py:180 +#, python-format +msgid "No handler for matches of kind %s" +msgstr "" + +#: cinder/openstack/common/service.py:112 +#: cinder/openstack/common/service.py:275 +msgid "Full set of CONF:" +msgstr "" + +#: cinder/openstack/common/service.py:261 +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "" + +#: cinder/openstack/common/strutils.py:72 +#, python-format +msgid "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:125 +#, python-format +msgid "%s not in valid priorities" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:141 +#, python-format +msgid "" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:171 +#, python-format +msgid "Failed to load notifier %s. These notifications will not be sent." +msgstr "" + +#: cinder/openstack/common/notifier/rabbit_notifier.py:27 +msgid "The rabbit_notifier is now deprecated. Please use rpc_notifier instead." +msgstr "" + +#: cinder/openstack/common/notifier/rpc_notifier.py:45 +#: cinder/openstack/common/notifier/rpc_notifier2.py:51 +#, python-format +msgid "Could not send notification to %(topic)s. Payload=%(message)s" +msgstr "" + +#: cinder/openstack/common/rpc/__init__.py:106 +#, python-format +msgid "" +"A RPC is being made while holding a lock. The locks currently held are " +"%(locks)s. This is probably a bug. Please report it. Include the " +"following: [%(stack)s]." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:75 +msgid "Pool creating new connection" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:200 +#, python-format +msgid "no calling threads waiting for msg_id : %s, message : %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:208 +#, python-format +msgid "" +"Number of call waiters is greater than warning threshhold: %d. There " +"could be a MulticallProxyWaiter leak." 
+msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:291 +#, python-format +msgid "unpacked context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:337 +#, python-format +msgid "UNIQUE_ID is %s." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:406 +#, python-format +msgid "received %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:414 +#, python-format +msgid "no method for message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:415 +#, python-format +msgid "No method for message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:443 +#: cinder/openstack/common/rpc/impl_zmq.py:286 +#, python-format +msgid "Expected exception during message handling (%s)" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:451 +#: cinder/openstack/common/rpc/impl_zmq.py:292 +msgid "Exception during message handling" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:586 +#, python-format +msgid "Making synchronous call on %s ..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:589 +#, python-format +msgid "MSG_ID is %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:623 +#, python-format +msgid "Making asynchronous cast on %s..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:632 +msgid "Making asynchronous fanout cast..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:660 +#, python-format +msgid "Sending %(event_type)s on %(topic)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:74 +msgid "An unknown RPC related exception occurred." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:104 +#, python-format +msgid "" +"Remote error: %(exc_type)s %(value)s\n" +"%(traceback)s." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:121 +#, python-format +msgid "" +"Timeout while waiting on RPC response - topic: \"%(topic)s\", RPC method:" +" \"%(method)s\" info: \"%(info)s\"" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:137 +#: cinder/openstack/common/rpc/common.py:138 +#: cinder/openstack/common/rpc/common.py:139 +msgid "" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:143 +#, python-format +msgid "Found duplicate message(%(msg_id)s). Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:147 +msgid "Invalid reuse of an RPC connection." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:151 +#, python-format +msgid "Specified RPC version, %(version)s, not supported by this endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:156 +#, python-format +msgid "" +"Specified RPC envelope version, %(version)s, not supported by this " +"endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:280 +#, python-format +msgid "Failed to sanitize %(item)s. Key error %(err)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:302 +#, python-format +msgid "Returning exception %s to caller" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:169 +#: cinder/openstack/common/rpc/impl_qpid.py:133 +msgid "Failed to process message... skipping it." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:480 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:502 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:539 +#, python-format +msgid "" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:555 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:609 +#: cinder/openstack/common/rpc/impl_qpid.py:403 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:627 +#: cinder/openstack/common/rpc/impl_qpid.py:418 +#, python-format +msgid "Timed out waiting for RPC response: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:631 +#: cinder/openstack/common/rpc/impl_qpid.py:422 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:670 +#: cinder/openstack/common/rpc/impl_qpid.py:457 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:351 +#, python-format +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:357 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:370 +msgid "Re-established AMQP queues" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:430 +msgid "Error processing message. Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:97 +msgid "JSON serialization failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:104 +#, python-format +msgid "Deserializing: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:140 +#, python-format +msgid "Connecting to %(addr)s with %(type)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:141 +#, python-format +msgid "-> Subscribed to %(subscribe)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:142 +#, python-format +msgid "-> bind: %(bind)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:150 +msgid "Could not open socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:162 +#, python-format +msgid "Subscribing to %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:204 +msgid "You cannot recv on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:209 +msgid "You cannot send on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:273 +#, python-format +msgid "Running func with context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:311 +msgid "Sending reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:345 +msgid "RPC message did not include method." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:380 +msgid "Registering reactor" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:392 +msgid "In reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:407 +msgid "Out reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:411 +msgid "Consuming socket" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:453 +#, python-format +msgid "CONSUMER GOT %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:465 +#, python-format +msgid "Creating proxy for topic: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:471 +msgid "Topic contained dangerous characters." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:496 +#, python-format +msgid "ROUTER RELAY-OUT SUCCEEDED %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:505 +msgid "Topic socket file creation failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:510 +#, python-format +msgid "ROUTER RELAY-OUT QUEUED %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:513 +#, python-format +msgid "Local per-topic backlog buffer full for topic %(topic)s. Dropping message." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:532 +#, python-format +msgid "Could not create IPC directory %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:542 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:576 +#, python-format +msgid "CONSUMER RECEIVED DATA: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:578 +#, python-format +msgid "ROUTER RELAY-OUT %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:600 +msgid "ZMQ Envelope version unsupported or unknown." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:628 +msgid "Skipping topic registration. Already registered." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:635 +#, python-format +msgid "Consumer is a zmq.%s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:687 +msgid "Creating payload" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:700 +msgid "Creating queue socket for reply waiter" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:713 +msgid "Sending cast" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:716 +msgid "Cast sent; Waiting reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:719 +#, python-format +msgid "Received message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:720 +msgid "Unpacking response" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:729 +msgid "Unsupported or unknown ZMQ envelope returned." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:736 +msgid "RPC Message Invalid." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:760 +#, python-format +msgid "%(msg)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:763 +#, python-format +msgid "Sending message(s) to: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:767 +msgid "No matchmaker results. Not casting." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:770 +msgid "No match from matchmaker." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:53 +msgid "Match not found by MatchMaker." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:89 +msgid "Matchmaker does not implement registration or heartbeat." 
+msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:239 +#, python-format +msgid "Matchmaker unregistered: %s, %s" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:250 +msgid "Register before starting heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:343 +#: cinder/openstack/common/rpc/matchmaker.py:361 +#, python-format +msgid "No key defining hosts for topic '%s', see ringfile" +msgstr "" + +#: cinder/scheduler/chance.py:51 cinder/scheduler/simple.py:90 +msgid "Is the appropriate service running?" +msgstr "" + +#: cinder/scheduler/chance.py:56 +msgid "Could not find another host" +msgstr "" + +#: cinder/scheduler/driver.py:89 +msgid "Must implement a fallback schedule" +msgstr "" + +#: cinder/scheduler/driver.py:93 +msgid "Must implement schedule_create_volume" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:113 +msgid "Invalid value for 'scheduler_max_attempts', must be >=1" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:131 +#, python-format +msgid "" +"Error scheduling %(volume_id)s from last vol-service: %(last_host)s : " +"%(exc)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:160 +#, python-format +msgid "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:208 +#, python-format +msgid "Filtered %(hosts)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:214 +#, python-format +msgid "Choosing %(best_host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:240 +#, python-format +msgid "Ignoring %(service_name)s service update from %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:244 +#, python-format +msgid "Received %(service_name)s service update from %(host)s." +msgstr "" + +#: cinder/scheduler/host_manager.py:266 +msgid "service is down or disabled." +msgstr "" + +#: cinder/scheduler/manager.py:119 +#, python-format +msgid "Failed to schedule_%(method)s: %(ex)s" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:69 +#, python-format +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:78 +#, python-format +msgid "Could not decode scheduler options: '%(e)s'" +msgstr "" + +#: cinder/scheduler/simple.py:79 +msgid "Not enough allocatable volume gigabytes remaining" +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:37 +msgid "Free capacity not set: volume node info collection broken." +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:51 +#, python-format +msgid "" +"Insufficient free space for volume creation (requested / avail): " +"%(requested)s/%(available)s" +msgstr "" + +#: cinder/scheduler/filters/retry_filter.py:41 +#, python-format +msgid "Host %(host)s %(pass_msg)s. 
Previously tried hosts: %(hosts)s"
+msgstr ""
+
+#: cinder/tests/fake_driver.py:45 cinder/volume/driver.py:506
+#, python-format
+msgid "FAKE ISCSI: %s"
+msgstr ""
+
+#: cinder/tests/fake_driver.py:93 cinder/tests/fake_driver.py:98
+#, python-format
+msgid "LoggingVolumeDriver: %s"
+msgstr ""
+
+#: cinder/tests/fake_utils.py:72
+#, python-format
+msgid "Faking execution of cmd (subprocess): %s"
+msgstr ""
+
+#: cinder/tests/fake_utils.py:80
+#, python-format
+msgid "Faked command matched %s"
+msgstr ""
+
+#: cinder/tests/fake_utils.py:96
+#, python-format
+msgid "Faked command raised an exception %s"
+msgstr ""
+
+#: cinder/tests/fake_utils.py:101
+#, python-format
+msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'"
+msgstr ""
+
+#: cinder/tests/test_misc.py:59
+#, python-format
+msgid ""
+"The following migrations are missing a downgrade:\n"
+"\t%s"
+msgstr ""
+
+#: cinder/tests/test_storwize_svc.py:243
+#, python-format
+msgid "unrecognized argument %s"
+msgstr ""
+
+#: cinder/tests/test_storwize_svc.py:1217
+#, python-format
+msgid "Run CLI command: %s"
+msgstr ""
+
+#: cinder/tests/test_storwize_svc.py:1220
+#, python-format
+msgid ""
+"CLI output:\n"
+" stdout: %(stdout)s\n"
+" stderr: %(stderr)s"
+msgstr ""
+
+#: cinder/tests/test_storwize_svc.py:1225
+#, python-format
+msgid ""
+"CLI Exception output:\n"
+" stdout: %(out)s\n"
+" stderr: %(err)s"
+msgstr ""
+
+#: cinder/tests/test_volume_types.py:58
+#, python-format
+msgid "Given data: %s"
+msgstr ""
+
+#: cinder/tests/test_volume_types.py:59
+#, python-format
+msgid "Result data: %s"
+msgstr ""
+
+#: cinder/tests/api/contrib/test_backups.py:638
+msgid "Invalid input"
+msgstr ""
+
+#: cinder/tests/integrated/test_login.py:31
+#, python-format
+msgid "volume: %s"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:37
+#, python-format
+msgid ""
+"%(message)s\n"
+"Status Code: %(_status)s\n"
+"Body: %(_body)s"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:46
+msgid "Authentication error"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:54
+msgid "Authorization error"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:62
+msgid "Item not found"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:104
+#, python-format
+msgid "Doing %(method)s on %(relative_url)s"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:106
+#, python-format
+msgid "Body: %s"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:124
+#, python-format
+msgid "%(auth_uri)s => code %(http_status)s"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:150
+#, python-format
+msgid "%(relative_uri)s => code %(http_status)s"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:160
+msgid "Unexpected status code"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:167
+#, python-format
+msgid "Decoding JSON: %s"
+msgstr ""
+
+#: cinder/volume/api.py:96
+msgid "May specify only one of snapshot, imageRef or source volume"
+msgstr ""
+
+#: cinder/volume/api.py:103 cinder/volume/api.py:449
+#: cinder/volume/manager.py:554
+msgid "status must be available"
+msgstr ""
+
+#: cinder/volume/api.py:108
+msgid "Volume size cannot be less than the Snapshot size"
+msgstr ""
+
+#: cinder/volume/api.py:117
+msgid "Unable to clone volumes that are in an error state"
+msgstr ""
+
+#: cinder/volume/api.py:123
+msgid "Clones currently must be >= original volume size."
+msgstr "" + +#: cinder/volume/api.py:140 +#, python-format +msgid "Volume size '%s' must be an integer and greater than 0" +msgstr "" + +#: cinder/volume/api.py:150 +msgid "Size of specified image is larger than volume size." +msgstr "" + +#: cinder/volume/api.py:154 +msgid "Image minDisk size is larger than the volume size." +msgstr "" + +#: cinder/volume/api.py:168 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/api.py:177 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create volume (%(d_consumed)d " +"volumes already consumed)" +msgstr "" + +#: cinder/volume/api.py:308 +msgid "Failed to update quota for deleting volume" +msgstr "" + +#: cinder/volume/api.py:316 +#, fuzzy +msgid "Volume status must be available or error" +msgstr "볼륨의 상태를 알 수 없습니다" + +#: cinder/volume/api.py:321 +#, python-format +msgid "Volume still has %d dependent snapshots" +msgstr "" + +#: cinder/volume/api.py:374 cinder/volume/api.py:432 +#: cinder/volume/volume_types.py:65 +#, python-format +msgid "Searching by: %s" +msgstr "" + +#: cinder/volume/api.py:452 +msgid "already attached" +msgstr "" + +#: cinder/volume/api.py:459 +msgid "already detached" +msgstr "" + +#: cinder/volume/api.py:470 +#, fuzzy +msgid "Volume status must be available to reserve" +msgstr "볼륨의 상태를 알 수 없습니다" + +#: cinder/volume/api.py:519 +msgid "must be available" +msgstr "" + +#: cinder/volume/api.py:537 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG snapshot " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/api.py:546 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create snapshot (%(d_consumed)d " +"snapshots already consumed)" +msgstr "" + +#: cinder/volume/api.py:595 +#, fuzzy +msgid "Volume Snapshot status must be available or error" +msgstr "볼륨의 상태를 알 수 없습니다" + +#: cinder/volume/api.py:623 +msgid "Metadata property key blank" +msgstr "" + +#: cinder/volume/api.py:627 +msgid "Metadata property key greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:631 +msgid "Metadata property value greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:716 +#, fuzzy +msgid "Volume status must be available/in-use." +msgstr "볼륨의 상태를 알 수 없습니다" + +#: cinder/volume/api.py:719 +msgid "Volume status is in-use." +msgstr "" + +#: cinder/volume/driver.py:94 cinder/volume/drivers/netapp/nfs.py:253 +#, python-format +msgid "Recovering from a failed execute. Try number %s" +msgstr "" + +#: cinder/volume/driver.py:221 cinder/volume/drivers/emc/emc_smis_iscsi.py:119 +msgid "ISCSI provider_location not stored, using discovery" +msgstr "" + +#: cinder/volume/driver.py:269 +#, fuzzy, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "%s 볼륨을 찾을수 없습니다" + +#: cinder/volume/driver.py:273 cinder/volume/drivers/emc/emc_smis_iscsi.py:162 +#, python-format +msgid "ISCSI Discovery: Found %s" +msgstr "" + +#: cinder/volume/driver.py:361 +#, python-format +msgid "copy_image_to_volume %s." +msgstr "" + +#: cinder/volume/driver.py:378 +#, python-format +msgid "copy_volume_to_image %s." +msgstr "" + +#: cinder/volume/driver.py:437 +#, python-format +msgid "iSCSI device not found at %s" +msgstr "" + +#: cinder/volume/driver.py:439 +#, python-format +msgid "" +"ISCSI volume not yet found at: %(host_device)s. Will rescan & retry. 
Try" +" number: %(tries)s" +msgstr "" + +#: cinder/volume/driver.py:451 +#, python-format +msgid "Found iSCSI node %(host_device)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/volume/driver.py:469 cinder/volume/manager.py:689 +#: cinder/volume/drivers/lvm.py:560 cinder/volume/drivers/lvm.py:676 +#: cinder/volume/drivers/storwize_svc.py:1344 +#: cinder/volume/drivers/emc/emc_smis_common.py:855 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:241 +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1503 +#: cinder/volume/drivers/netapp/iscsi.py:1114 +#: cinder/volume/drivers/netapp/iscsi.py:1484 +#: cinder/volume/drivers/netapp/iscsi.py:2236 +#: cinder/volume/drivers/netapp/iscsi.py:2515 +#: cinder/volume/drivers/nexenta/volume.py:317 +msgid "Updating volume status" +msgstr "" + +#: cinder/volume/driver.py:544 +msgid "Driver must implement initialize_connection" +msgstr "" + +#: cinder/volume/manager.py:122 +#, python-format +msgid "Driver path %s is deprecated, update your configuration to the new path." +msgstr "" + +#: cinder/volume/manager.py:144 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "" + +#: cinder/volume/manager.py:149 +#, python-format +msgid "volume %s stuck in a downloading state" +msgstr "" + +#: cinder/volume/manager.py:154 +#, python-format +msgid "volume %s: skipping export" +msgstr "" + +#: cinder/volume/manager.py:156 +msgid "Resuming any in progress delete operations" +msgstr "" + +#: cinder/volume/manager.py:159 +#, python-format +msgid "Resuming delete on volume: %s" +msgstr "" + +#: cinder/volume/manager.py:219 +#, python-format +msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +msgstr "" + +#: cinder/volume/manager.py:228 +#, python-format +msgid "volume %s: creating from snapshot" +msgstr "" + +#: cinder/volume/manager.py:232 +#, python-format +msgid "volume %s: creating from existing volume" +msgstr "" + +#: cinder/volume/manager.py:236 +#, python-format +msgid "volume %s: creating from image" +msgstr "" + +#: cinder/volume/manager.py:245 +#, python-format +msgid "volume %s: creating" +msgstr "" + +#: cinder/volume/manager.py:277 +#, python-format +msgid "volume %s: creating export" +msgstr "" + +#: cinder/volume/manager.py:286 +#, python-format +msgid "volume %s: create failed" +msgstr "" + +#: cinder/volume/manager.py:316 +#, python-format +msgid "volume %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:324 +#, python-format +msgid "Error: %s" +msgstr "" + +#: cinder/volume/manager.py:350 +#, python-format +msgid "volume %s: Error trying to reschedule create" +msgstr "" + +#: cinder/volume/manager.py:368 +msgid "Retry info not present, will not reschedule" +msgstr "" + +#: cinder/volume/manager.py:372 +msgid "No request spec, will not reschedule" +msgstr "" + +#: cinder/volume/manager.py:377 +#, python-format +msgid "volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d" +msgstr "" + +#: cinder/volume/manager.py:406 cinder/volume/manager.py:419 +#, python-format +msgid "volume %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:412 +msgid "volume is not local to this node" +msgstr "" + +#: cinder/volume/manager.py:417 +#, python-format +msgid "volume %s: removing export" +msgstr "" + +#: cinder/volume/manager.py:422 +#, python-format +msgid "volume %s: volume is busy" +msgstr "" + +#: cinder/volume/manager.py:441 +msgid "Failed to update usages deleting volume" +msgstr "" + +#: cinder/volume/manager.py:445 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "" + +#: 
cinder/volume/manager.py:460
+#, python-format
+msgid "snapshot %s: creating"
+msgstr ""
+
+#: cinder/volume/manager.py:466
+#, python-format
+msgid "snapshot %(snap_name)s: creating"
+msgstr ""
+
+#: cinder/volume/manager.py:484
+#, python-format
+msgid "snapshot %s: created successfully"
+msgstr ""
+
+#: cinder/volume/manager.py:492 cinder/volume/manager.py:502
+#, python-format
+msgid "snapshot %s: deleting"
+msgstr ""
+
+#: cinder/volume/manager.py:505
+#, fuzzy, python-format
+msgid "snapshot %s: snapshot is busy"
+msgstr "인스턴스 %s: 스냅샷 저장중"
+
+#: cinder/volume/manager.py:530
+msgid "Failed to update usages deleting snapshot"
+msgstr ""
+
+#: cinder/volume/manager.py:533
+#, python-format
+msgid "snapshot %s: deleted successfully"
+msgstr ""
+
+#: cinder/volume/manager.py:551
+msgid "being attached by another instance"
+msgstr ""
+
+#: cinder/volume/manager.py:608
+#, python-format
+msgid "Downloaded image %(image_id)s to %(volume_id)s successfully"
+msgstr ""
+
+#: cinder/volume/manager.py:626
+#, python-format
+msgid "Uploaded volume %(volume_id)s to image (%(image_id)s) successfully"
+msgstr ""
+
+#: cinder/volume/manager.py:702
+msgid "Clear capabilities"
+msgstr ""
+
+#: cinder/volume/manager.py:706
+#, python-format
+msgid "Notification {%s} received"
+msgstr ""
+
+#: cinder/volume/volume_types.py:41
+#, python-format
+msgid "DB error: %s"
+msgstr ""
+
+#: cinder/volume/volume_types.py:50 cinder/volume/volume_types.py:97
+msgid "id cannot be None"
+msgstr ""
+
+#: cinder/volume/volume_types.py:109
+msgid "name cannot be None"
+msgstr ""
+
+#: cinder/volume/volume_types.py:128
+#, python-format
+msgid ""
+"Default volume type is not found, please check default_volume_type "
+"config: %s"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:91
+msgid "Running with CoraidDriver for ESM EtherCloud"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:102
+#, python-format
+msgid "Update session cookie %(session)s"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:108 cinder/volume/drivers/coraid.py:183
+#, python-format
+msgid "Message : %(message)s"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:125
+#, python-format
+msgid "Error while trying to set group: %(message)s"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:128
+#, fuzzy, python-format
+msgid "Unable to find group: %(group)s"
+msgstr "%s 볼륨 탈착에 실패했습니다"
+
+#: cinder/volume/drivers/coraid.py:161
+msgid "ESM urlOpen error"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:166
+msgid "JSON Error"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:170
+msgid "Request without URL"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:175
+#, python-format
+msgid "Configure data : %s"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:177
+#, python-format
+msgid "Configure response : %s"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:199
+#, python-format
+msgid "Unable to retrieve volume info for volume %(volname)s"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:289
+msgid "Cannot log in to Coraid ESM"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:308
+#, fuzzy, python-format
+msgid "Failed to create volume %(volname)s"
+msgstr "%s 볼륨을 찾을수 없습니다"
+
+#: cinder/volume/drivers/coraid.py:321
+#, fuzzy, python-format
+msgid "Failed to delete volume %(volname)s"
+msgstr "%s 볼륨을 찾을수 없습니다"
+
+#: cinder/volume/drivers/coraid.py:335
+#, python-format
+msgid "Failed to Create Snapshot %(snapname)s"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:347
+#, python-format
+msgid "Failed to Delete Snapshot %(snapname)s"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:362
+#, python-format
+msgid "Failed to Create Volume from Snapshot %(snapname)s"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:383
+#, python-format
+msgid ""
+"Failed to Initialize Connection. Volume Name: %(volname)s Shelf: "
+"%(shelf)s, Lun: %(lun)s"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:65
+#, python-format
+msgid "There's no Gluster config file configured (%s)"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:70
+#, python-format
+msgid "Gluster config file at %(config)s doesn't exist"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:80
+msgid "mount.glusterfs is not installed"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:98 cinder/volume/drivers/nfs.py:168
+#, python-format
+msgid "cast to %s"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:108 cinder/volume/drivers/nfs.py:178
+#, python-format
+msgid "Volume %s does not have provider_location specified, skipping"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:168 cinder/volume/drivers/nfs.py:237
+#, python-format
+msgid "Exception during mounting %s"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:247 cinder/volume/drivers/nfs.py:321
+#, python-format
+msgid "%s is already mounted"
+msgstr ""
+
+#: cinder/volume/drivers/lvm.py:82
+#, python-format
+msgid "volume group %s doesn't exist"
+msgstr ""
+
+#: cinder/volume/drivers/lvm.py:197
+#, python-format
+msgid "Size for volume: %s not found, skipping secure delete."
+msgstr ""
+
+#: cinder/volume/drivers/lvm.py:204
+#, python-format
+msgid "Performing secure delete on volume: %s"
+msgstr ""
+
+#: cinder/volume/drivers/lvm.py:218
+#, python-format
+msgid "Error unrecognized volume_clear option: %s"
+msgstr ""
+
+#: cinder/volume/drivers/lvm.py:238
+#, python-format
+msgid "snapshot: %s not found, skipping delete operations"
+msgstr ""
+
+#: cinder/volume/drivers/lvm.py:268 cinder/volume/drivers/lvm.py:656
+#, python-format
+msgid "Creating clone of volume: %s"
+msgstr ""
+
+#: cinder/volume/drivers/lvm.py:345
+#, python-format
+msgid "Skipping ensure_export. No iscsi_target provision for volume: %s"
+msgstr ""
+
+#: cinder/volume/drivers/lvm.py:364
+#, python-format
+msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %s"
+msgstr ""
+
+#: cinder/volume/drivers/lvm.py:378
+msgid "Detected inconsistency in provider_location id"
+msgstr ""
+
+#: cinder/volume/drivers/lvm.py:430
+#, python-format
+msgid "Symbolic link %s not found"
+msgstr ""
+
+#: cinder/volume/drivers/lvm.py:510 cinder/volume/drivers/lvm.py:524
+#, python-format
+msgid "Skipping remove_export. No iscsi_target provisioned for volume: %s"
+msgstr ""
+
+#: cinder/volume/drivers/lvm.py:542
+#, python-format
+msgid ""
+"Skipping remove_export. 
No iscsi_target is presently exported for volume:" +" %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:583 +msgid "Error retrieving volume status: " +msgstr "" + +#: cinder/volume/drivers/nfs.py:141 +#, python-format +msgid "There's no NFS config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/nfs.py:146 +#, python-format +msgid "NFS config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/rbd.py:64 +#, python-format +msgid "rbd has no pool %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:87 cinder/volume/drivers/sheepdog.py:134 +msgid "error refreshing volume stats" +msgstr "" + +#: cinder/volume/drivers/rbd.py:220 +msgid "Not stored in rbd" +msgstr "" + +#: cinder/volume/drivers/rbd.py:224 +msgid "Blank components" +msgstr "" + +#: cinder/volume/drivers/rbd.py:227 +#, fuzzy +msgid "Not an rbd snapshot" +msgstr "인스턴스 %s: 스냅샷 저장중" + +#: cinder/volume/drivers/rbd.py:242 +#, python-format +msgid "%s is in a different ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:253 +#, fuzzy, python-format +msgid "Unable to read image %s" +msgstr "%s 볼륨을 찾을수 없습니다" + +#: cinder/volume/drivers/scality.py:63 +msgid "Value required for 'scality_sofs_config'" +msgstr "" + +#: cinder/volume/drivers/scality.py:74 +#, python-format +msgid "Cannot access 'scality_sofs_config': %s" +msgstr "" + +#: cinder/volume/drivers/scality.py:80 +msgid "Cannot execute /sbin/mount.sofs" +msgstr "" + +#: cinder/volume/drivers/scality.py:101 +msgid "Cannot mount Scality SOFS, check syslog for errors" +msgstr "" + +#: cinder/volume/drivers/scality.py:135 +#, python-format +msgid "Cannot find volume dir for Scality SOFS at '%s'" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:47 +#, python-format +msgid "Sheepdog is not working: %s" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:52 +msgid "Sheepdog is not working" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:129 +#, python-format +msgid "Payload for SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:146 +#, python-format +msgid "Call to json.loads() raised an exception: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:152 +#, python-format +msgid "Results of SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:156 +#, python-format +msgid "Clone operation encountered: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:158 +#, python-format +msgid "Waiting for outstanding operation before retrying snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:164 +#, python-format +msgid "Detected xDBVersionMismatch, retry %s of 5" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:171 +#, python-format +msgid "API response: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:191 +#, python-format +msgid "Found solidfire account: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:220 +#, python-format +msgid "solidfire account: %s does not exist, create it..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:281 +#, python-format +msgid "Failed to retrieve volume SolidFire-ID: %s in get_by_account!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:338 +msgid "Failed to get model update from clone" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:364 +#, python-format +msgid "More than one valid preset was detected, using %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:397 +#, python-format +msgid "Mapped SolidFire volumeID %(sfid)s to cinder ID %(uuid)s." 
+msgstr "" + +#: cinder/volume/drivers/solidfire.py:406 +#, python-format +msgid "Volume %s, not found on SF Cluster." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:409 +#, python-format +msgid "Found %(count)s volumes mapped to id: %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:476 +msgid "Enter SolidFire delete_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:480 +#, python-format +msgid "Account for Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:482 +msgid "This usually means the volume was never succesfully created." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:497 +#, python-format +msgid "Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:500 +msgid "Leaving SolidFire delete_volume" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:504 +msgid "Executing SolidFire ensure_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:509 +msgid "Executing SolidFire create_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:558 +msgid "Updating cluster status info" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:566 +msgid "Failed to get updated stats" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:173 +#, python-format +msgid "WWPN on node %(node)s: %(wwpn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:179 +msgid "enter: do_setup" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:190 +#, python-format +msgid "pool %s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:206 +msgid "Failed to get license information." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:265 +msgid "do_setup: No configured nodes" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:267 +msgid "leave: do_setup" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:289 +msgid "enter: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:295 +#: cinder/volume/drivers/netapp/iscsi.py:169 +#: cinder/volume/drivers/netapp/iscsi.py:1182 +#: cinder/volume/drivers/netapp/iscsi.py:1535 +#: cinder/volume/drivers/netapp/nfs.py:109 +#: cinder/volume/drivers/netapp/nfs.py:328 +#: cinder/volume/drivers/netapp/nfs.py:379 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:118 +#, python-format +msgid "%s is not set" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:301 +msgid "" +"Password or SSH private key is required for authentication: set either " +"san_password or san_private_key option" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:309 +#, python-format +msgid "" +"Illegal value %d specified for storwize_svc_flashcopy_timeout: valid " +"values are between 0 and 600" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:317 +msgid "leave: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:331 +#, python-format +msgid "ensure_export: Volume %s not found on storage" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:356 +#, python-format +msgid "enter: _get_chap_secret_for_host: host name %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:392 +#, python-format +msgid "" +"leave: _get_chap_secret_for_host: host name %(host_name)s with secret " +"%(chap_secret)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:415 +msgid "_create_host: Cannot clean host name. 
Host name is not unicode or string"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:483
+#, python-format
+msgid "enter: _get_host_from_connector: prefix %s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:511
+#, python-format
+msgid "leave: _get_host_from_connector: host %s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:523
+#, python-format
+msgid "enter: _create_host: host %s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:538
+msgid "_create_host: No connector ports"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:551
+#, python-format
+msgid "leave: _create_host: host %(host)s - %(host_name)s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:574
+#, python-format
+msgid "enter: _map_vol_to_host: volume %(volume_name)s to host %(host_name)s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:609
+msgid ""
+"storwize_svc_multihostmap_enabled is set to False, multi host mapping is "
+"not allowed"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:619
+#, python-format
+msgid "volume %s mapping to multi host"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:625
+#, python-format
+msgid ""
+"leave: _map_vol_to_host: LUN %(result_lun)s, volume %(volume_name)s, host"
+" %(host_name)s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:635
+#, python-format
+msgid "enter: _delete_host: host %s "
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:643
+#, python-format
+msgid "leave: _delete_host: host %s "
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:675
+#, python-format
+msgid "enter: initialize_connection: volume %(vol)s with connector %(conn)s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:691
+msgid "_create_host failed to return the host name."
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:702
+#, python-format
+msgid "initialize_connection: Failed to get attributes for volume %s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:709
+#, python-format
+msgid "Did not find expected column name in lsvdisk: %s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:711
+#, python-format
+msgid "initialize_connection: Missing volume attribute for volume %s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:728
+#, python-format
+msgid ""
+"initialize_connection: No node found in I/O group %(gid)s for volume "
+"%(vol)s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:736
+#, python-format
+msgid "initialize_connection: Did not find a preferred node for volume %s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:769
+#, python-format
+msgid ""
+"initialize_connection: Failed to collect return properties for volume "
+"%(vol)s and connector %(conn)s.\n"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:774
+#, python-format
+msgid ""
+"leave: initialize_connection:\n"
+" volume: %(vol)s\n"
+" connector %(conn)s\n"
+" properties: %(prop)s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:792
+#, python-format
+msgid "enter: terminate_connection: volume %(vol)s with connector %(conn)s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:802
+msgid "_get_host_from_connector failed to return the host name for connector"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:817
+#, python-format
+msgid ""
+"terminate_connection: No mapping of volume %(vol_name)s to host "
+"%(host_name)s found"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:825
+#, python-format
+msgid "leave: terminate_connection: volume %(vol)s with connector %(conn)s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:883
+msgid "protocol must be specified as 'iSCSI' or 'FC'"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:907
+#, python-format
+msgid "enter: _create_vdisk: vdisk %s "
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:945
+#, python-format
+msgid ""
+"_create_vdisk %(name)s - did not find success message in CLI output.\n"
+" stdout: %(out)s\n"
+" stderr: %(err)s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:950
+#, python-format
+msgid "leave: _create_vdisk: volume %s "
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:962
+#: cinder/volume/drivers/storwize_svc.py:976
+#, python-format
+msgid ""
+"create FC mapping from %(source)s to %(target)s - did not find success "
+"message in CLI output.\n"
+" stdout: %(out)s\n"
+" stderr: %(err)s\n"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:988
+#: cinder/volume/drivers/storwize_svc.py:998
+#, python-format
+msgid ""
+"create FC mapping from %(source)s to %(target)s - did not find mapping id"
+" in CLI output.\n"
+" stdout: %(out)s\n"
+" stderr: %(err)s\n"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:1012
+#, python-format
+msgid ""
+"_prepare_fc_map: Failed to prepare FlashCopy from %(source)s to "
+"%(target)s.\n"
+"stdout: %(out)s\n"
+" stderr: %(err)s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:1039
+#, python-format
+msgid ""
+"Unexpected mapping status %(status)s for mapping %(id)s. Attributes: "
+"%(attr)s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:1050
+#, python-format
+msgid ""
+"Mapping %(id)s prepare failed to complete within the allotted %(to)d "
+"seconds timeout. Terminating."
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:1055
+#, python-format
+msgid ""
+"_prepare_fc_map: Failed to start FlashCopy from %(source)s to %(target)s "
+"with exception %(ex)s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:1062
+#, python-format
+msgid "_prepare_fc_map: %s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:1069
+#, python-format
+msgid ""
+"_start_fc_map: Failed to start FlashCopy from %(source)s to %(target)s.\n"
+"stdout: %(out)s\n"
+" stderr: %(err)s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:1080
+#, python-format
+msgid ""
+"enter: _run_flashcopy: execute FlashCopy from source %(source)s to target"
+" %(target)s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:1092
+#, python-format
+msgid "leave: _run_flashcopy: FlashCopy started from %(source)s to %(target)s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:1100
+#, python-format
+msgid "enter: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:1107
+#, python-format
+msgid "_create_copy: Source vdisk %s does not exist"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:1119
+#, python-format
+msgid ""
+"_create_copy: cannot get source vdisk %(src)s capacity from vdisk "
+"attributes %(attr)s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:1129
+#, python-format
+msgid "leave: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:1134
+#, python-format
+msgid "enter: _get_flashcopy_mapping_attributes: mapping %s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:1156
+#, python-format
+msgid ""
+"leave: _get_flashcopy_mapping_attributes: mapping %(fc_map_id)s, "
+"attributes %(attributes)s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:1164
+#, python-format
+msgid 
"enter: _is_vdisk_defined: vdisk %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1166 +#, python-format +msgid "leave: _is_vdisk_defined: vdisk %(vol)s with %(str)s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1194 +#, python-format +msgid "enter: _delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1199 +#, python-format +msgid "warning: Tried to delete vdisk %s but it does not exist." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1225 +#, python-format +msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1258 +#, python-format +msgid "leave: _delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1283 +msgid "create_volume_from_snapshot: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1297 +msgid "create_cloned_volume: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1361 +msgid "_update_volume_status: Could not get system name" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1373 +msgid "Could not get pool data from the storage" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1374 +msgid "_update_volume_status: Could not get storage pool data" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1412 +msgid "" +"Illegal value specified for storwize_svc_vol_rsize: set to either a " +"percentage (0-100) or -1" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1418 +msgid "" +"Illegal value specified for storwize_svc_vol_warning: set to a percentage" +" (0-100)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1425 +msgid "" +"Illegal value specified for storwize_svc_vol_grainsize: set to either 32," +" 64, 128, or 256" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1432 +msgid "System does not support compression" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1437 +msgid "If compression is set to True, rsize must also be set (not equal to -1)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1443 +#, python-format +msgid "" +"Illegal value %(prot)s specified for storwize_svc_connection_protocol: " +"valid values are %(enabled)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1452 +msgid "" +"Multipath is currently only supported for FC connections and not iSCSI. 
" +"(This is a Nova limitation.)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1464 +#, python-format +msgid "enter: _execute_command_and_parse_attributes: command %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1471 +#, fuzzy, python-format +msgid "" +"CLI Exception output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" +"%(description)s\n" +"Command: %(cmd)s\n" +"Exit code: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" + +#: cinder/volume/drivers/storwize_svc.py:1488 +#, python-format +msgid "" +"leave: _execute_command_and_parse_attributes:\n" +"command: %(cmd)s\n" +"attributes: %(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1507 +#, python-format +msgid "" +"_get_hdr_dic: attribute headers and values do not match.\n" +" Headers: %(header)s\n" +" Values: %(row)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1517 +#, python-format +msgid "" +"%(fun)s: Failed with unexpected CLI output.\n" +" Command: %(cmd)s\n" +"stdout: %(out)s\n" +"stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1531 +#, python-format +msgid "" +"%(fun)s: Failed with unexpected CLI output.\n" +" Command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1539 +#, python-format +msgid "Did not find expected column in %(fun)s: %(hdr)s" +msgstr "" + +#: cinder/volume/drivers/windows.py:142 +#, python-format +msgid "Creating folder %s " +msgstr "" + +#: cinder/volume/drivers/windows.py:207 +#: cinder/volume/drivers/nexenta/volume.py:203 +#, python-format +msgid "Ignored target creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/windows.py:213 +#, python-format +msgid "Disk not found: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:218 +#, python-format +msgid "Sending %(method)s to %(url)s. Body \"%(body)s\"" +msgstr "" + +#: cinder/volume/drivers/zadara.py:240 +#, python-format +msgid "Operation completed. %(data)s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:350 +#, python-format +msgid "Volume %(name)s could not be found. It might be already deleted" +msgstr "" + +#: cinder/volume/drivers/zadara.py:447 +#, python-format +msgid "Attach properties: %(properties)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:42 +msgid "" +"Module PyWBEM not installed. Install PyWBEM using the python-pywbem " +"package." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:78 +msgid "Entering create_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:82 +#, python-format +msgid "Create Volume: %(volume)s Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:90 +#, python-format +msgid "Create Volume: %(volume)s Storage type: %(storage_type)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:97 +#, python-format +msgid "" +"Create Volume: %(volume)s Pool: %(pool)s Storage System: " +"%(storage_system)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:106 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Storage Configuration Service not " +"found for pool %(storage_type)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:114 +#, python-format +msgid "" +"Create Volume: %(name)s Method: CreateOrModifyElementFromStoragePool " +"ConfigServicie: %(service)s ElementName: %(name)s InPool: %(pool)s " +"ElementType: 5 Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:129 +#, python-format +msgid "Create Volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:136 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:143 +#, python-format +msgid "Leaving create_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:151 +msgid "Entering create_volume_from_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:156 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:166 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Snapshot Instance: %(snapshotinstance)s Storage " +"System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:176 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Create Volume from Snapshot is NOT supported on VMAX." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:187 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Cannot find Replication Service to create volume from " +"snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:196 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Method: CreateElementReplica ReplicationService: " +"%(service)s ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:217 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s " +"Snapshot:%(snapshotname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:229 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Successfully clone volume from snapshot. Finding the " +"clone relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:240 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Remove the clone relationship. Method: " +"ModifyReplicaSynchronization ReplicationService: %(service)s Operation: " +"8 Synchronization: %(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:256 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:265 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. 
Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:277 +#, python-format +msgid "" +"Leaving create_volume_from_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:286 +msgid "Entering create_cloned_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:291 +#, python-format +msgid "" +"Create a Clone from Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:301 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Source Instance: %(src_instance)s Storage System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:311 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Cannot find Replication Service to create cloned volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:320 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Method: CreateElementReplica ReplicationService: %(service)s " +"ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:341 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source " +"Volume:%(srcname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:353 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Successfully cloned volume from source volume. Finding the clone " +"relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:364 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Remove the clone relationship. Method: ModifyReplicaSynchronization " +"ReplicationService: %(service)s Operation: 8 Synchronization: " +"%(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:380 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:389 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:401 +#, python-format +msgid "" +"Leaving create_cloned_volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:410 +msgid "Entering delete_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:412 +#, python-format +msgid "Delete Volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:419 +#, python-format +msgid "Volume %(name)s not found on the array. No volume to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:429 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Storage Configuration Service not " +"found." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:437 +#, python-format +msgid "Delete Volume: %(name)s DeviceID: %(deviceid)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:441 +#, python-format +msgid "" +"Delete Volume: %(name)s Method: EMCReturnToStoragePool ConfigServic: " +"%(service)s TheElement: %(vol_instance)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:454 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:463 +#, python-format +msgid "Leaving delete_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:470 +msgid "Entering create_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:474 +#, python-format +msgid "Create snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:486 +#, python-format +msgid "Device ID: %(deviceid)s: Storage System: %(storagesystem)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:493 +#: cinder/volume/drivers/emc/emc_smis_common.py:495 +#: cinder/volume/drivers/emc/emc_smis_common.py:565 +#, python-format +msgid "Cannot find Replication Service to create snapshot for volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:500 +#, python-format +msgid "" +"Create Snapshot: Method: CreateElementReplica: Target: %(snapshot)s " +"Source: %(volume)s Replication Service: %(service)s ElementName: " +"%(elementname)s Sync Type: 7 SourceElement: %(sourceelement)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:516 +#, python-format +msgid "" +"Create Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:525 +#, python-format +msgid "Error Create Snapshot: (snapshot)s Volume: %(volume)s Error: %(errordesc)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:533 +#, python-format +msgid "" +"Leaving create_snapshot: Snapshot: %(snapshot)s Volume: %(volume)s " +"Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:539 +msgid "Entering delete_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:543 +#, python-format +msgid "Delete Snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:549 +#, python-format +msgid "" +"Delete Snapshot: %(snapshot)s: volume: %(volume)s. Finding " +"StorageSychronization_SV_SV." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:557 +#, python-format +msgid "" +"Snapshot: %(snapshot)s: volume: %(volume)s not found on the array. No " +"snapshot to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:572 +#, python-format +msgid "" +"Delete Snapshot: Target: %(snapshot)s Source: %(volume)s. Method: " +"ModifyReplicaSynchronization: Replication Service: %(service)s " +"Operation: 19 Synchronization: %(sync_name)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:588 +#, python-format +msgid "" +"Delete Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:597 +#, python-format +msgid "" +"Error Delete Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. 
Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:609 +#, python-format +msgid "" +"Leaving delete_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:619 +#, python-format +msgid "Create export: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:624 +#, python-format +msgid "create_export: Volume: %(volume)s Device ID: %(device_id)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:646 +#, python-format +msgid "" +"ExposePaths: %(vol)s ConfigService: %(service)s LUNames: %(lun_name)s" +" InitiatorPortIDs: %(initiator)s DeviceAccesses: 2" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:661 +#, python-format +msgid "ExposePaths parameter LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:672 +#, python-format +msgid "Error mapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:676 +#, python-format +msgid "ExposePaths for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:692 +#, python-format +msgid "" +"HidePaths: %(vol)s ConfigService: %(service)s LUNames: %(device_id)s " +"LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:705 +#, python-format +msgid "Error unmapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:709 +#, python-format +msgid "HidePaths for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:722 +#, python-format +msgid "" +"AddMembers: ConfigService: %(service)s MaskingGroup: %(masking_group)s" +" Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:735 +#, python-format +msgid "Error mapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:740 +#, python-format +msgid "AddMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:753 +#, python-format +msgid "" +"RemoveMembers: ConfigService: %(service)s MaskingGroup: " +"%(masking_group)s Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:766 +#, python-format +msgid "Error unmapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:771 +#, python-format +msgid "RemoveMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:777 +#, python-format +msgid "Map volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:786 +#: cinder/volume/drivers/emc/emc_smis_common.py:816 +#, python-format +msgid "Cannot find Controller Configuration Service for storage system %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:800 +#, python-format +msgid "Unmap volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:806 +#, python-format +msgid "Volume %s is not mapped. No volume to unmap." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:830 +#, python-format +msgid "Initialize connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:836 +#, python-format +msgid "Volume %s is already mapped." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:848 +#, python-format +msgid "Terminate connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:880 +#, python-format +msgid "Found Storage Type: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:883 +msgid "Storage type not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:899 +#, python-format +msgid "Found Masking View: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:902 +msgid "Masking View not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:924 +msgid "Ecom user not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:944 +#, python-format +msgid "Ecom IP: %(ecomIp)s Port: %(ecomPort)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:947 +msgid "Ecom server not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:954 +msgid "Cannot connect to ECOM server" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:966 +#, python-format +msgid "Found Replication Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:979 +#, python-format +msgid "Found Storage Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:992 +#, python-format +msgid "Found Controller Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1005 +#, python-format +msgid "Found Storage Hardware ID Management Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1049 +#, python-format +msgid "Pool %(storage_type)s is not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1055 +#, python-format +msgid "Storage system not found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1061 +#, python-format +msgid "Pool: %(pool)s SystemName: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1077 +#, python-format +msgid "Pool name: %(poolname)s System name: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1109 +#, python-format +msgid "Volume %(volumename)s not found on the array." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1112 +#, python-format +msgid "Volume name: %(volumename)s Volume instance: %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1125 +#, python-format +msgid "Source: %(volumename)s Target: %(snapshotname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1148 +#, python-format +msgid "" +"Source: %(volumename)s Target: %(snapshotname)s. Storage Synchronized " +"not found. " +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1153 +#, python-format +msgid "" +"Storage system: %(storage_system)s Storage Synchronized instance: " +"%(sync)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1179 +#, python-format +msgid "Error finding %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1183 +#, python-format +msgid "Found %(name)s: %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1243 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage_system)s " +"and initiator %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1282 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage volume %(vol)s and initiator" +" %(initiator)s is %(ctrl)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1295 +#, python-format +msgid "" +"Volume %(name)s not found on the array. Cannot determine if there are " +"volumes mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1307 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage)s and " +"%(connector)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1319 +#, python-format +msgid "" +"Found %(numVolumesMapped)d volumes on storage system %(storage)s mapped " +"to %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1354 +#, python-format +msgid "Available device number on %(storage)s: %(device)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1397 +#, python-format +msgid "Device number not found for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1402 +#, python-format +msgid "Found device number %(device)d for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1412 +#, python-format +msgid "Device info: %(data)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1434 +#, python-format +msgid "Masking view: %(view)s DeviceMaskingGroup: %(masking)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1456 +#, python-format +msgid "Found Storage Processor System: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1484 +#, python-format +msgid "" +"iSCSIProtocolEndpoint for storage system %(storage_system)s and SP %(sp)s" +" is %(endpoint)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1513 +msgid "Error finding Storage Hardware ID Service." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1519 +#, python-format +msgid "" +"EMCGetTargetEndpoints: Service: %(service)s Storage HardwareIDs: " +"%(hardwareids)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1531 +msgid "Error finding Target WWNs." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1541 +#, python-format +msgid "Add target WWN: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1543 +#, python-format +msgid "Target WWNs: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1559 +#, python-format +msgid "Storage Hardware IDs for %(wwpns)s is %(foundInstances)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:158 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:167 +#, fuzzy, python-format +msgid "Cannot find device number for volume %s" +msgstr "%s 볼륨을 찾을수 없습니다" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:197 +#, python-format +msgid "Found iSCSI endpoint: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:204 +#, python-format +msgid "ISCSI endpoint not found for SP %(sp)s on storage system %(storage)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:221 +#, python-format +msgid "ISCSI properties: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:113 +msgid "read timed out" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:138 +msgid "do_setup." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:144 +msgid "check_for_setup_error." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:149 +msgid "check_for_setup_error: Can not get device type." 
+msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:153 +#, python-format +msgid "check_for_setup_error: Device type is:%(type)s, version is:%(version)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:160 +msgid "" +"check_for_setup_error: Product version not right. Please make sure the " +"product version is V1." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:171 +msgid "_get_device_type: Storage Pool must be configured." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:180 +#, python-format +msgid "create_volume:volume name: %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:194 +#, python-format +msgid "delete_volume: volume name: %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:201 +#, python-format +msgid "delete_volume:No need to delete volume. Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:210 +#, python-format +msgid "create_export: volume name:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:214 +#, python-format +msgid "create_export:Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:234 +#, python-format +msgid "initialize_connection: volume name: %(volume)s. initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:249 +#, python-format +msgid "" +"initialize_connection:Failed to find target ip for " +"initiator:%(initiatorname)s, please check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:259 +#, python-format +msgid "initialize_connection:Failed to find target iSCSI iqn. Target IP:%(ip)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:297 +#, python-format +msgid "" +"initialize_connection:host name: %(host)s, initiator name: %(ini)s, " +"hostport name: %(port)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:307 +#, python-format +msgid "" +"initialize_connection:Failed to find the given volume. volume " +"name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:359 +#, python-format +msgid "terminate_connection:volume name: %(volume)s, initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:368 +#, python-format +msgid "terminate_connection:Host does not exist. Host name:%(host)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:377 +#, python-format +msgid "terminate_connection:volume does not exist. volume name:%(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:396 +#, python-format +msgid "" +"terminate_connection:No map between host and volume. Host " +"name:%(hostname)s, volume name:%(volumename)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:414 +#, python-format +msgid "" +"terminate_connection:No initiator is added to the host. Host " +"name:%(hostname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:427 +#, python-format +msgid "create_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:434 +msgid "create_snapshot:Device does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:440 +msgid "create_snapshot:Resource pool needs 1GB valid size at least." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:447 +#, python-format +msgid "create_snapshot:Volume does not exist. 
Volume name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:456 +#, python-format +msgid "create_snapshot:Snapshot does not exist. Snapshot name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:468 +#, python-format +msgid "delete_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:475 +msgid "delete_snapshot:Device does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:484 +#, python-format +msgid "delete_snapshot:Snapshot does not exist. snapshot name:%(snap)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:497 +#, python-format +msgid "" +"create_volume_from_snapshot:snapshot name:%(snapshot)s, volume " +"name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:505 +#, python-format +msgid "" +"create_volume_from_snapshot:Device does not support create volume from " +"snapshot. Volume name:%(volume)s, snapshot name:%(snapshot)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:516 +#, python-format +msgid "" +"create_volume_from_snapshot:Snapshot does not exist. Snapshot " +"name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:567 +msgid "Config file is wrong. Controller IP, UserName and UserPassword must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:573 +#, python-format +msgid "_check_conf_file: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:584 +#, python-format +msgid "_read_xml:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:612 +#, python-format +msgid "Write login information to xml error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:616 +#, python-format +msgid "_get_login_info error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:638 +#, python-format +msgid "" +"Config file is wrong. LUNType must be \"Thin\" or \"Thick\". " +"LUNType:%(type)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:665 +msgid "_get_lun_set_info:Use default prefetch type. Prefetch type:Intelligent." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:676 +msgid "" +"_get_lun_set_info:No available pools! Please check whether storage pool " +"is created." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:686 +#, python-format +msgid "_get_lun_set_info:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:736 +msgid "" +"_get_maximum_pool:maxpoolid is None. Please check config file and make " +"sure the \"Name\" in \"StoragePool\" is right." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:754 +#, python-format +msgid "_get_iscsi_info:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:764 +#, python-format +msgid "CLI command:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:831 +#, python-format +msgid "_execute_cli:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:838 +#, python-format +msgid "" +"_name_translate:Name in cinder: %(old)s, new name in storage system: " +"%(new)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:872 +#, python-format +msgid "" +"_create_hostgroup:Failed to Create hostgroup. Hostgroup name: %(name)s. " +"out:%(out)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:901 +#, python-format +msgid "" +"_add_host:Failed to add host to hostgroup. 
host name:%(host)s hostgroup " +"id:%(hostgroup)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:927 +#, python-format +msgid "" +"_add_initiator:Failed to add initiator. initiator name:%(name)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:941 +#, python-format +msgid "" +"_delete_initiator:ERROR:Failed to delete initiator. initiator " +"name:%(name)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:991 +#, python-format +msgid "" +"_add_hostport:Failed to add hostport. port name:%(port)s port " +"information:%(info)s host id:%(host)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1009 +#, python-format +msgid "_delete_hostport:Failed to delete host port. port id:%(portid)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1016 +#, python-format +msgid "_get_tgt_iqn:iSCSI IP is %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1049 +#, python-format +msgid "_get_tgt_iqn:iSCSI target iqn is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1086 +#, python-format +msgid "" +"_map_lun:Failed to add hostmap. hostid:%(host)s lunid:%(lun)s " +"hostlunid:%(hostlunid)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1122 +#, python-format +msgid "" +"_delete_map:There are IOs accessing the system. Retry to delete host map." +" map id:%(mapid)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1128 +#, python-format +msgid "_delete_map:Failed to delete host map. mapid:%(mapid)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1142 +#, python-format +msgid "_delete_host: Failed to delete host. host id:%(hostid)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1223 +#, python-format +msgid "" +"_active_snapshot:Failed to activate snapshot. snapshot id:%(name)s. " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1237 +#, python-format +msgid "" +"_disable_snapshot:Failed to disable snapshot. snapshot id:%(id)s. " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1251 +#, python-format +msgid "" +"_delete_snapshot:Failed to delete snapshot. snapshot id:%(id)s. " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1308 +#, python-format +msgid "_create_volume:Failed to Create volume. volume name:%(name)s. out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1321 +#, python-format +msgid "_delete_volume:Failed to delete volume. Volume name:%(name)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1338 +#, python-format +msgid "" +"_create_luncopy:Failed to Create LUNcopy. LUNcopy name:%(name)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1352 +#, python-format +msgid "" +"_start_luncopy:Failed to start LUNcopy. LUNcopy id:%(luncopyid)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1382 +#, python-format +msgid "" +"_wait_for_luncopy:LUNcopy status is not normal. LUNcopy " +"name:%(luncopyname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1415 +#, python-format +msgid "" +"_delete_luncopy:Failed to delete LUNcopy. LUNcopy id:%(luncopyid)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1430 +#, python-format +msgid "" +"_create_snapshot:Failed to Create snapshot. 
Snapshot name:%(name)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1468 +#, python-format +msgid "" +"_change_lun_controller:Failed to change lun owning controller. lun " +"id:%(lunid)s. new controller:%(controller)s. out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1484 +msgid "_is_resource_pool_enough:Resource pool for snapshot not be added." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:123 +#, python-format +msgid "API %(name)s failed: %(reason)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:134 +#: cinder/volume/drivers/netapp/iscsi.py:1167 +#, python-format +msgid "Using WSDL: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:144 +#, python-format +msgid "Using DFM server: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:149 +#, python-format +msgid "Using storage service: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:154 +#, python-format +msgid "Using storage service prefix: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:160 +#, python-format +msgid "Using vfiler: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:173 +msgid "Either netapp_storage_service or netapp_storage_service_prefix must be set" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:204 +msgid "Connected to DFM server" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:282 +#, python-format +msgid "Discovered %(dataset_count)s datasets and %(lun_count)s LUNs" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:317 +#, python-format +msgid "Job failed: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:346 +msgid "" +"Attempt to use volume_type without specifying " +"netapp_storage_service_prefix flag." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:350 +msgid "" +"You must set the netapp_storage_service flag in order to create volumes " +"with no volume_type." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:420 +msgid "Failed to provision dataset member" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:435 +msgid "No LUN was created by the provision job" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:463 +#: cinder/volume/drivers/netapp/iscsi.py:1250 +#, python-format +msgid "No entry in LUN table for volume %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:481 +msgid "Failed to remove and delete dataset LUN member" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:497 +msgid "Failed to remove and delete dataset Qtree member" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:538 +#, python-format +msgid "No entry in LUN table for volume %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:558 +#, python-format +msgid "Failed to get LUN details for LUN ID %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:575 +#, python-format +msgid "Failed to get host details for host ID %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:812 +#: cinder/volume/drivers/netapp/iscsi.py:859 +#, fuzzy, python-format +msgid "No LUN ID for volume %s" +msgstr "%s 볼륨을 찾을수 없습니다" + +#: cinder/volume/drivers/netapp/iscsi.py:821 +#, python-format +msgid "Failed to get target portal for filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:826 +#, python-format +msgid "Failed to get target IQN for filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:930 +msgid "" +"Failed to get LUN list. Is the DFM host time-synchronized with Cinder " +"host?" 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1042 +#: cinder/volume/drivers/netapp/iscsi.py:1381 +#: cinder/volume/drivers/netapp/iscsi.py:1699 +#: cinder/volume/drivers/netapp/nfs.py:74 +#, python-format +msgid "" +"Cannot create volume of size %(vol_size)s from snapshot of size " +"%(snap_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1054 +#, python-format +msgid "" +"Cannot create volume of type %(new_type)s from snapshot of type " +"%(old_type)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1075 +#: cinder/volume/drivers/netapp/nfs.py:273 +#, python-format +msgid "" +"Cannot create clone of size %(vol_size)s from volume of size " +"%(src_vol_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1086 +#, python-format +msgid "Cannot create clone of type %(new_type)s from volume of type %(old_type)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1143 +#, python-format +msgid "No metadata property %(prop)s defined for the LUN %(name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1216 +#: cinder/volume/drivers/netapp/iscsi.py:1561 +msgid "Success getting LUN list from server" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1238 +#: cinder/volume/drivers/netapp/iscsi.py:1576 +#, python-format +msgid "Created LUN with name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1254 +#: cinder/volume/drivers/netapp/iscsi.py:1369 +#: cinder/volume/drivers/netapp/iscsi.py:1593 +#, python-format +msgid "Destroyed LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1291 +#, python-format +msgid "Mapped LUN %(handle)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1298 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(handle)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1303 +#: cinder/volume/drivers/netapp/iscsi.py:1637 +#, python-format +msgid "Failed to get LUN target details for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1307 +#: cinder/volume/drivers/netapp/iscsi.py:1648 +#, python-format +msgid "Failed to get target portal for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1311 +#: cinder/volume/drivers/netapp/iscsi.py:1651 +#, python-format +msgid "Failed to get target IQN for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1344 +#, python-format +msgid "Unmapped LUN %(handle)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1365 +#, python-format +msgid "No entry in LUN table for snapshot %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1409 +#: cinder/volume/drivers/netapp/iscsi.py:1907 +msgid "Object is not a NetApp LUN." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1419 +#: cinder/volume/drivers/netapp/iscsi.py:2188 +#, python-format +msgid "Cloned LUN with new name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1439 +#, fuzzy, python-format +msgid "Could not find handle for LUN named %s" +msgstr "%s 볼륨을 찾을수 없습니다" + +#: cinder/volume/drivers/netapp/iscsi.py:1458 +#: cinder/volume/drivers/netapp/iscsi.py:1936 +#, python-format +msgid "" +"Cannot clone volume of size %(vol_size)s from src volume of size " +"%(src_vol_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1518 +#, python-format +msgid "Using NetApp filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1585 +#, python-format +msgid "No entry in LUN table for volume/snapshot %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1628 +#, python-format +msgid "Mapped LUN %(name)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1632 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(name)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1688 +#, python-format +msgid "Snapshot %s deletion successful" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1717 +#, python-format +msgid "Unmapped LUN %(name)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1735 +#, fuzzy, python-format +msgid "Failed to get vol with required size for volume: %s" +msgstr "%s 볼륨을 찾을수 없습니다" + +#: cinder/volume/drivers/netapp/iscsi.py:1811 +#, python-format +msgid "Error mapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1829 +#, python-format +msgid "Error unmapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1923 +#, python-format +msgid "Could not find attribute for LUN named %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2057 +#, python-format +msgid "No iscsi service found for vserver %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2192 +#, python-format +msgid "No clonned lun named %s found on the filer" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2377 +#, python-format +msgid "Error finding luns for volume %(vol)s. Verify volume exists." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2481 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s completed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2484 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s failed" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:312 +#, python-format +msgid "" +"Cloning with params ip %(host_ip)s, exp_path\n" +" %(export_path)s, vol %(volume_name)s,\n" +" clone_name %(clone_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:461 +#, python-format +msgid "No interface found on cluster for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:487 +#, python-format +msgid "" +"No volume on cluster with vserver\n" +" %(vserver)s and junction path " +"%(junction)s\n" +" " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:493 +#, python-format +msgid "" +"Cloning with params volume %(volume)s,src %(src_path)s,\n" +" dest %(dest_path)s, vserver %(vserver)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:547 +#, python-format +msgid "No storage path found for export path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:555 +#, python-format +msgid "Cloning with src %(src_path)s, dest %(dest_path)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/__init__.py:27 +msgid "Nexenta SA returned the error" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:64 +#, python-format +msgid "Sending JSON data: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:69 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:75 +msgid "No headers in server response" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:76 +msgid "Bad response from server" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:79 +#, python-format +msgid "Got response: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:99 +#, python-format +msgid "Volume %s does not exist in Nexenta SA" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:211 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:220 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:228 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:238 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:274 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:282 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:74 +#, python-format +msgid "CLIQ command returned %s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:80 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:88 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. 
Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:118 +#, python-format +msgid "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. " +"Result=%(_xml)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:171 +#, python-format +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:235 +msgid "local_path not supported" +msgstr "" + +#: cinder/volume/drivers/san/san.py:148 +#, fuzzy, python-format +msgid "Error running SSH command: %s" +msgstr "명령 실행도중 예측하지 못한 에러가 발생했습니다" + +#: cinder/volume/drivers/san/san.py:169 +msgid "Specify san_password or san_private_key" +msgstr "" + +#: cinder/volume/drivers/san/san.py:173 +msgid "san_ip must be set" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:80 +#, python-format +msgid "Cannot parse list-view output: %s" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:174 +#, python-format +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:257 +#, python-format +msgid "SSH Command failed after '%(total_attempts)r' attempts: '%(command)s'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:261 +#, fuzzy, python-format +msgid "Error running ssh command: %s" +msgstr "명령 실행도중 예측하지 못한 에러가 발생했습니다" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:446 +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:104 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:103 +#, python-format +msgid "CPG (%s) doesn't exist on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:493 +#, python-format +msgid "Must specify a valid persona %(valid)s, value '%(persona)s' is invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:532 +#, python-format +msgid "" +"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " +"invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:569 +#, python-format +msgid "Volume (%s) already exists on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:622 +#, fuzzy, python-format +msgid "Unexpected state while cloning %s" +msgstr "명령 실행도중 예측하지 못한 에러가 발생했습니다" + +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:97 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:96 +msgid "Login to 3PAR array invalid" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:218 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:237 +#, python-format +msgid "The hostname must be called '%s'" +msgstr "" + diff --git a/cinder/locale/ko_KR/LC_MESSAGES/cinder.po b/cinder/locale/ko_KR/LC_MESSAGES/cinder.po new file mode 100644 index 0000000000..c1f7387564 --- /dev/null +++ b/cinder/locale/ko_KR/LC_MESSAGES/cinder.po @@ -0,0 +1,5574 @@ +# Korean (South Korea) translations for cinder. +# Copyright (C) 2013 ORGANIZATION +# This file is distributed under the same license as the cinder project. 
+ +# Translators: +msgid "" +msgstr "" +"Project-Id-Version: Cinder\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2013-05-21 18:10+0000\n" +"PO-Revision-Date: 2013-05-08 11:44+0000\n" +"Last-Translator: FULL NAME \n" +"Language-Team: Korean (Korea) " +"(http://www.transifex.com/projects/p/openstack/language/ko_KR/)\n" +"Plural-Forms: nplurals=1; plural=0\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 0.9.6\n" + +#: cinder/context.py:61 +#, python-format +msgid "Arguments dropped when creating context: %s" +msgstr "" + +#: cinder/context.py:92 +#, python-format +msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" +msgstr "" + +#: cinder/exception.py:63 +msgid "Unexpected error while running command." +msgstr "" + +#: cinder/exception.py:66 +#, python-format +msgid "" +"%(description)s\n" +"Command: %(cmd)s\n" +"Exit code: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" +msgstr "" + +#: cinder/exception.py:90 +msgid "DB exception wrapped." +msgstr "" + +#: cinder/exception.py:104 +msgid "An unknown exception occurred." +msgstr "" + +#: cinder/exception.py:125 cinder/openstack/common/rpc/common.py:86 +msgid "Exception in string format operation" +msgstr "" + +#: cinder/exception.py:138 +msgid "Connection to glance failed" +msgstr "" + +#: cinder/exception.py:142 +msgid "Not authorized." +msgstr "" + +#: cinder/exception.py:147 +msgid "User does not have admin privileges" +msgstr "" + +#: cinder/exception.py:151 +#, python-format +msgid "Policy doesn't allow %(action)s to be performed." +msgstr "" + +#: cinder/exception.py:155 +#, python-format +msgid "Not authorized for image %(image_id)s." +msgstr "" + +#: cinder/exception.py:159 +msgid "Unacceptable parameters." +msgstr "" + +#: cinder/exception.py:164 +msgid "Invalid snapshot" +msgstr "" + +#: cinder/exception.py:168 +#, python-format +msgid "Volume %(volume_id)s is still attached, detach volume first." +msgstr "" + +#: cinder/exception.py:172 +msgid "Failed to load data into json format" +msgstr "" + +#: cinder/exception.py:176 +msgid "The request is invalid." +msgstr "" + +#: cinder/exception.py:180 +msgid "The results are invalid." +msgstr "" + +#: cinder/exception.py:184 +msgid "Invalid input received" +msgstr "" + +#: cinder/exception.py:188 +msgid "Invalid volume type" +msgstr "" + +#: cinder/exception.py:192 +msgid "Invalid volume" +msgstr "" + +#: cinder/exception.py:196 +#, python-format +msgid "Invalid content type %(content_type)s." +msgstr "" + +#: cinder/exception.py:200 +msgid "Invalid Parameter: Unicode is not supported by the current database." +msgstr "" + +#: cinder/exception.py:207 +#, python-format +msgid "%(err)s" +msgstr "" + +#: cinder/exception.py:211 +msgid "Service is unavailable at this time." +msgstr "" + +#: cinder/exception.py:215 +#, python-format +msgid "Image %(image_id)s is unacceptable: %(reason)s" +msgstr "" + +#: cinder/exception.py:219 +msgid "Expected a uuid but received %(uuid)s." +msgstr "" + +#: cinder/exception.py:223 +msgid "Resource could not be found." +msgstr "" + +#: cinder/exception.py:229 +#, python-format +msgid "Volume %(volume_id)s persistence file could not be found." +msgstr "" + +#: cinder/exception.py:233 +#, python-format +msgid "Volume %(volume_id)s could not be found." 
+msgstr "" + +#: cinder/exception.py:237 +#, python-format +msgid "Unable to locate account %(account_name)s on Solidfire device" +msgstr "" + +#: cinder/exception.py:242 +#, python-format +msgid "Volume not found for instance %(instance_id)s." +msgstr "" + +#: cinder/exception.py:246 +#, python-format +msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:251 cinder/exception.py:264 +msgid "Invalid metadata" +msgstr "" + +#: cinder/exception.py:255 cinder/exception.py:268 +msgid "Invalid metadata size" +msgstr "" + +#: cinder/exception.py:259 +#, python-format +msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:272 +#, python-format +msgid "Volume type %(volume_type_id)s could not be found." +msgstr "" + +#: cinder/exception.py:276 +#, python-format +msgid "Volume type with name %(volume_type_name)s could not be found." +msgstr "" + +#: cinder/exception.py:281 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s has no extra specs with key " +"%(extra_specs_key)s." +msgstr "" + +#: cinder/exception.py:286 +#, python-format +msgid "Snapshot %(snapshot_id)s could not be found." +msgstr "" + +#: cinder/exception.py:290 +#, python-format +msgid "deleting volume %(volume_name)s that has snapshot" +msgstr "" + +#: cinder/exception.py:294 +#, python-format +msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" +msgstr "" + +#: cinder/exception.py:299 +#, python-format +msgid "No target id found for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:303 +#, python-format +msgid "Failed to create iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:307 +#, python-format +msgid "Failed to attach iSCSI target for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:311 +#, python-format +msgid "Failed to remove iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:315 +#, python-format +msgid "No disk at %(location)s" +msgstr "" + +#: cinder/exception.py:319 +#, python-format +msgid "Invalid image href %(image_href)s." +msgstr "" + +#: cinder/exception.py:323 +#, python-format +msgid "Image %(image_id)s could not be found." +msgstr "" + +#: cinder/exception.py:327 +#, python-format +msgid "Service %(service_id)s could not be found." +msgstr "" + +#: cinder/exception.py:331 +#, python-format +msgid "Host %(host)s could not be found." +msgstr "" + +#: cinder/exception.py:335 +#, python-format +msgid "Scheduler Host Filter %(filter_name)s could not be found." +msgstr "" + +#: cinder/exception.py:339 +#, python-format +msgid "Scheduler Host Weigher %(weigher_name)s could not be found." +msgstr "" + +#: cinder/exception.py:343 +#, python-format +msgid "Could not find binary %(binary)s on host %(host)s." +msgstr "" + +#: cinder/exception.py:347 +#, python-format +msgid "Invalid reservation expiration %(expire)s." +msgstr "" + +#: cinder/exception.py:351 +#, python-format +msgid "" +"Change would make usage less than 0 for the following resources: " +"%(unders)s" +msgstr "" + +#: cinder/exception.py:356 +msgid "Quota could not be found" +msgstr "" + +#: cinder/exception.py:360 +#, python-format +msgid "Unknown quota resources %(unknown)s." +msgstr "" + +#: cinder/exception.py:364 +#, python-format +msgid "Quota for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:368 +#, python-format +msgid "Quota class %(class_name)s could not be found." 
+msgstr "" + +#: cinder/exception.py:372 +#, python-format +msgid "Quota usage for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:376 +#, python-format +msgid "Quota reservation %(uuid)s could not be found." +msgstr "" + +#: cinder/exception.py:380 +#, python-format +msgid "Quota exceeded for resources: %(overs)s" +msgstr "" + +#: cinder/exception.py:384 +#, python-format +msgid "Migration %(migration_id)s could not be found." +msgstr "" + +#: cinder/exception.py:388 +#, python-format +msgid "Migration not found for instance %(instance_id)s with status %(status)s." +msgstr "" + +#: cinder/exception.py:393 +#, python-format +msgid "File %(file_path)s could not be found." +msgstr "" + +#: cinder/exception.py:397 +#, python-format +msgid "Class %(class_name)s could not be found: %(exception)s" +msgstr "" + +#: cinder/exception.py:401 +msgid "Action not allowed." +msgstr "" + +#: cinder/exception.py:410 +#, python-format +msgid "Key pair %(key_name)s already exists." +msgstr "" + +#: cinder/exception.py:414 +#, python-format +msgid "Volume Type %(id)s already exists." +msgstr "" + +#: cinder/exception.py:418 +msgid "Migration error" +msgstr "" + +#: cinder/exception.py:422 +#, python-format +msgid "Malformed message body: %(reason)s" +msgstr "" + +#: cinder/exception.py:426 +#, python-format +msgid "Could not find config at %(path)s" +msgstr "" + +#: cinder/exception.py:430 +#, python-format +msgid "Could not load paste app '%(name)s' from %(path)s" +msgstr "" + +#: cinder/exception.py:434 +#, python-format +msgid "No valid host was found. %(reason)s" +msgstr "" + +#: cinder/exception.py:438 +#, python-format +msgid "Host %(host)s is not up or doesn't exist." +msgstr "" + +#: cinder/exception.py:442 +msgid "Quota exceeded" +msgstr "" + +#: cinder/exception.py:449 +msgid "Requested volume or snapshot exceeds allowed Gigabytes quota" +msgstr "" + +#: cinder/exception.py:454 +msgid "Maximum volume/snapshot size exceeded" +msgstr "" + +#: cinder/exception.py:458 +#, python-format +msgid "Maximum number of volumes allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:462 +#, python-format +msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:466 +#, python-format +msgid "Detected more than one volume with name %(vol_name)s" +msgstr "" + +#: cinder/exception.py:470 +#, python-format +msgid "3PAR Host already exists: %(err)s. %(info)s" +msgstr "" + +#: cinder/exception.py:474 +#, python-format +msgid "Invalid 3PAR Domain: %(err)s" +msgstr "" + +#: cinder/exception.py:478 +#, python-format +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +msgstr "" + +#: cinder/exception.py:483 +msgid "Bad response from SolidFire API" +msgstr "" + +#: cinder/exception.py:487 +#, python-format +msgid "Error in SolidFire API response: data=%(data)s" +msgstr "" + +#: cinder/exception.py:491 +#, python-format +msgid "Unknown or unsupported command %(cmd)s" +msgstr "" + +#: cinder/exception.py:495 +#, python-format +msgid "Malformed response to command %(cmd)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:499 +#, python-format +msgid "Bad HTTP response status %(status)s" +msgstr "" + +#: cinder/exception.py:503 +#, python-format +msgid "Operation failed with status=%(status)s. 
Full dump: %(data)s" +msgstr "" + +#: cinder/exception.py:507 +#, python-format +msgid "Unable to create server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:511 +#, python-format +msgid "Unable to find server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:515 +msgid "Unable to find any active VPSA controller" +msgstr "" + +#: cinder/exception.py:519 +#, python-format +msgid "Failed to retrieve attachments for volume %(name)s" +msgstr "" + +#: cinder/exception.py:523 +#, python-format +msgid "Invalid attachment info for volume %(name)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:527 +#, python-format +msgid "Instance %(instance_id)s could not be found." +msgstr "" + +#: cinder/exception.py:531 +#, python-format +msgid "Bad or unexpected response from the storage volume backend API: %(data)s" +msgstr "" + +#: cinder/exception.py:536 +msgid "Unknown NFS exception" +msgstr "" + +#: cinder/exception.py:540 +msgid "No mounted NFS shares found" +msgstr "" + +#: cinder/exception.py:544 cinder/exception.py:556 +#, python-format +msgid "There is no share which can host %(volume_size)sG" +msgstr "" + +#: cinder/exception.py:548 +msgid "Unknown Gluster exception" +msgstr "" + +#: cinder/exception.py:552 +msgid "No mounted Gluster shares found" +msgstr "" + +#: cinder/exception.py:560 +#, python-format +msgid "" +"Glance metadata cannot be updated, key %(key)s exists for volume id " +"%(volume_id)s" +msgstr "" + +#: cinder/exception.py:565 +msgid "Failed to copy image to volume" +msgstr "" + +#: cinder/exception.py:569 +#, python-format +msgid "Backup %(backup_id)s could not be found." +msgstr "" + +#: cinder/exception.py:573 +#, python-format +msgid "Invalid backup: %(reason)s" +msgstr "" + +#: cinder/exception.py:577 +msgid "Connection to swift failed" +msgstr "" + +#: cinder/flags.py:145 +msgid "Deploy v1 of the Cinder API. " +msgstr "" + +#: cinder/flags.py:148 +msgid "Deploy v2 of the Cinder API. " +msgstr "" + +#: cinder/manager.py:158 +#, python-format +msgid "Skipping %(full_task_name)s, %(ticks_to_skip)s ticks left until next run" +msgstr "" + +#: cinder/manager.py:164 +#, python-format +msgid "Running periodic task %(full_task_name)s" +msgstr "" + +#: cinder/manager.py:171 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "" + +#: cinder/manager.py:216 +msgid "Notifying Schedulers of capabilities ..." 
+msgstr "" + +#: cinder/policy.py:30 +msgid "JSON file representing policy" +msgstr "" + +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" +msgstr "" + +#: cinder/quota.py:704 +#, python-format +msgid "Created reservations %(reservations)s" +msgstr "" + +#: cinder/quota.py:726 +#, python-format +msgid "Failed to commit reservations %(reservations)s" +msgstr "" + +#: cinder/quota.py:747 +#, python-format +msgid "Failed to roll back reservations %(reservations)s" +msgstr "" + +#: cinder/service.py:124 +msgid "SIGTERM received" +msgstr "" + +#: cinder/service.py:174 cinder/openstack/common/service.py:164 +msgid "Parent process has died unexpectedly, exiting" +msgstr "" + +#: cinder/service.py:213 cinder/openstack/common/service.py:200 +msgid "Forking too fast, sleeping" +msgstr "" + +#: cinder/service.py:231 cinder/openstack/common/service.py:121 +#: cinder/openstack/common/service.py:218 +#, python-format +msgid "Caught %s, exiting" +msgstr "" + +#: cinder/service.py:236 cinder/openstack/common/service.py:223 +msgid "Unhandled exception" +msgstr "" + +#: cinder/service.py:243 cinder/openstack/common/service.py:230 +#, python-format +msgid "Started child %d" +msgstr "" + +#: cinder/service.py:253 cinder/openstack/common/service.py:240 +#, python-format +msgid "Starting %d workers" +msgstr "" + +#: cinder/service.py:272 cinder/openstack/common/service.py:257 +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "" + +#: cinder/service.py:275 +#, python-format +msgid "Child %(pid)d exited with status %(code)d" +msgstr "" + +#: cinder/service.py:278 cinder/openstack/common/service.py:265 +#, python-format +msgid "pid %d not in child list" +msgstr "" + +#: cinder/service.py:286 +#, python-format +msgid "_wait_child %d" +msgstr "" + +#: cinder/service.py:302 +#, python-format +msgid "wait wrap.failed %s" +msgstr "" + +#: cinder/service.py:310 cinder/openstack/common/service.py:293 +#, python-format +msgid "Caught %s, stopping children" +msgstr "" + +#: cinder/service.py:321 cinder/openstack/common/service.py:304 +#, python-format +msgid "Waiting on %d children to exit" +msgstr "" + +#: cinder/service.py:353 +#, python-format +msgid "Starting %(topic)s node (version %(version_string)s)" +msgstr "" + +#: cinder/service.py:367 cinder/openstack/common/rpc/service.py:47 +#, python-format +msgid "Creating Consumer connection for Service %s" +msgstr "" + +#: cinder/service.py:458 +msgid "Service killed that has no database entry" +msgstr "" + +#: cinder/service.py:495 +msgid "The service database object disappeared, Recreating it." +msgstr "" + +#: cinder/service.py:510 +msgid "Recovered model server connection!" +msgstr "" + +#: cinder/service.py:516 +msgid "model server went away" +msgstr "" + +#: cinder/service.py:608 +msgid "Full set of FLAGS:" +msgstr "" + +#: cinder/service.py:615 +#, python-format +msgid "%(flag)s : FLAG SET " +msgstr "" + +#: cinder/utils.py:96 +#, python-format +msgid "Fetching %s" +msgstr "" + +#: cinder/utils.py:147 cinder/openstack/common/processutils.py:122 +#, python-format +msgid "Got unknown keyword args to utils.execute: %r" +msgstr "" + +#: cinder/utils.py:153 +msgid "" +"The root_helper option (which lets you specify a root wrapper different " +"from cinder-rootwrap, and defaults to using sudo) is now deprecated. You " +"should use the rootwrap_config option instead." 
+msgstr "" + +#: cinder/utils.py:169 cinder/openstack/common/processutils.py:137 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "" + +#: cinder/utils.py:186 cinder/utils.py:267 +#: cinder/openstack/common/processutils.py:162 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:224 +#, python-format +msgid "Result was %s" +msgstr "" + +#: cinder/utils.py:199 cinder/openstack/common/processutils.py:174 +#, python-format +msgid "%r failed. Retrying." +msgstr "" + +#: cinder/utils.py:241 cinder/volume/drivers/san/hp/hp_3par_common.py:197 +#, python-format +msgid "Running cmd (SSH): %s" +msgstr "" + +#: cinder/utils.py:243 +msgid "Environment not supported over SSH" +msgstr "" + +#: cinder/utils.py:247 +msgid "process_input not supported over SSH" +msgstr "" + +#: cinder/utils.py:316 cinder/volume/drivers/huawei/huawei_iscsi.py:91 +msgid "Specify a password or private_key" +msgstr "" + +#: cinder/utils.py:332 cinder/volume/drivers/huawei/huawei_iscsi.py:99 +#, python-format +msgid "Error connecting via ssh: %s" +msgstr "" + +#: cinder/utils.py:373 +#, python-format +msgid "debug in callback: %s" +msgstr "" + +#: cinder/utils.py:532 +#, python-format +msgid "Link Local address is not found.:%s" +msgstr "" + +#: cinder/utils.py:535 +#, python-format +msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +msgstr "" + +#: cinder/utils.py:569 +#, python-format +msgid "Invalid backend: %s" +msgstr "" + +#: cinder/utils.py:580 +#, python-format +msgid "backend %s" +msgstr "" + +#: cinder/utils.py:630 +msgid "in looping call" +msgstr "" + +#: cinder/utils.py:813 +#, python-format +msgid "Expected object of type: %s" +msgstr "" + +#: cinder/utils.py:903 +#, python-format +msgid "timefunc: '%(name)s' took %(total_time).2f secs" +msgstr "" + +#: cinder/utils.py:1105 +#, python-format +msgid "Could not remove tmpdir: %s" +msgstr "" + +#: cinder/utils.py:1213 +#, python-format +msgid "Unknown byte multiplier: %s" +msgstr "" + +#: cinder/wsgi.py:121 +#, python-format +msgid "Unable to find cert_file : %s" +msgstr "" + +#: cinder/wsgi.py:124 +#, python-format +msgid "Unable to find ca_file : %s" +msgstr "" + +#: cinder/wsgi.py:127 +#, python-format +msgid "Unable to find key_file : %s" +msgstr "" + +#: cinder/wsgi.py:130 +msgid "" +"When running server in SSL mode, you must specify both a cert_file and " +"key_file option value in your configuration file" +msgstr "" + +#: cinder/wsgi.py:163 +#, python-format +msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" +msgstr "" + +#: cinder/wsgi.py:207 +#, python-format +msgid "Started %(name)s on %(_host)s:%(_port)s" +msgstr "" + +#: cinder/wsgi.py:226 +msgid "Stopping WSGI server." +msgstr "" + +#: cinder/wsgi.py:240 +msgid "WSGI server has stopped." +msgstr "" + +#: cinder/wsgi.py:309 +msgid "You must implement __call__" +msgstr "" + +#: cinder/api/auth.py:26 +msgid "" +"cinder.api.auth:CinderKeystoneContext is deprecated. Please use " +"cinder.api.middleware.auth:CinderKeystoneContext instead." +msgstr "" + +#: cinder/api/auth.py:34 +msgid "" +"cinder.api.auth:pipeline_factory is deprecated. Please use " +"cinder.api.middleware.auth:pipeline_factory instead." 
+msgstr "" + +#: cinder/api/common.py:63 cinder/api/common.py:97 cinder/volume/api.py:359 +msgid "limit param must be an integer" +msgstr "" + +#: cinder/api/common.py:66 cinder/api/common.py:101 cinder/volume/api.py:356 +msgid "limit param must be positive" +msgstr "" + +#: cinder/api/common.py:91 +msgid "offset param must be an integer" +msgstr "" + +#: cinder/api/common.py:105 +msgid "offset param must be positive" +msgstr "" + +#: cinder/api/common.py:133 +#, python-format +msgid "marker [%s] not found" +msgstr "" + +#: cinder/api/common.py:160 +#, python-format +msgid "href %s does not contain version" +msgstr "" + +#: cinder/api/extensions.py:184 +msgid "Initializing extension manager." +msgstr "" + +#: cinder/api/extensions.py:199 +#, python-format +msgid "Loaded extension: %s" +msgstr "" + +#: cinder/api/extensions.py:237 +#, python-format +msgid "Ext name: %s" +msgstr "" + +#: cinder/api/extensions.py:238 +#, python-format +msgid "Ext alias: %s" +msgstr "" + +#: cinder/api/extensions.py:239 +#, python-format +msgid "Ext description: %s" +msgstr "" + +#: cinder/api/extensions.py:241 +#, python-format +msgid "Ext namespace: %s" +msgstr "" + +#: cinder/api/extensions.py:242 +#, python-format +msgid "Ext updated: %s" +msgstr "" + +#: cinder/api/extensions.py:244 +#, python-format +msgid "Exception loading extension: %s" +msgstr "" + +#: cinder/api/extensions.py:258 +#, python-format +msgid "Loading extension %s" +msgstr "" + +#: cinder/api/extensions.py:264 +#, python-format +msgid "Calling extension factory %s" +msgstr "" + +#: cinder/api/extensions.py:278 +#, python-format +msgid "osapi_volume_extension is set to deprecated path: %s" +msgstr "" + +#: cinder/api/extensions.py:280 +#, python-format +msgid "" +"Please set your flag or cinder.conf settings for osapi_volume_extension " +"to: %s" +msgstr "" + +#: cinder/api/extensions.py:289 +#, python-format +msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:357 +#, python-format +msgid "Failed to load extension %(classpath)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:381 +#, python-format +msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgstr "" + +#: cinder/api/sizelimit.py:25 +msgid "" +"cinder.api.sizelimit:RequestBodySizeLimiter is deprecated. Please use " +"cinder.api.middleware.sizelimit:RequestBodySizeLimiter instead" +msgstr "" + +#: cinder/api/xmlutil.py:268 +msgid "element is not a child" +msgstr "" + +#: cinder/api/xmlutil.py:417 +msgid "root element selecting a list" +msgstr "" + +#: cinder/api/xmlutil.py:742 +#, python-format +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgstr "" + +#: cinder/api/xmlutil.py:861 +msgid "subclasses must implement construct()!" 
+msgstr "" + +#: cinder/api/contrib/admin_actions.py:80 +#, python-format +msgid "Updating %(resource)s '%(id)s' with '%(update)r'" +msgstr "" + +#: cinder/api/contrib/backups.py:128 +#, python-format +msgid "show called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:140 +#, python-format +msgid "delete called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:143 +#, python-format +msgid "Delete backup with id: %s" +msgstr "" + +#: cinder/api/contrib/backups.py:185 +#, python-format +msgid "Creating new backup %s" +msgstr "" + +#: cinder/api/contrib/backups.py:195 cinder/api/contrib/backups.py:230 +msgid "Incorrect request body format" +msgstr "" + +#: cinder/api/contrib/backups.py:201 +#, python-format +msgid "Creating backup of volume %(volume_id)s in container %(container)s" +msgstr "" + +#: cinder/api/contrib/backups.py:221 +#, python-format +msgid "Restoring backup %(backup_id)s (%(body)s)" +msgstr "" + +#: cinder/api/contrib/backups.py:234 +#, python-format +msgid "Restoring backup %(backup_id)s to volume %(volume_id)s" +msgstr "" + +#: cinder/api/contrib/extended_snapshot_attributes.py:61 +msgid "Snapshot not found." +msgstr "" + +#: cinder/api/contrib/hosts.py:83 cinder/api/openstack/wsgi.py:158 +msgid "cannot understand XML" +msgstr "" + +#: cinder/api/contrib/hosts.py:133 +#, python-format +msgid "Host '%s' could not be found." +msgstr "" + +#: cinder/api/contrib/hosts.py:162 +#, python-format +msgid "Invalid status: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:165 +#, python-format +msgid "Invalid update setting: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:177 +#, python-format +msgid "Setting host %(host)s to %(state)s." +msgstr "" + +#: cinder/api/contrib/hosts.py:202 +msgid "Describe-resource is admin only functionality" +msgstr "" + +#: cinder/api/contrib/hosts.py:210 +msgid "Host not found" +msgstr "" + +#: cinder/api/contrib/quotas.py:63 +msgid "Quota limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:102 +msgid "Request body empty" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:106 +#: cinder/api/v1/snapshot_metadata.py:77 cinder/api/v1/volume_metadata.py:77 +#: cinder/api/v2/snapshot_metadata.py:77 +msgid "Request body and URI mismatch" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:109 +#: cinder/api/v1/snapshot_metadata.py:81 cinder/api/v1/volume_metadata.py:81 +#: cinder/api/v2/snapshot_metadata.py:81 +msgid "Request body contains too many items" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:159 +msgid "Invalid request body" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:163 +msgid "No image_name was specified in request." +msgstr "" + +#: cinder/api/middleware/fault.py:45 +#, python-format +msgid "Caught error: %s" +msgstr "" + +#: cinder/api/middleware/fault.py:54 cinder/api/openstack/wsgi.py:886 +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "" + +#: cinder/api/middleware/sizelimit.py:54 cinder/api/middleware/sizelimit.py:63 +#: cinder/api/middleware/sizelimit.py:77 +msgid "Request is too large." 
+msgstr "" + +#: cinder/api/openstack/__init__.py:74 +msgid "Must specify an ExtensionManager class" +msgstr "" + +#: cinder/api/openstack/__init__.py:85 +#, python-format +msgid "Extended resource: %s" +msgstr "" + +#: cinder/api/openstack/__init__.py:110 +#, python-format +msgid "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" +msgstr "" + +#: cinder/api/openstack/__init__.py:115 +#, python-format +msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgstr "" + +#: cinder/api/openstack/__init__.py:128 +msgid "" +"cinder.api.openstack:FaultWrapper is deprecated. Please use " +"cinder.api.middleware.fault:FaultWrapper instead." +msgstr "" + +#: cinder/api/openstack/urlmap.py:25 +msgid "" +"cinder.api.openstack.urlmap:urlmap_factory is deprecated. Please use " +"cinder.api.urlmap:urlmap_factory instead." +msgstr "" + +#: cinder/api/openstack/wsgi.py:134 cinder/api/openstack/wsgi.py:537 +msgid "cannot understand JSON" +msgstr "" + +#: cinder/api/openstack/wsgi.py:542 +msgid "too many body keys" +msgstr "" + +#: cinder/api/openstack/wsgi.py:581 +#, python-format +msgid "Exception handling resource: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:586 +#, python-format +msgid "Fault thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:589 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:697 +msgid "Unrecognized Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:701 +msgid "No Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:705 +msgid "Empty body provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:816 +#, python-format +msgid "There is no such action: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:819 cinder/api/openstack/wsgi.py:832 +#: cinder/api/v1/snapshot_metadata.py:55 cinder/api/v1/snapshot_metadata.py:73 +#: cinder/api/v1/snapshot_metadata.py:98 cinder/api/v1/snapshot_metadata.py:123 +#: cinder/api/v1/volume_metadata.py:55 cinder/api/v1/volume_metadata.py:73 +#: cinder/api/v1/volume_metadata.py:98 cinder/api/v1/volume_metadata.py:123 +#: cinder/api/v2/snapshot_metadata.py:55 cinder/api/v2/snapshot_metadata.py:73 +#: cinder/api/v2/snapshot_metadata.py:98 cinder/api/v2/snapshot_metadata.py:123 +msgid "Malformed request body" +msgstr "" + +#: cinder/api/openstack/wsgi.py:829 +msgid "Unsupported Content-Type" +msgstr "" + +#: cinder/api/openstack/wsgi.py:841 +msgid "Malformed request url" +msgstr "" + +#: cinder/api/openstack/wsgi.py:889 +#, python-format +msgid "%(url)s returned a fault: %(e)s" +msgstr "" + +#: cinder/api/openstack/volume/__init__.py:25 +msgid "" +"cinder.api.openstack.volume:APIRouter is deprecated. Please use " +"cinder.api.v1.router:APIRouter instead." +msgstr "" + +#: cinder/api/openstack/volume/versions.py:26 +msgid "" +"cinder.api.openstack.volume.versions.Versions is deprecated. Please use " +"cinder.api.versions.Versions instead." +msgstr "" + +#: cinder/api/v1/limits.py:144 cinder/api/v2/limits.py:144 +#, python-format +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." +msgstr "" + +#: cinder/api/v1/limits.py:270 cinder/api/v2/limits.py:270 +msgid "This request was rate-limited." 
+msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:39 cinder/api/v1/snapshot_metadata.py:119 +#: cinder/api/v1/snapshot_metadata.py:158 cinder/api/v2/snapshot_metadata.py:39 +#: cinder/api/v2/snapshot_metadata.py:119 +#: cinder/api/v2/snapshot_metadata.py:158 +msgid "snapshot does not exist" +msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:141 +#: cinder/api/v1/snapshot_metadata.py:151 cinder/api/v1/volume_metadata.py:141 +#: cinder/api/v1/volume_metadata.py:151 cinder/api/v2/snapshot_metadata.py:141 +#: cinder/api/v2/snapshot_metadata.py:151 +msgid "Metadata item was not found" +msgstr "" + +#: cinder/api/v1/snapshots.py:123 cinder/api/v2/snapshots.py:123 +#, python-format +msgid "Delete snapshot with id: %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:173 cinder/api/v2/snapshots.py:179 +#, python-format +msgid "Create snapshot from volume %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:177 cinder/api/v2/snapshots.py:188 +#, python-format +msgid "Invalid value '%s' for force. " +msgstr "" + +#: cinder/api/v1/volume_metadata.py:39 cinder/api/v1/volume_metadata.py:119 +#: cinder/api/v1/volume_metadata.py:158 +msgid "volume does not exist" +msgstr "" + +#: cinder/api/v1/volumes.py:106 +#, python-format +msgid "vol=%s" +msgstr "" + +#: cinder/api/v1/volumes.py:236 cinder/api/v2/volumes.py:153 +#, python-format +msgid "Delete volume with id: %s" +msgstr "" + +#: cinder/api/v1/volumes.py:278 cinder/api/v1/volumes.py:282 +#: cinder/api/v2/volumes.py:209 cinder/api/v2/volumes.py:213 +msgid "Invalid imageRef provided." +msgstr "" + +#: cinder/api/v1/volumes.py:339 cinder/api/v2/volumes.py:272 +#, python-format +msgid "Create volume of %s GB" +msgstr "" + +#: cinder/api/v1/volumes.py:418 +#, python-format +msgid "Removing options '%(bad_options)s' from query" +msgstr "" + +#: cinder/api/v2/volumes.py:359 +#, python-format +msgid "Removing options '%s' from query" +msgstr "" + +#: cinder/backup/api.py:53 +msgid "Backup status must be available or error" +msgstr "" + +#: cinder/backup/api.py:78 +msgid "Volume to be backed up must be available" +msgstr "" + +#: cinder/backup/api.py:112 +msgid "Backup status must be available" +msgstr "" + +#: cinder/backup/api.py:117 +msgid "Backup to be restored has invalid size" +msgstr "" + +#: cinder/backup/api.py:126 +#, python-format +msgid "Creating volume of %(size)s GB for restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/api.py:140 +#, python-format +msgid "" +"volume size %(volume_size)d is too small to restore backup of size " +"%(size)d." 
+msgstr "" + +#: cinder/backup/api.py:145 +msgid "Volume to be restored to must be available" +msgstr "" + +#: cinder/backup/api.py:151 +msgid "Volume to be restored to is smaller than the backup to be restored" +msgstr "" + +#: cinder/backup/api.py:155 +#, python-format +msgid "Overwriting volume %(volume_id)s with restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/manager.py:79 +msgid "Cleaning up incomplete backup operations" +msgstr "" + +#: cinder/backup/manager.py:83 +#, python-format +msgid "Resetting volume %s to available (was backing-up)" +msgstr "" + +#: cinder/backup/manager.py:87 +#, python-format +msgid "Resetting volume %s to error_restoring (was restoring-backup)" +msgstr "" + +#: cinder/backup/manager.py:98 +#, python-format +msgid "Resetting backup %s to error (was creating)" +msgstr "" + +#: cinder/backup/manager.py:104 +#, python-format +msgid "Resetting backup %s to available (was restoring)" +msgstr "" + +#: cinder/backup/manager.py:109 +#, python-format +msgid "Resuming delete on backup: %s" +msgstr "" + +#: cinder/backup/manager.py:119 +#, python-format +msgid "create_backup started, backup: %(backup_id)s for volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:128 +#, python-format +msgid "" +"create_backup aborted, expected volume status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:137 +#, python-format +msgid "" +"create_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:160 +#, python-format +msgid "create_backup finished. backup: %s" +msgstr "" + +#: cinder/backup/manager.py:166 +#, python-format +msgid "" +"restore_backup started, restoring backup: %(backup_id)s to volume: " +"%(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:175 +#, python-format +msgid "" +"restore_backup aborted, expected volume status %(expected_status)s but " +"got %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:183 +#, python-format +msgid "" +"restore_backup aborted, expected backup status %(expected_status)s but " +"got %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:199 +#, python-format +msgid "" +"restore_backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]" +msgstr "" + +#: cinder/backup/manager.py:220 +#, python-format +msgid "" +"restore_backup finished, backup: %(backup_id)s restored to volume: " +"%(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:228 +#, python-format +msgid "delete_backup started, backup: %s" +msgstr "" + +#: cinder/backup/manager.py:234 +#, python-format +msgid "" +"delete_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:244 +#, python-format +msgid "" +"delete_backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]" +msgstr "" + +#: cinder/backup/manager.py:264 +#, python-format +msgid "delete_backup finished, backup %s deleted" +msgstr "" + +#: cinder/backup/services/swift.py:96 +#, python-format +msgid "unsupported compression algorithm: %s" +msgstr "" + +#: cinder/backup/services/swift.py:117 +#, python-format +msgid "_check_container_exists: container: %s" +msgstr "" + +#: cinder/backup/services/swift.py:122 +#, python-format +msgid "container %s does 
not exist" +msgstr "" + +#: cinder/backup/services/swift.py:127 +#, python-format +msgid "container %s exists" +msgstr "" + +#: cinder/backup/services/swift.py:133 +#, python-format +msgid "_create_container started, container: %(container)s,backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:148 +#, python-format +msgid "_generate_swift_object_name_prefix: %s" +msgstr "" + +#: cinder/backup/services/swift.py:159 +#, python-format +msgid "generated object list: %s" +msgstr "" + +#: cinder/backup/services/swift.py:169 +#, python-format +msgid "" +"_write_metadata started, container name: %(container)s, metadata " +"filename: %(filename)s" +msgstr "" + +#: cinder/backup/services/swift.py:184 +#, python-format +msgid "" +"error writing metadata file to swift, MD5 of metadata file in swift " +"[%(etag)s] is not the same as MD5 of metadata file sent to swift " +"[%(md5)s]" +msgstr "" + +#: cinder/backup/services/swift.py:188 +msgid "_write_metadata finished" +msgstr "" + +#: cinder/backup/services/swift.py:193 +#, python-format +msgid "" +"_read_metadata started, container name: %(container)s, metadata filename:" +" %(filename)s" +msgstr "" + +#: cinder/backup/services/swift.py:197 +#, python-format +msgid "_read_metadata finished (%s)" +msgstr "" + +#: cinder/backup/services/swift.py:207 +#, python-format +msgid "volume size %d is invalid." +msgstr "" + +#: cinder/backup/services/swift.py:221 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to swift, volume size: " +"%(volume_size_bytes)d, swift object names prefix %(object_prefix)s, " +"availability zone: %(availability_zone)s" +msgstr "" + +#: cinder/backup/services/swift.py:237 +msgid "reading chunk of data from volume" +msgstr "" + +#: cinder/backup/services/swift.py:244 +#, python-format +msgid "" +"compressed %(data_size_bytes)d bytes of data to %(comp_size_bytes)d bytes" +" using %(algorithm)s" +msgstr "" + +#: cinder/backup/services/swift.py:248 +msgid "not compressing data" +msgstr "" + +#: cinder/backup/services/swift.py:252 +msgid "About to put_object" +msgstr "" + +#: cinder/backup/services/swift.py:257 +#, python-format +msgid "swift MD5 for %(object_name)s: %(etag)s" +msgstr "" + +#: cinder/backup/services/swift.py:260 +#, python-format +msgid "backup MD5 for %(object_name)s: %(md5)s" +msgstr "" + +#: cinder/backup/services/swift.py:262 +#, python-format +msgid "" +"error writing object to swift, MD5 of object in swift %(etag)s is not the" +" same as MD5 of object sent to swift %(md5)s" +msgstr "" + +#: cinder/backup/services/swift.py:268 +msgid "Calling eventlet.sleep(0)" +msgstr "" + +#: cinder/backup/services/swift.py:276 +#, python-format +msgid "backup %s finished." +msgstr "" + +#: cinder/backup/services/swift.py:281 +#, python-format +msgid "v1 swift volume backup restore of %s started" +msgstr "" + +#: cinder/backup/services/swift.py:287 +#, python-format +msgid "metadata_object_names = %s" +msgstr "" + +#: cinder/backup/services/swift.py:293 +msgid "" +"restore_backup aborted, actual swift object list in swift does not match " +"object list stored in metadata" +msgstr "" + +#: cinder/backup/services/swift.py:299 +#, python-format +msgid "" +"restoring object from swift. 
backup: %(backup_id)s, container: " +"%(container)s, swift object name: %(object_name)s, volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:309 +#, python-format +msgid "decompressing data using %s algorithm" +msgstr "" + +#: cinder/backup/services/swift.py:323 +#, python-format +msgid "v1 swift volume backup restore of %s finished" +msgstr "" + +#: cinder/backup/services/swift.py:331 +#, python-format +msgid "" +"starting restore of backup %(object_prefix)s from swift container: " +"%(container)s, to volume %(volume_id)s, backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:339 +#, python-format +msgid "Restoring swift backup version %s" +msgstr "" + +#: cinder/backup/services/swift.py:344 +#, python-format +msgid "No support to restore swift backup version %s" +msgstr "" + +#: cinder/backup/services/swift.py:348 +#, python-format +msgid "restore %(backup_id)s to %(volume_id)s finished." +msgstr "" + +#: cinder/backup/services/swift.py:362 +msgid "swift error while listing objects, continuing with delete" +msgstr "" + +#: cinder/backup/services/swift.py:371 +#, python-format +msgid "swift error while deleting object %s, continuing with delete" +msgstr "" + +#: cinder/backup/services/swift.py:374 +#, python-format +msgid "deleted swift object: %(swift_object_name)s in container: %(container)s" +msgstr "" + +#: cinder/backup/services/swift.py:380 +#, python-format +msgid "delete %s finished" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:152 +#, python-format +msgid "Creating iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:171 cinder/brick/iscsi/iscsi.py:264 +#: cinder/brick/iscsi/iscsi.py:400 cinder/brick/iscsi/iscsi.py:409 +#, python-format +msgid "Failed to create iscsi target for volume id:%(vol_id)s." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:181 +#, python-format +msgid "" +"Failed to create iscsi target for volume id:%(vol_id)s. Please ensure " +"your tgtd config file contains 'include %(volumes_dir)s/*'" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:192 +#, python-format +msgid "Removing iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:209 cinder/brick/iscsi/iscsi.py:426 +#, python-format +msgid "Failed to remove iscsi target for volume id:%(vol_id)s." 
+msgstr "" + +#: cinder/brick/iscsi/iscsi.py:218 cinder/brick/iscsi/iscsi.py:434 +msgid "valid iqn needed for show_target" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:270 +#, python-format +msgid "Removing iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:356 +msgid "rtstool is not installed correctly" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:376 +#, python-format +msgid "Creating iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:416 +#, python-format +msgid "Removing iscsi_target: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:455 +#, python-format +msgid "Failed to add initiator iqn %s to target" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:35 +#, python-format +msgid "Unable to find Volume Group: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:41 +#, python-format +msgid "Failed to create Volume Group: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:75 +msgid "Error creating Volume Group" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:76 +#, python-format +msgid "Cmd :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:77 +#, python-format +msgid "StdOut :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:78 +#, python-format +msgid "StdErr :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:82 +#, python-format +msgid "Unable to locate Volume Group %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:265 +#, python-format +msgid "Unable to find VG: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:284 +msgid "" +"Requested to setup thin provisioning, however current LVM version does " +"not support it." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:337 +#, python-format +msgid "Unable to find LV: %s" +msgstr "" + +#: cinder/common/sqlalchemyutils.py:68 +msgid "Id not in sort_keys; is sort_keys unique?" +msgstr "" + +#: cinder/common/sqlalchemyutils.py:116 +msgid "Unknown sort direction, must be 'desc' or 'asc'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:51 +msgid "Use of empty request context is deprecated" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:178 +#, python-format +msgid "Unrecognized read_deleted value '%s'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:786 +#, python-format +msgid "Change will make usage less than 0 for the following resources: %(unders)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1797 cinder/db/sqlalchemy/api.py:1823 +#, python-format +msgid "No backend config with id %(sm_backend_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1880 +#, python-format +msgid "No sm_flavor called %(sm_flavor)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1924 +#, python-format +msgid "No sm_volume with id %(volume_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1987 +#, python-format +msgid "No backup with id %(backup_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:74 +msgid "version should be an integer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:101 +msgid "Upgrade DB using Essex release first." +msgstr "" + +#: cinder/db/sqlalchemy/session.py:71 +#, python-format +msgid "Got mysql server has gone away: %s" +msgstr "" + +#: cinder/db/sqlalchemy/session.py:132 +#, python-format +msgid "SQL connection failed. %s attempts left." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:243 +msgid "Exception while creating table." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:272 +msgid "Downgrade from initial Cinder install is unsupported." 
+msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:85 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:123 +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:82 +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:47 +#, python-format +msgid "Table |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:135 +msgid "quota_classes table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:142 +msgid "quota_usages table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:149 +msgid "reservations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:62 +msgid "Exception while creating table 'volume_glance_metedata'" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:77 +msgid "volume_glance_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:94 +msgid "backups table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:60 +msgid "snapshot_metadata table not dropped" +msgstr "" + +#: cinder/image/glance.py:151 +#, python-format +msgid "" +"Error contacting glance server '%(host)s:%(port)s' for '%(method)s', " +"%(extra)s." +msgstr "" + +#: cinder/image/image_utils.py:116 +msgid "Snapshot list encountered but no header found!" +msgstr "" + +#: cinder/image/image_utils.py:224 +msgid "'qemu-img info' parsing failed." +msgstr "" + +#: cinder/image/image_utils.py:231 +#, python-format +msgid "fmt=%(fmt)s backed by:%(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:248 +#, python-format +msgid "Converted to raw, but format is now %s" +msgstr "" + +#: cinder/image/image_utils.py:278 +#, python-format +msgid "Converted to %(f1)s, but format is now %(f2)s" +msgstr "" + +#: cinder/openstack/common/exception.py:104 +msgid "Uncaught exception" +msgstr "" + +#: cinder/openstack/common/excutils.py:48 +#, python-format +msgid "Original exception being dropped: %s" +msgstr "" + +#: cinder/openstack/common/lockutils.py:102 +#, python-format +msgid "Could not release the acquired lock `%s`" +msgstr "" + +#: cinder/openstack/common/lockutils.py:188 +#, python-format +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:199 +#, python-format +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:226 +#, python-format +msgid "Got file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:234 +#, python-format +msgid "Released file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." 
+msgstr "" + +#: cinder/openstack/common/log.py:226 +#, python-format +msgid "Deprecated: %s" +msgstr "" + +#: cinder/openstack/common/log.py:329 +#, python-format +msgid "Error loading logging config %(log_config)s: %(err_msg)s" +msgstr "" + +#: cinder/openstack/common/log.py:379 +#, python-format +msgid "syslog facility must be one of: %s" +msgstr "" + +#: cinder/openstack/common/log.py:537 +#, python-format +msgid "Fatal call to deprecated config: %(msg)s" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:84 +#, python-format +msgid "task run outlasted interval by %s sec" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:91 +msgid "in fixed duration looping call" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:131 +#, python-format +msgid "Dynamic looping call sleeping for %.02f seconds" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:138 +msgid "in dynamic looping call" +msgstr "" + +#: cinder/openstack/common/policy.py:149 +#, python-format +msgid "" +"Inheritance-based rules are deprecated; use the default brain instead of " +"%s." +msgstr "" + +#: cinder/openstack/common/policy.py:163 +#, python-format +msgid "Failed to understand rule %(match)r" +msgstr "" + +#: cinder/openstack/common/policy.py:173 +#, python-format +msgid "Inheritance-based rules are deprecated; update _check_%s" +msgstr "" + +#: cinder/openstack/common/policy.py:180 +#, python-format +msgid "No handler for matches of kind %s" +msgstr "" + +#: cinder/openstack/common/service.py:112 +#: cinder/openstack/common/service.py:275 +msgid "Full set of CONF:" +msgstr "" + +#: cinder/openstack/common/service.py:261 +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "" + +#: cinder/openstack/common/strutils.py:72 +#, python-format +msgid "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:125 +#, python-format +msgid "%s not in valid priorities" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:141 +#, python-format +msgid "" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:171 +#, python-format +msgid "Failed to load notifier %s. These notifications will not be sent." +msgstr "" + +#: cinder/openstack/common/notifier/rabbit_notifier.py:27 +msgid "The rabbit_notifier is now deprecated. Please use rpc_notifier instead." +msgstr "" + +#: cinder/openstack/common/notifier/rpc_notifier.py:45 +#: cinder/openstack/common/notifier/rpc_notifier2.py:51 +#, python-format +msgid "Could not send notification to %(topic)s. Payload=%(message)s" +msgstr "" + +#: cinder/openstack/common/rpc/__init__.py:106 +#, python-format +msgid "" +"A RPC is being made while holding a lock. The locks currently held are " +"%(locks)s. This is probably a bug. Please report it. Include the " +"following: [%(stack)s]." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:75 +msgid "Pool creating new connection" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:200 +#, python-format +msgid "no calling threads waiting for msg_id : %s, message : %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:208 +#, python-format +msgid "" +"Number of call waiters is greater than warning threshhold: %d. There " +"could be a MulticallProxyWaiter leak." 
+msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:291 +#, python-format +msgid "unpacked context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:337 +#, python-format +msgid "UNIQUE_ID is %s." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:406 +#, python-format +msgid "received %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:414 +#, python-format +msgid "no method for message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:415 +#, python-format +msgid "No method for message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:443 +#: cinder/openstack/common/rpc/impl_zmq.py:286 +#, python-format +msgid "Expected exception during message handling (%s)" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:451 +#: cinder/openstack/common/rpc/impl_zmq.py:292 +msgid "Exception during message handling" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:586 +#, python-format +msgid "Making synchronous call on %s ..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:589 +#, python-format +msgid "MSG_ID is %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:623 +#, python-format +msgid "Making asynchronous cast on %s..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:632 +msgid "Making asynchronous fanout cast..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:660 +#, python-format +msgid "Sending %(event_type)s on %(topic)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:74 +msgid "An unknown RPC related exception occurred." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:104 +#, python-format +msgid "" +"Remote error: %(exc_type)s %(value)s\n" +"%(traceback)s." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:121 +#, python-format +msgid "" +"Timeout while waiting on RPC response - topic: \"%(topic)s\", RPC method:" +" \"%(method)s\" info: \"%(info)s\"" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:137 +#: cinder/openstack/common/rpc/common.py:138 +#: cinder/openstack/common/rpc/common.py:139 +msgid "" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:143 +#, python-format +msgid "Found duplicate message(%(msg_id)s). Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:147 +msgid "Invalid reuse of an RPC connection." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:151 +#, python-format +msgid "Specified RPC version, %(version)s, not supported by this endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:156 +#, python-format +msgid "" +"Specified RPC envelope version, %(version)s, not supported by this " +"endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:280 +#, python-format +msgid "Failed to sanitize %(item)s. Key error %(err)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:302 +#, python-format +msgid "Returning exception %s to caller" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:169 +#: cinder/openstack/common/rpc/impl_qpid.py:133 +msgid "Failed to process message... skipping it." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:480 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:502 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:539 +#, python-format +msgid "" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:555 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:609 +#: cinder/openstack/common/rpc/impl_qpid.py:403 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:627 +#: cinder/openstack/common/rpc/impl_qpid.py:418 +#, python-format +msgid "Timed out waiting for RPC response: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:631 +#: cinder/openstack/common/rpc/impl_qpid.py:422 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:670 +#: cinder/openstack/common/rpc/impl_qpid.py:457 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:351 +#, python-format +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:357 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:370 +msgid "Re-established AMQP queues" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:430 +msgid "Error processing message. Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:97 +msgid "JSON serialization failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:104 +#, python-format +msgid "Deserializing: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:140 +#, python-format +msgid "Connecting to %(addr)s with %(type)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:141 +#, python-format +msgid "-> Subscribed to %(subscribe)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:142 +#, python-format +msgid "-> bind: %(bind)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:150 +msgid "Could not open socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:162 +#, python-format +msgid "Subscribing to %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:204 +msgid "You cannot recv on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:209 +msgid "You cannot send on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:273 +#, python-format +msgid "Running func with context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:311 +msgid "Sending reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:345 +msgid "RPC message did not include method." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:380 +msgid "Registering reactor" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:392 +msgid "In reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:407 +msgid "Out reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:411 +msgid "Consuming socket" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:453 +#, python-format +msgid "CONSUMER GOT %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:465 +#, python-format +msgid "Creating proxy for topic: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:471 +msgid "Topic contained dangerous characters." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:496 +#, python-format +msgid "ROUTER RELAY-OUT SUCCEEDED %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:505 +msgid "Topic socket file creation failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:510 +#, python-format +msgid "ROUTER RELAY-OUT QUEUED %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:513 +#, python-format +msgid "Local per-topic backlog buffer full for topic %(topic)s. Dropping message." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:532 +#, python-format +msgid "Could not create IPC directory %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:542 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:576 +#, python-format +msgid "CONSUMER RECEIVED DATA: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:578 +#, python-format +msgid "ROUTER RELAY-OUT %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:600 +msgid "ZMQ Envelope version unsupported or unknown." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:628 +msgid "Skipping topic registration. Already registered." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:635 +#, python-format +msgid "Consumer is a zmq.%s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:687 +msgid "Creating payload" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:700 +msgid "Creating queue socket for reply waiter" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:713 +msgid "Sending cast" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:716 +msgid "Cast sent; Waiting reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:719 +#, python-format +msgid "Received message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:720 +msgid "Unpacking response" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:729 +msgid "Unsupported or unknown ZMQ envelope returned." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:736 +msgid "RPC Message Invalid." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:760 +#, python-format +msgid "%(msg)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:763 +#, python-format +msgid "Sending message(s) to: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:767 +msgid "No matchmaker results. Not casting." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:770 +msgid "No match from matchmaker." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:53 +msgid "Match not found by MatchMaker." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:89 +msgid "Matchmaker does not implement registration or heartbeat." 
+msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:239 +#, python-format +msgid "Matchmaker unregistered: %s, %s" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:250 +msgid "Register before starting heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:343 +#: cinder/openstack/common/rpc/matchmaker.py:361 +#, python-format +msgid "No key defining hosts for topic '%s', see ringfile" +msgstr "" + +#: cinder/scheduler/chance.py:51 cinder/scheduler/simple.py:90 +msgid "Is the appropriate service running?" +msgstr "" + +#: cinder/scheduler/chance.py:56 +msgid "Could not find another host" +msgstr "" + +#: cinder/scheduler/driver.py:89 +msgid "Must implement a fallback schedule" +msgstr "" + +#: cinder/scheduler/driver.py:93 +msgid "Must implement schedule_create_volume" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:113 +msgid "Invalid value for 'scheduler_max_attempts', must be >=1" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:131 +#, python-format +msgid "" +"Error scheduling %(volume_id)s from last vol-service: %(last_host)s : " +"%(exc)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:160 +#, python-format +msgid "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:208 +#, python-format +msgid "Filtered %(hosts)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:214 +#, python-format +msgid "Choosing %(best_host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:240 +#, python-format +msgid "Ignoring %(service_name)s service update from %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:244 +#, python-format +msgid "Received %(service_name)s service update from %(host)s." +msgstr "" + +#: cinder/scheduler/host_manager.py:266 +msgid "service is down or disabled." +msgstr "" + +#: cinder/scheduler/manager.py:119 +#, python-format +msgid "Failed to schedule_%(method)s: %(ex)s" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:69 +#, python-format +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:78 +#, python-format +msgid "Could not decode scheduler options: '%(e)s'" +msgstr "" + +#: cinder/scheduler/simple.py:79 +msgid "Not enough allocatable volume gigabytes remaining" +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:37 +msgid "Free capacity not set: volume node info collection broken." +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:51 +#, python-format +msgid "" +"Insufficient free space for volume creation (requested / avail): " +"%(requested)s/%(available)s" +msgstr "" + +#: cinder/scheduler/filters/retry_filter.py:41 +#, python-format +msgid "Host %(host)s %(pass_msg)s. 
Previously tried hosts: %(hosts)s" +msgstr "" + +#: cinder/tests/fake_driver.py:45 cinder/volume/driver.py:506 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "" + +#: cinder/tests/fake_driver.py:93 cinder/tests/fake_driver.py:98 +#, python-format +msgid "LoggingVolumeDriver: %s" +msgstr "" + +#: cinder/tests/fake_utils.py:72 +#, python-format +msgid "Faking execution of cmd (subprocess): %s" +msgstr "" + +#: cinder/tests/fake_utils.py:80 +#, python-format +msgid "Faked command matched %s" +msgstr "" + +#: cinder/tests/fake_utils.py:96 +#, python-format +msgid "Faked command raised an exception %s" +msgstr "" + +#: cinder/tests/fake_utils.py:101 +#, python-format +msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" +msgstr "" + +#: cinder/tests/test_misc.py:59 +#, python-format +msgid "" +"The following migrations are missing a downgrade:\n" +"\t%s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:243 +#, python-format +msgid "unrecognized argument %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1217 +#, python-format +msgid "Run CLI command: %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1220 +#, python-format +msgid "" +"CLI output:\n" +" stdout: %(stdout)s\n" +" stderr: %(stderr)s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1225 +#, python-format +msgid "" +"CLI Exception output:\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/tests/test_volume_types.py:58 +#, python-format +msgid "Given data: %s" +msgstr "" + +#: cinder/tests/test_volume_types.py:59 +#, python-format +msgid "Result data: %s" +msgstr "" + +#: cinder/tests/api/contrib/test_backups.py:638 +msgid "Invalid input" +msgstr "" + +#: cinder/tests/integrated/test_login.py:31 +#, python-format +msgid "volume: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:37 +#, python-format +msgid "" +"%(message)s\n" +"Status Code: %(_status)s\n" +"Body: %(_body)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:46 +msgid "Authentication error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:54 +msgid "Authorization error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:62 +msgid "Item not found" +msgstr "" + +#: cinder/tests/integrated/api/client.py:104 +#, python-format +msgid "Doing %(method)s on %(relative_url)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:106 +#, python-format +msgid "Body: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:124 +#, python-format +msgid "%(auth_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:150 +#, python-format +msgid "%(relative_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:160 +msgid "Unexpected status code" +msgstr "" + +#: cinder/tests/integrated/api/client.py:167 +#, python-format +msgid "Decoding JSON: %s" +msgstr "" + +#: cinder/volume/api.py:96 +msgid "May specify only one of snapshot, imageRef or source volume" +msgstr "" + +#: cinder/volume/api.py:103 cinder/volume/api.py:449 +#: cinder/volume/manager.py:554 +msgid "status must be available" +msgstr "" + +#: cinder/volume/api.py:108 +msgid "Volume size cannot be lesser than the Snapshot size" +msgstr "" + +#: cinder/volume/api.py:117 +msgid "Unable to clone volumes that are in an error state" +msgstr "" + +#: cinder/volume/api.py:123 +msgid "Clones currently must be >= original volume size." 
+msgstr "" + +#: cinder/volume/api.py:140 +#, python-format +msgid "Volume size '%s' must be an integer and greater than 0" +msgstr "" + +#: cinder/volume/api.py:150 +msgid "Size of specified image is larger than volume size." +msgstr "" + +#: cinder/volume/api.py:154 +msgid "Image minDisk size is larger than the volume size." +msgstr "" + +#: cinder/volume/api.py:168 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/api.py:177 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create volume (%(d_consumed)d " +"volumes already consumed)" +msgstr "" + +#: cinder/volume/api.py:308 +msgid "Failed to update quota for deleting volume" +msgstr "" + +#: cinder/volume/api.py:316 +msgid "Volume status must be available or error" +msgstr "" + +#: cinder/volume/api.py:321 +#, python-format +msgid "Volume still has %d dependent snapshots" +msgstr "" + +#: cinder/volume/api.py:374 cinder/volume/api.py:432 +#: cinder/volume/volume_types.py:65 +#, python-format +msgid "Searching by: %s" +msgstr "" + +#: cinder/volume/api.py:452 +msgid "already attached" +msgstr "" + +#: cinder/volume/api.py:459 +msgid "already detached" +msgstr "" + +#: cinder/volume/api.py:470 +msgid "Volume status must be available to reserve" +msgstr "" + +#: cinder/volume/api.py:519 +msgid "must be available" +msgstr "" + +#: cinder/volume/api.py:537 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG snapshot " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/api.py:546 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create snapshot (%(d_consumed)d " +"snapshots already consumed)" +msgstr "" + +#: cinder/volume/api.py:595 +msgid "Volume Snapshot status must be available or error" +msgstr "" + +#: cinder/volume/api.py:623 +msgid "Metadata property key blank" +msgstr "" + +#: cinder/volume/api.py:627 +msgid "Metadata property key greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:631 +msgid "Metadata property value greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:716 +msgid "Volume status must be available/in-use." +msgstr "" + +#: cinder/volume/api.py:719 +msgid "Volume status is in-use." +msgstr "" + +#: cinder/volume/driver.py:94 cinder/volume/drivers/netapp/nfs.py:253 +#, python-format +msgid "Recovering from a failed execute. Try number %s" +msgstr "" + +#: cinder/volume/driver.py:221 cinder/volume/drivers/emc/emc_smis_iscsi.py:119 +msgid "ISCSI provider_location not stored, using discovery" +msgstr "" + +#: cinder/volume/driver.py:269 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/driver.py:273 cinder/volume/drivers/emc/emc_smis_iscsi.py:162 +#, python-format +msgid "ISCSI Discovery: Found %s" +msgstr "" + +#: cinder/volume/driver.py:361 +#, python-format +msgid "copy_image_to_volume %s." +msgstr "" + +#: cinder/volume/driver.py:378 +#, python-format +msgid "copy_volume_to_image %s." +msgstr "" + +#: cinder/volume/driver.py:437 +#, python-format +msgid "iSCSI device not found at %s" +msgstr "" + +#: cinder/volume/driver.py:439 +#, python-format +msgid "" +"ISCSI volume not yet found at: %(host_device)s. Will rescan & retry. 
Try" +" number: %(tries)s" +msgstr "" + +#: cinder/volume/driver.py:451 +#, python-format +msgid "Found iSCSI node %(host_device)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/volume/driver.py:469 cinder/volume/manager.py:689 +#: cinder/volume/drivers/lvm.py:560 cinder/volume/drivers/lvm.py:676 +#: cinder/volume/drivers/storwize_svc.py:1344 +#: cinder/volume/drivers/emc/emc_smis_common.py:855 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:241 +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1503 +#: cinder/volume/drivers/netapp/iscsi.py:1114 +#: cinder/volume/drivers/netapp/iscsi.py:1484 +#: cinder/volume/drivers/netapp/iscsi.py:2236 +#: cinder/volume/drivers/netapp/iscsi.py:2515 +#: cinder/volume/drivers/nexenta/volume.py:317 +msgid "Updating volume status" +msgstr "" + +#: cinder/volume/driver.py:544 +msgid "Driver must implement initialize_connection" +msgstr "" + +#: cinder/volume/manager.py:122 +#, python-format +msgid "Driver path %s is deprecated, update your configuration to the new path." +msgstr "" + +#: cinder/volume/manager.py:144 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "" + +#: cinder/volume/manager.py:149 +#, python-format +msgid "volume %s stuck in a downloading state" +msgstr "" + +#: cinder/volume/manager.py:154 +#, python-format +msgid "volume %s: skipping export" +msgstr "" + +#: cinder/volume/manager.py:156 +msgid "Resuming any in progress delete operations" +msgstr "" + +#: cinder/volume/manager.py:159 +#, python-format +msgid "Resuming delete on volume: %s" +msgstr "" + +#: cinder/volume/manager.py:219 +#, python-format +msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +msgstr "" + +#: cinder/volume/manager.py:228 +#, python-format +msgid "volume %s: creating from snapshot" +msgstr "" + +#: cinder/volume/manager.py:232 +#, python-format +msgid "volume %s: creating from existing volume" +msgstr "" + +#: cinder/volume/manager.py:236 +#, python-format +msgid "volume %s: creating from image" +msgstr "" + +#: cinder/volume/manager.py:245 +#, python-format +msgid "volume %s: creating" +msgstr "" + +#: cinder/volume/manager.py:277 +#, python-format +msgid "volume %s: creating export" +msgstr "" + +#: cinder/volume/manager.py:286 +#, python-format +msgid "volume %s: create failed" +msgstr "" + +#: cinder/volume/manager.py:316 +#, python-format +msgid "volume %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:324 +#, python-format +msgid "Error: %s" +msgstr "" + +#: cinder/volume/manager.py:350 +#, python-format +msgid "volume %s: Error trying to reschedule create" +msgstr "" + +#: cinder/volume/manager.py:368 +msgid "Retry info not present, will not reschedule" +msgstr "" + +#: cinder/volume/manager.py:372 +msgid "No request spec, will not reschedule" +msgstr "" + +#: cinder/volume/manager.py:377 +#, python-format +msgid "volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d" +msgstr "" + +#: cinder/volume/manager.py:406 cinder/volume/manager.py:419 +#, python-format +msgid "volume %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:412 +msgid "volume is not local to this node" +msgstr "" + +#: cinder/volume/manager.py:417 +#, python-format +msgid "volume %s: removing export" +msgstr "" + +#: cinder/volume/manager.py:422 +#, python-format +msgid "volume %s: volume is busy" +msgstr "" + +#: cinder/volume/manager.py:441 +msgid "Failed to update usages deleting volume" +msgstr "" + +#: cinder/volume/manager.py:445 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "" + +#: 
cinder/volume/manager.py:460 +#, python-format +msgid "snapshot %s: creating" +msgstr "" + +#: cinder/volume/manager.py:466 +#, python-format +msgid "snapshot %(snap_name)s: creating" +msgstr "" + +#: cinder/volume/manager.py:484 +#, python-format +msgid "snapshot %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:492 cinder/volume/manager.py:502 +#, python-format +msgid "snapshot %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:505 +#, python-format +msgid "snapshot %s: snapshot is busy" +msgstr "" + +#: cinder/volume/manager.py:530 +msgid "Failed to update usages deleting snapshot" +msgstr "" + +#: cinder/volume/manager.py:533 +#, python-format +msgid "snapshot %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:551 +msgid "being attached by another instance" +msgstr "" + +#: cinder/volume/manager.py:608 +#, python-format +msgid "Downloaded image %(image_id)s to %(volume_id)s successfully" +msgstr "" + +#: cinder/volume/manager.py:626 +#, python-format +msgid "Uploaded volume %(volume_id)s to image (%(image_id)s) successfully" +msgstr "" + +#: cinder/volume/manager.py:702 +msgid "Clear capabilities" +msgstr "" + +#: cinder/volume/manager.py:706 +#, python-format +msgid "Notification {%s} received" +msgstr "" + +#: cinder/volume/volume_types.py:41 +#, python-format +msgid "DB error: %s" +msgstr "" + +#: cinder/volume/volume_types.py:50 cinder/volume/volume_types.py:97 +msgid "id cannot be None" +msgstr "" + +#: cinder/volume/volume_types.py:109 +msgid "name cannot be None" +msgstr "" + +#: cinder/volume/volume_types.py:128 +#, python-format +msgid "" +"Default volume type is not found, please check default_volume_type " +"config: %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:91 +msgid "Running with CoraidDriver for ESM EtherCloud" +msgstr "" + +#: cinder/volume/drivers/coraid.py:102 +#, python-format +msgid "Update session cookie %(session)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:108 cinder/volume/drivers/coraid.py:183 +#, python-format +msgid "Message: %(message)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:125 +#, python-format +msgid "Error while trying to set group: %(message)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:128 +#, python-format +msgid "Unable to find group: %(group)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:161 +msgid "ESM urlOpen error" +msgstr "" + +#: cinder/volume/drivers/coraid.py:166 +msgid "JSON Error" +msgstr "" + +#: cinder/volume/drivers/coraid.py:170 +msgid "Request without URL" +msgstr "" + +#: cinder/volume/drivers/coraid.py:175 +#, python-format +msgid "Configure data: %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:177 +#, python-format +msgid "Configure response: %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:199 +#, python-format +msgid "Unable to retrieve volume info for volume %(volname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:289 +msgid "Cannot log in to Coraid ESM" +msgstr "" + +#: cinder/volume/drivers/coraid.py:308 +#, python-format +msgid "Failed to create volume %(volname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:321 +#, python-format +msgid "Failed to delete volume %(volname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:335 +#, python-format +msgid "Failed to Create Snapshot %(snapname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:347 +#, python-format +msgid "Failed to Delete Snapshot %(snapname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:362 +#, python-format +msgid "Failed to Create Volume from Snapshot 
%(snapname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:383 +#, python-format +msgid "" +"Failed to Initialize Connection. Volume Name: %(volname)s Shelf: " +"%(shelf)s, Lun: %(lun)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:65 +#, python-format +msgid "There's no Gluster config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:70 +#, python-format +msgid "Gluster config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:80 +msgid "mount.glusterfs is not installed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:98 cinder/volume/drivers/nfs.py:168 +#, python-format +msgid "casted to %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:108 cinder/volume/drivers/nfs.py:178 +#, python-format +msgid "Volume %s does not have provider_location specified, skipping" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:168 cinder/volume/drivers/nfs.py:237 +#, python-format +msgid "Exception during mounting %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:247 cinder/volume/drivers/nfs.py:321 +#, python-format +msgid "%s is already mounted" +msgstr "" + +#: cinder/volume/drivers/lvm.py:82 +#, python-format +msgid "volume group %s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/lvm.py:197 +#, python-format +msgid "Size for volume: %s not found, skipping secure delete." +msgstr "" + +#: cinder/volume/drivers/lvm.py:204 +#, python-format +msgid "Performing secure delete on volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:218 +#, python-format +msgid "Error unrecognized volume_clear option: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:238 +#, python-format +msgid "snapshot: %s not found, skipping delete operations" +msgstr "" + +#: cinder/volume/drivers/lvm.py:268 cinder/volume/drivers/lvm.py:656 +#, python-format +msgid "Creating clone of volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:345 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provision for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:364 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:378 +msgid "Detected inconsistency in provider_location id" +msgstr "" + +#: cinder/volume/drivers/lvm.py:430 +#, python-format +msgid "Symbolic link %s not found" +msgstr "" + +#: cinder/volume/drivers/lvm.py:510 cinder/volume/drivers/lvm.py:524 +#, python-format +msgid "Skipping remove_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:542 +#, python-format +msgid "" +"Skipping remove_export. 
No iscsi_target is presently exported for volume:" +" %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:583 +msgid "Error retrieving volume status: " +msgstr "" + +#: cinder/volume/drivers/nfs.py:141 +#, python-format +msgid "There's no NFS config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/nfs.py:146 +#, python-format +msgid "NFS config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/rbd.py:64 +#, python-format +msgid "rbd has no pool %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:87 cinder/volume/drivers/sheepdog.py:134 +msgid "error refreshing volume stats" +msgstr "" + +#: cinder/volume/drivers/rbd.py:220 +msgid "Not stored in rbd" +msgstr "" + +#: cinder/volume/drivers/rbd.py:224 +msgid "Blank components" +msgstr "" + +#: cinder/volume/drivers/rbd.py:227 +msgid "Not an rbd snapshot" +msgstr "" + +#: cinder/volume/drivers/rbd.py:242 +#, python-format +msgid "%s is in a different ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:253 +#, python-format +msgid "Unable to read image %s" +msgstr "" + +#: cinder/volume/drivers/scality.py:63 +msgid "Value required for 'scality_sofs_config'" +msgstr "" + +#: cinder/volume/drivers/scality.py:74 +#, python-format +msgid "Cannot access 'scality_sofs_config': %s" +msgstr "" + +#: cinder/volume/drivers/scality.py:80 +msgid "Cannot execute /sbin/mount.sofs" +msgstr "" + +#: cinder/volume/drivers/scality.py:101 +msgid "Cannot mount Scality SOFS, check syslog for errors" +msgstr "" + +#: cinder/volume/drivers/scality.py:135 +#, python-format +msgid "Cannot find volume dir for Scality SOFS at '%s'" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:47 +#, python-format +msgid "Sheepdog is not working: %s" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:52 +msgid "Sheepdog is not working" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:129 +#, python-format +msgid "Payload for SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:146 +#, python-format +msgid "Call to json.loads() raised an exception: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:152 +#, python-format +msgid "Results of SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:156 +#, python-format +msgid "Clone operation encountered: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:158 +#, python-format +msgid "Waiting for outstanding operation before retrying snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:164 +#, python-format +msgid "Detected xDBVersionMismatch, retry %s of 5" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:171 +#, python-format +msgid "API response: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:191 +#, python-format +msgid "Found solidfire account: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:220 +#, python-format +msgid "solidfire account: %s does not exist, create it..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:281 +#, python-format +msgid "Failed to retrieve volume SolidFire-ID: %s in get_by_account!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:338 +msgid "Failed to get model update from clone" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:364 +#, python-format +msgid "More than one valid preset was detected, using %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:397 +#, python-format +msgid "Mapped SolidFire volumeID %(sfid)s to cinder ID %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:406 +#, python-format +msgid "Volume %s, not found on SF Cluster." 
+msgstr "" + +#: cinder/volume/drivers/solidfire.py:409 +#, python-format +msgid "Found %(count)s volumes mapped to id: %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:476 +msgid "Enter SolidFire delete_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:480 +#, python-format +msgid "Account for Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:482 +msgid "This usually means the volume was never succesfully created." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:497 +#, python-format +msgid "Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:500 +msgid "Leaving SolidFire delete_volume" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:504 +msgid "Executing SolidFire ensure_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:509 +msgid "Executing SolidFire create_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:558 +msgid "Updating cluster status info" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:566 +msgid "Failed to get updated stats" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:173 +#, python-format +msgid "WWPN on node %(node)s: %(wwpn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:179 +msgid "enter: do_setup" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:190 +#, python-format +msgid "pool %s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:206 +msgid "Failed to get license information." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:265 +msgid "do_setup: No configured nodes" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:267 +msgid "leave: do_setup" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:289 +msgid "enter: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:295 +#: cinder/volume/drivers/netapp/iscsi.py:169 +#: cinder/volume/drivers/netapp/iscsi.py:1182 +#: cinder/volume/drivers/netapp/iscsi.py:1535 +#: cinder/volume/drivers/netapp/nfs.py:109 +#: cinder/volume/drivers/netapp/nfs.py:328 +#: cinder/volume/drivers/netapp/nfs.py:379 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:118 +#, python-format +msgid "%s is not set" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:301 +msgid "" +"Password or SSH private key is required for authentication: set either " +"san_password or san_private_key option" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:309 +#, python-format +msgid "" +"Illegal value %d specified for storwize_svc_flashcopy_timeout: valid " +"values are between 0 and 600" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:317 +msgid "leave: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:331 +#, python-format +msgid "ensure_export: Volume %s not found on storage" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:356 +#, python-format +msgid "enter: _get_chap_secret_for_host: host name %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:392 +#, python-format +msgid "" +"leave: _get_chap_secret_for_host: host name %(host_name)s with secret " +"%(chap_secret)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:415 +msgid "_create_host: Cannot clean host name. 
Host name is not unicode or string" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:483 +#, python-format +msgid "enter: _get_host_from_connector: prefix %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:511 +#, python-format +msgid "leave: _get_host_from_connector: host %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:523 +#, python-format +msgid "enter: _create_host: host %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:538 +msgid "_create_host: No connector ports" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:551 +#, python-format +msgid "leave: _create_host: host %(host)s - %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:574 +#, python-format +msgid "enter: _map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:609 +msgid "" +"storwize_svc_multihostmap_enabled is set to False, multi host " +"mapping is not allowed" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:619 +#, python-format +msgid "volume %s is mapped to multiple hosts" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:625 +#, python-format +msgid "" +"leave: _map_vol_to_host: LUN %(result_lun)s, volume %(volume_name)s, host" +" %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:635 +#, python-format +msgid "enter: _delete_host: host %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:643 +#, python-format +msgid "leave: _delete_host: host %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:675 +#, python-format +msgid "enter: initialize_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:691 +msgid "_create_host failed to return the host name." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:702 +#, python-format +msgid "initialize_connection: Failed to get attributes for volume %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:709 +#, python-format +msgid "Did not find expected column name in lsvdisk: %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:711 +#, python-format +msgid "initialize_connection: Missing volume attribute for volume %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:728 +#, python-format +msgid "" +"initialize_connection: No node found in I/O group %(gid)s for volume " +"%(vol)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:736 +#, python-format +msgid "initialize_connection: Did not find a preferred node for volume %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:769 +#, python-format +msgid "" +"initialize_connection: Failed to collect return properties for volume " +"%(vol)s and connector %(conn)s.\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:774 +#, python-format +msgid "" +"leave: initialize_connection:\n" +" volume: %(vol)s\n" +" connector %(conn)s\n" +" properties: %(prop)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:792 +#, python-format +msgid "enter: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:802 +msgid "_get_host_from_connector failed to return the host name for connector" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:817 +#, python-format +msgid "" +"terminate_connection: No mapping of volume %(vol_name)s to host " +"%(host_name)s found" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:825 +#, python-format +msgid "leave: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: 
cinder/volume/drivers/storwize_svc.py:883 +msgid "protocol must be specified as ' iSCSI' or ' FC'" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:907 +#, python-format +msgid "enter: _create_vdisk: vdisk %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:945 +#, python-format +msgid "" +"_create_vdisk %(name)s - did not find success message in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:950 +#, python-format +msgid "leave: _create_vdisk: volume %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:962 +#: cinder/volume/drivers/storwize_svc.py:976 +#, python-format +msgid "" +"create FC mapping from %(source)s to %(target)s - did not find success " +"message in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:988 +#: cinder/volume/drivers/storwize_svc.py:998 +#, python-format +msgid "" +"create FC mapping from %(source)s to %(target)s - did not find mapping id" +" in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1012 +#, python-format +msgid "" +"_prepare_fc_map: Failed to prepare FlashCopy from %(source)s to " +"%(target)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1039 +#, python-format +msgid "" +"Unexpected mapping status %(status)s for mapping %(id)s. Attributes: " +"%(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1050 +#, python-format +msgid "" +"Mapping %(id)s prepare failed to complete within the allotted %(to)d " +"seconds timeout. Terminating." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1055 +#, python-format +msgid "" +"_prepare_fc_map: Failed to start FlashCopy from %(source)s to %(target)s " +"with exception %(ex)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1062 +#, python-format +msgid "_prepare_fc_map: %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1069 +#, python-format +msgid "" +"_start_fc_map: Failed to start FlashCopy from %(source)s to %(target)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1080 +#, python-format +msgid "" +"enter: _run_flashcopy: execute FlashCopy from source %(source)s to target" +" %(target)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1092 +#, python-format +msgid "leave: _run_flashcopy: FlashCopy started from %(source)s to %(target)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1100 +#, python-format +msgid "enter: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1107 +#, python-format +msgid "_create_copy: Source vdisk %s does not exist" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1119 +#, python-format +msgid "" +"_create_copy: cannot get source vdisk %(src)s capacity from vdisk " +"attributes %(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1129 +#, python-format +msgid "leave: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1134 +#, python-format +msgid "enter: _get_flashcopy_mapping_attributes: mapping %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1156 +#, python-format +msgid "" +"leave: _get_flashcopy_mapping_attributes: mapping %(fc_map_id)s, " +"attributes %(attributes)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1164 +#, python-format +msgid 
"enter: _is_vdisk_defined: vdisk %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1166 +#, python-format +msgid "leave: _is_vdisk_defined: vdisk %(vol)s with %(str)s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1194 +#, python-format +msgid "enter: _delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1199 +#, python-format +msgid "warning: Tried to delete vdisk %s but it does not exist." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1225 +#, python-format +msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1258 +#, python-format +msgid "leave: _delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1283 +msgid "create_volume_from_snapshot: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1297 +msgid "create_cloned_volume: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1361 +msgid "_update_volume_status: Could not get system name" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1373 +msgid "Could not get pool data from the storage" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1374 +msgid "_update_volume_status: Could not get storage pool data" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1412 +msgid "" +"Illegal value specified for storwize_svc_vol_rsize: set to either a " +"percentage (0-100) or -1" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1418 +msgid "" +"Illegal value specified for storwize_svc_vol_warning: set to a percentage" +" (0-100)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1425 +msgid "" +"Illegal value specified for storwize_svc_vol_grainsize: set to either 32," +" 64, 128, or 256" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1432 +msgid "System does not support compression" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1437 +msgid "If compression is set to True, rsize must also be set (not equal to -1)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1443 +#, python-format +msgid "" +"Illegal value %(prot)s specified for storwize_svc_connection_protocol: " +"valid values are %(enabled)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1452 +msgid "" +"Multipath is currently only supported for FC connections and not iSCSI. 
" +"(This is a Nova limitation.)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1464 +#, python-format +msgid "enter: _execute_command_and_parse_attributes: command %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1471 +#, python-format +msgid "" +"CLI Exception output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1488 +#, python-format +msgid "" +"leave: _execute_command_and_parse_attributes:\n" +"command: %(cmd)s\n" +"attributes: %(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1507 +#, python-format +msgid "" +"_get_hdr_dic: attribute headers and values do not match.\n" +" Headers: %(header)s\n" +" Values: %(row)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1517 +#, python-format +msgid "" +"%(fun)s: Failed with unexpected CLI output.\n" +" Command: %(cmd)s\n" +"stdout: %(out)s\n" +"stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1531 +#, python-format +msgid "" +"%(fun)s: Failed with unexpected CLI output.\n" +" Command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1539 +#, python-format +msgid "Did not find expected column in %(fun)s: %(hdr)s" +msgstr "" + +#: cinder/volume/drivers/windows.py:142 +#, python-format +msgid "Creating folder %s " +msgstr "" + +#: cinder/volume/drivers/windows.py:207 +#: cinder/volume/drivers/nexenta/volume.py:203 +#, python-format +msgid "Ignored target creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/windows.py:213 +#, python-format +msgid "Disk not found: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:218 +#, python-format +msgid "Sending %(method)s to %(url)s. Body \"%(body)s\"" +msgstr "" + +#: cinder/volume/drivers/zadara.py:240 +#, python-format +msgid "Operation completed. %(data)s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:350 +#, python-format +msgid "Volume %(name)s could not be found. It might be already deleted" +msgstr "" + +#: cinder/volume/drivers/zadara.py:447 +#, python-format +msgid "Attach properties: %(properties)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:42 +msgid "" +"Module PyWBEM not installed. Install PyWBEM using the python-pywbem " +"package." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:78 +msgid "Entering create_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:82 +#, python-format +msgid "Create Volume: %(volume)s Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:90 +#, python-format +msgid "Create Volume: %(volume)s Storage type: %(storage_type)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:97 +#, python-format +msgid "" +"Create Volume: %(volume)s Pool: %(pool)s Storage System: " +"%(storage_system)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:106 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Storage Configuration Service not " +"found for pool %(storage_type)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:114 +#, python-format +msgid "" +"Create Volume: %(name)s Method: CreateOrModifyElementFromStoragePool " +"ConfigServicie: %(service)s ElementName: %(name)s InPool: %(pool)s " +"ElementType: 5 Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:129 +#, python-format +msgid "Create Volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:136 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:143 +#, python-format +msgid "Leaving create_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:151 +msgid "Entering create_volume_from_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:156 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:166 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Snapshot Instance: %(snapshotinstance)s Storage " +"System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:176 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Create Volume from Snapshot is NOT supported on VMAX." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:187 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Cannot find Replication Service to create volume from " +"snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:196 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Method: CreateElementReplica ReplicationService: " +"%(service)s ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:217 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s " +"Snapshot:%(snapshotname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:229 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Successfully clone volume from snapshot. Finding the " +"clone relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:240 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Remove the clone relationship. Method: " +"ModifyReplicaSynchronization ReplicationService: %(service)s Operation: " +"8 Synchronization: %(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:256 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:265 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. 
Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:277 +#, python-format +msgid "" +"Leaving create_volume_from_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:286 +msgid "Entering create_cloned_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:291 +#, python-format +msgid "" +"Create a Clone from Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:301 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Source Instance: %(src_instance)s Storage System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:311 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Cannot find Replication Service to create cloned volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:320 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Method: CreateElementReplica ReplicationService: %(service)s " +"ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:341 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source " +"Volume:%(srcname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:353 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Successfully cloned volume from source volume. Finding the clone " +"relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:364 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Remove the clone relationship. Method: ModifyReplicaSynchronization " +"ReplicationService: %(service)s Operation: 8 Synchronization: " +"%(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:380 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:389 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:401 +#, python-format +msgid "" +"Leaving create_cloned_volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:410 +msgid "Entering delete_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:412 +#, python-format +msgid "Delete Volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:419 +#, python-format +msgid "Volume %(name)s not found on the array. No volume to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:429 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Storage Configuration Service not " +"found." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:437 +#, python-format +msgid "Delete Volume: %(name)s DeviceID: %(deviceid)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:441 +#, python-format +msgid "" +"Delete Volume: %(name)s Method: EMCReturnToStoragePool ConfigServic: " +"%(service)s TheElement: %(vol_instance)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:454 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:463 +#, python-format +msgid "Leaving delete_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:470 +msgid "Entering create_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:474 +#, python-format +msgid "Create snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:486 +#, python-format +msgid "Device ID: %(deviceid)s: Storage System: %(storagesystem)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:493 +#: cinder/volume/drivers/emc/emc_smis_common.py:495 +#: cinder/volume/drivers/emc/emc_smis_common.py:565 +#, python-format +msgid "Cannot find Replication Service to create snapshot for volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:500 +#, python-format +msgid "" +"Create Snapshot: Method: CreateElementReplica: Target: %(snapshot)s " +"Source: %(volume)s Replication Service: %(service)s ElementName: " +"%(elementname)s Sync Type: 7 SourceElement: %(sourceelement)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:516 +#, python-format +msgid "" +"Create Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:525 +#, python-format +msgid "Error Create Snapshot: (snapshot)s Volume: %(volume)s Error: %(errordesc)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:533 +#, python-format +msgid "" +"Leaving create_snapshot: Snapshot: %(snapshot)s Volume: %(volume)s " +"Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:539 +msgid "Entering delete_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:543 +#, python-format +msgid "Delete Snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:549 +#, python-format +msgid "" +"Delete Snapshot: %(snapshot)s: volume: %(volume)s. Finding " +"StorageSychronization_SV_SV." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:557 +#, python-format +msgid "" +"Snapshot: %(snapshot)s: volume: %(volume)s not found on the array. No " +"snapshot to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:572 +#, python-format +msgid "" +"Delete Snapshot: Target: %(snapshot)s Source: %(volume)s. Method: " +"ModifyReplicaSynchronization: Replication Service: %(service)s " +"Operation: 19 Synchronization: %(sync_name)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:588 +#, python-format +msgid "" +"Delete Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:597 +#, python-format +msgid "" +"Error Delete Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. 
Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:609 +#, python-format +msgid "" +"Leaving delete_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:619 +#, python-format +msgid "Create export: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:624 +#, python-format +msgid "create_export: Volume: %(volume)s Device ID: %(device_id)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:646 +#, python-format +msgid "" +"ExposePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(lun_name)s" +" InitiatorPortIDs: %(initiator)s DeviceAccesses: 2" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:661 +#, python-format +msgid "ExposePaths parameter LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:672 +#, python-format +msgid "Error mapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:676 +#, python-format +msgid "ExposePaths for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:692 +#, python-format +msgid "" +"HidePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(device_id)s " +"LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:705 +#, python-format +msgid "Error unmapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:709 +#, python-format +msgid "HidePaths for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:722 +#, python-format +msgid "" +"AddMembers: ConfigServicie: %(service)s MaskingGroup: %(masking_group)s" +" Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:735 +#, python-format +msgid "Error mapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:740 +#, python-format +msgid "AddMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:753 +#, python-format +msgid "" +"RemoveMembers: ConfigServicie: %(service)s MaskingGroup: " +"%(masking_group)s Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:766 +#, python-format +msgid "Error unmapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:771 +#, python-format +msgid "RemoveMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:777 +#, python-format +msgid "Map volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:786 +#: cinder/volume/drivers/emc/emc_smis_common.py:816 +#, python-format +msgid "Cannot find Controller Configuration Service for storage system %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:800 +#, python-format +msgid "Unmap volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:806 +#, python-format +msgid "Volume %s is not mapped. No volume to unmap." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:830 +#, python-format +msgid "Initialize connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:836 +#, python-format +msgid "Volume %s is already mapped." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:848 +#, python-format +msgid "Terminate connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:880 +#, python-format +msgid "Found Storage Type: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:883 +msgid "Storage type not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:899 +#, python-format +msgid "Found Masking View: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:902 +msgid "Masking View not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:924 +msgid "Ecom user not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:944 +#, python-format +msgid "Ecom IP: %(ecomIp)s Port: %(ecomPort)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:947 +msgid "Ecom server not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:954 +msgid "Cannot connect to ECOM server" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:966 +#, python-format +msgid "Found Replication Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:979 +#, python-format +msgid "Found Storage Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:992 +#, python-format +msgid "Found Controller Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1005 +#, python-format +msgid "Found Storage Hardware ID Management Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1049 +#, python-format +msgid "Pool %(storage_type)s is not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1055 +#, python-format +msgid "Storage system not found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1061 +#, python-format +msgid "Pool: %(pool)s SystemName: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1077 +#, python-format +msgid "Pool name: %(poolname)s System name: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1109 +#, python-format +msgid "Volume %(volumename)s not found on the array." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1112 +#, python-format +msgid "Volume name: %(volumename)s Volume instance: %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1125 +#, python-format +msgid "Source: %(volumename)s Target: %(snapshotname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1148 +#, python-format +msgid "" +"Source: %(volumename)s Target: %(snapshotname)s. Storage Synchronized " +"not found. " +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1153 +#, python-format +msgid "" +"Storage system: %(storage_system)s Storage Synchronized instance: " +"%(sync)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1179 +#, python-format +msgid "Error finding %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1183 +#, python-format +msgid "Found %(name)s: %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1243 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage_system)s " +"and initiator %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1282 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage volume %(vol)s and initiator" +" %(initiator)s is %(ctrl)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1295 +#, python-format +msgid "" +"Volume %(name)s not found on the array. Cannot determine if there are " +"volumes mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1307 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage)s and " +"%(connector)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1319 +#, python-format +msgid "" +"Found %(numVolumesMapped)d volumes on storage system %(storage)s mapped " +"to %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1354 +#, python-format +msgid "Available device number on %(storage)s: %(device)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1397 +#, python-format +msgid "Device number not found for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1402 +#, python-format +msgid "Found device number %(device)d for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1412 +#, python-format +msgid "Device info: %(data)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1434 +#, python-format +msgid "Masking view: %(view)s DeviceMaskingGroup: %(masking)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1456 +#, python-format +msgid "Found Storage Processor System: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1484 +#, python-format +msgid "" +"iSCSIProtocolEndpoint for storage system %(storage_system)s and SP %(sp)s" +" is %(endpoint)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1513 +msgid "Error finding Storage Hardware ID Service." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1519 +#, python-format +msgid "" +"EMCGetTargetEndpoints: Service: %(service)s Storage HardwareIDs: " +"%(hardwareids)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1531 +msgid "Error finding Target WWNs." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1541 +#, python-format +msgid "Add target WWN: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1543 +#, python-format +msgid "Target WWNs: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1559 +#, python-format +msgid "Storage Hardware IDs for %(wwpns)s is %(foundInstances)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:158 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:167 +#, python-format +msgid "Cannot find device number for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:197 +#, python-format +msgid "Found iSCSI endpoint: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:204 +#, python-format +msgid "ISCSI endpoint not found for SP %(sp)s on storage system %(storage)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:221 +#, python-format +msgid "ISCSI properties: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:113 +msgid "read timed out" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:138 +msgid "do_setup." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:144 +msgid "check_for_setup_error." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:149 +msgid "check_for_setup_error: Can not get device type." 
+msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:153 +#, python-format +msgid "check_for_setup_error: Device type is:%(type)s, version is:%(version)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:160 +msgid "" +"check_for_setup_error: Product version not right. Please make sure the " +"product version is V1." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:171 +msgid "_get_device_type: Storage Pool must be configured." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:180 +#, python-format +msgid "create_volume:volume name: %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:194 +#, python-format +msgid "delete_volume: volume name: %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:201 +#, python-format +msgid "delete_volume:No need to delete volume. Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:210 +#, python-format +msgid "create_export: volume name:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:214 +#, python-format +msgid "create_export:Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:234 +#, python-format +msgid "initialize_connection: volume name: %(volume)s. initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:249 +#, python-format +msgid "" +"initialize_connection:Failed to find target ip for " +"initiator:%(initiatorname)s, please check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:259 +#, python-format +msgid "initialize_connection:Failed to find target iSCSI iqn. Target IP:%(ip)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:297 +#, python-format +msgid "" +"initialize_connection:host name: %(host)s, initiator name: %(ini)s, " +"hostport name: %(port)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:307 +#, python-format +msgid "" +"initialize_connection:Failed to find the given volume. volume " +"name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:359 +#, python-format +msgid "terminate_connection:volume name: %(volume)s, initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:368 +#, python-format +msgid "terminate_connection:Host does not exist. Host name:%(host)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:377 +#, python-format +msgid "terminate_connection:volume does not exist. volume name:%(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:396 +#, python-format +msgid "" +"terminate_connection:No map between host and volume. Host " +"name:%(hostname)s, volume name:%(volumename)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:414 +#, python-format +msgid "" +"terminate_connection:No initiator is added to the host. Host " +"name:%(hostname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:427 +#, python-format +msgid "create_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:434 +msgid "create_snapshot:Device does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:440 +msgid "create_snapshot:Resource pool needs 1GB valid size at least." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:447 +#, python-format +msgid "create_snapshot:Volume does not exist. 
Volume name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:456 +#, python-format +msgid "create_snapshot:Snapshot does not exist. Snapshot name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:468 +#, python-format +msgid "delete_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:475 +msgid "delete_snapshot:Device does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:484 +#, python-format +msgid "delete_snapshot:Snapshot does not exist. snapshot name:%(snap)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:497 +#, python-format +msgid "" +"create_volume_from_snapshot:snapshot name:%(snapshot)s, volume " +"name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:505 +#, python-format +msgid "" +"create_volume_from_snapshot:Device does not support create volume from " +"snapshot. Volume name:%(volume)s, snapshot name:%(snapshot)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:516 +#, python-format +msgid "" +"create_volume_from_snapshot:Snapshot does not exist. Snapshot " +"name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:567 +msgid "Config file is wrong. Controler IP, UserName and UserPassword must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:573 +#, python-format +msgid "_check_conf_file: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:584 +#, python-format +msgid "_read_xml:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:612 +#, python-format +msgid "Write login information to xml error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:616 +#, python-format +msgid "_get_login_info error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:638 +#, python-format +msgid "" +"Config file is wrong. LUNType must be \"Thin\" or \"Thick\". " +"LUNType:%(type)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:665 +msgid "_get_lun_set_info:Use default prefetch type. Prefetch type:Intelligent." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:676 +msgid "" +"_get_lun_set_info:No available pools! Please check whether storage pool " +"is created." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:686 +#, python-format +msgid "_get_lun_set_info:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:736 +msgid "" +"_get_maximum_pool:maxpoolid is None. Please check config file and make " +"sure the \"Name\" in \"StoragePool\" is right." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:754 +#, python-format +msgid "_get_iscsi_info:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:764 +#, python-format +msgid "CLI command:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:831 +#, python-format +msgid "_execute_cli:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:838 +#, python-format +msgid "" +"_name_translate:Name in cinder: %(old)s, new name in storage system: " +"%(new)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:872 +#, python-format +msgid "" +"_create_hostgroup:Failed to Create hostgroup. Hostgroup name: %(name)s. " +"out:%(out)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:901 +#, python-format +msgid "" +"_add_host:Failed to add host to hostgroup. 
host name:%(host)s hostgroup " +"id:%(hostgroup)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:927 +#, python-format +msgid "" +"_add_initiator:Failed to add initiator. initiator name:%(name)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:941 +#, python-format +msgid "" +"_delete_initiator:ERROE:Failed to delete initiator. initiator " +"name:%(name)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:991 +#, python-format +msgid "" +"_add_hostport:Failed to add hostport. port name:%(port)s port " +"information:%(info)s host id:%(host)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1009 +#, python-format +msgid "_delete_hostport:Failed to delete host port. port id:%(portid)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1016 +#, python-format +msgid "_get_tgt_iqn:iSCSI IP is %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1049 +#, python-format +msgid "_get_tgt_iqn:iSCSI target iqn is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1086 +#, python-format +msgid "" +"_map_lun:Failed to add hostmap. hostid:%(host)s lunid:%(lun)s " +"hostlunid:%(hostlunid)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1122 +#, python-format +msgid "" +"_delete_map:There are IOs accessing the system. Retry to delete host map." +" map id:%(mapid)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1128 +#, python-format +msgid "_delete_map:Failed to delete host map. mapid:%(mapid)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1142 +#, python-format +msgid "_delete_host: Failed delete host. host id:%(hostid)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1223 +#, python-format +msgid "" +"_active_snapshot:Failed to active snapshot. snapshot id:%(name)s. " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1237 +#, python-format +msgid "" +"_disable_snapshot:Failed to disable snapshot. snapshot id:%(id)s. " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1251 +#, python-format +msgid "" +"_delete_snapshot:Failed to delete snapshot. snapshot id:%(id)s. " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1308 +#, python-format +msgid "_create_volume:Failed to Create volume. volume name:%(name)s. out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1321 +#, python-format +msgid "_delete_volume:Failed to delete volume. Volume name:%(name)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1338 +#, python-format +msgid "" +"_create_luncopy:Failed to Create LUNcopy. LUNcopy name:%(name)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1352 +#, python-format +msgid "" +"_start_luncopy:Failed to start LUNcopy. LUNcopy id:%(luncopyid)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1382 +#, python-format +msgid "" +"_wait_for_luncopy:LUNcopy status is not normal. LUNcopy " +"name:%(luncopyname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1415 +#, python-format +msgid "" +"_delete_luncopy:Failed to delete LUNcopy. LUNcopy id:%(luncopyid)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1430 +#, python-format +msgid "" +"_create_snapshot:Failed to Create snapshot. 
Snapshot name:%(name)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1468 +#, python-format +msgid "" +"_change_lun_controller:Failed to change lun owning controller. lun " +"id:%(lunid)s. new controller:%(controller)s. out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1484 +msgid "_is_resource_pool_enough:Resource pool for snapshot not be added." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:123 +#, python-format +msgid "API %(name)s failed: %(reason)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:134 +#: cinder/volume/drivers/netapp/iscsi.py:1167 +#, python-format +msgid "Using WSDL: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:144 +#, python-format +msgid "Using DFM server: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:149 +#, python-format +msgid "Using storage service: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:154 +#, python-format +msgid "Using storage service prefix: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:160 +#, python-format +msgid "Using vfiler: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:173 +msgid "Either netapp_storage_service or netapp_storage_service_prefix must be set" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:204 +msgid "Connected to DFM server" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:282 +#, python-format +msgid "Discovered %(dataset_count)s datasets and %(lun_count)s LUNs" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:317 +#, python-format +msgid "Job failed: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:346 +msgid "" +"Attempt to use volume_type without specifying " +"netapp_storage_service_prefix flag." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:350 +msgid "" +"You must set the netapp_storage_service flag in order to create volumes " +"with no volume_type." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:420 +msgid "Failed to provision dataset member" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:435 +msgid "No LUN was created by the provision job" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:463 +#: cinder/volume/drivers/netapp/iscsi.py:1250 +#, python-format +msgid "No entry in LUN table for volume %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:481 +msgid "Failed to remove and delete dataset LUN member" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:497 +msgid "Failed to remove and delete dataset Qtree member" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:538 +#, python-format +msgid "No entry in LUN table for volume %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:558 +#, python-format +msgid "Failed to get LUN details for LUN ID %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:575 +#, python-format +msgid "Failed to get host details for host ID %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:812 +#: cinder/volume/drivers/netapp/iscsi.py:859 +#, python-format +msgid "No LUN ID for volume %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:821 +#, python-format +msgid "Failed to get target portal for filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:826 +#, python-format +msgid "Failed to get target IQN for filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:930 +msgid "" +"Failed to get LUN list. Is the DFM host time-synchronized with Cinder " +"host?" 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1042 +#: cinder/volume/drivers/netapp/iscsi.py:1381 +#: cinder/volume/drivers/netapp/iscsi.py:1699 +#: cinder/volume/drivers/netapp/nfs.py:74 +#, python-format +msgid "" +"Cannot create volume of size %(vol_size)s from snapshot of size " +"%(snap_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1054 +#, python-format +msgid "" +"Cannot create volume of type %(new_type)s from snapshot of type " +"%(old_type)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1075 +#: cinder/volume/drivers/netapp/nfs.py:273 +#, python-format +msgid "" +"Cannot create clone of size %(vol_size)s from volume of size " +"%(src_vol_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1086 +#, python-format +msgid "Cannot create clone of type %(new_type)s from volume of type %(old_type)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1143 +#, python-format +msgid "No metadata property %(prop)s defined for the LUN %(name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1216 +#: cinder/volume/drivers/netapp/iscsi.py:1561 +msgid "Success getting LUN list from server" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1238 +#: cinder/volume/drivers/netapp/iscsi.py:1576 +#, python-format +msgid "Created LUN with name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1254 +#: cinder/volume/drivers/netapp/iscsi.py:1369 +#: cinder/volume/drivers/netapp/iscsi.py:1593 +#, python-format +msgid "Destroyed LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1291 +#, python-format +msgid "Mapped LUN %(handle)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1298 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(handle)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1303 +#: cinder/volume/drivers/netapp/iscsi.py:1637 +#, python-format +msgid "Failed to get LUN target details for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1307 +#: cinder/volume/drivers/netapp/iscsi.py:1648 +#, python-format +msgid "Failed to get target portal for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1311 +#: cinder/volume/drivers/netapp/iscsi.py:1651 +#, python-format +msgid "Failed to get target IQN for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1344 +#, python-format +msgid "Unmapped LUN %(handle)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1365 +#, python-format +msgid "No entry in LUN table for snapshot %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1409 +#: cinder/volume/drivers/netapp/iscsi.py:1907 +msgid "Object is not a NetApp LUN." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1419 +#: cinder/volume/drivers/netapp/iscsi.py:2188 +#, python-format +msgid "Cloned LUN with new name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1439 +#, python-format +msgid "Could not find handle for LUN named %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1458 +#: cinder/volume/drivers/netapp/iscsi.py:1936 +#, python-format +msgid "" +"Cannot clone volume of size %(vol_size)s from src volume of size " +"%(src_vol_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1518 +#, python-format +msgid "Using NetApp filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1585 +#, python-format +msgid "No entry in LUN table for volume/snapshot %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1628 +#, python-format +msgid "Mapped LUN %(name)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1632 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(name)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1688 +#, python-format +msgid "Snapshot %s deletion successful" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1717 +#, python-format +msgid "Unmapped LUN %(name)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1735 +#, python-format +msgid "Failed to get vol with required size for volume: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1811 +#, python-format +msgid "Error mapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1829 +#, python-format +msgid "Error unmapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1923 +#, python-format +msgid "Could not find attribute for LUN named %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2057 +#, python-format +msgid "No iscsi service found for vserver %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2192 +#, python-format +msgid "No clonned lun named %s found on the filer" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2377 +#, python-format +msgid "Error finding luns for volume %(vol)s. Verify volume exists." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2481 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s completed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2484 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s failed" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:312 +#, python-format +msgid "" +"Cloning with params ip %(host_ip)s, exp_path\n" +" %(export_path)s, vol %(volume_name)s,\n" +" clone_name %(clone_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:461 +#, python-format +msgid "No interface found on cluster for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:487 +#, python-format +msgid "" +"No volume on cluster with vserver\n" +" %(vserver)s and junction path " +"%(junction)s\n" +" " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:493 +#, python-format +msgid "" +"Cloning with params volume %(volume)s,src %(src_path)s,\n" +" dest %(dest_path)s, vserver %(vserver)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:547 +#, python-format +msgid "No storage path found for export path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:555 +#, python-format +msgid "Cloning with src %(src_path)s, dest %(dest_path)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/__init__.py:27 +msgid "Nexenta SA returned the error" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:64 +#, python-format +msgid "Sending JSON data: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:69 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:75 +msgid "No headers in server response" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:76 +msgid "Bad response from server" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:79 +#, python-format +msgid "Got response: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:99 +#, python-format +msgid "Volume %s does not exist in Nexenta SA" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:211 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:220 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:228 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:238 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:274 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:282 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:74 +#, python-format +msgid "CLIQ command returned %s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:80 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:88 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. 
Result=%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/san/hp_lefthand.py:118
+#, python-format
+msgid ""
+"Unexpected number of virtual ips for cluster %(cluster_name)s. "
+"Result=%(_xml)s"
+msgstr ""
+
+#: cinder/volume/drivers/san/hp_lefthand.py:171
+#, python-format
+msgid "Volume info: %(volume_name)s => %(volume_attributes)s"
+msgstr ""
+
+#: cinder/volume/drivers/san/hp_lefthand.py:235
+msgid "local_path not supported"
+msgstr ""
+
+#: cinder/volume/drivers/san/san.py:148
+#, python-format
+msgid "Error running SSH command: %s"
+msgstr ""
+
+#: cinder/volume/drivers/san/san.py:169
+msgid "Specify san_password or san_private_key"
+msgstr ""
+
+#: cinder/volume/drivers/san/san.py:173
+msgid "san_ip must be set"
+msgstr ""
+
+#: cinder/volume/drivers/san/solaris.py:80
+#, python-format
+msgid "Cannot parse list-view output: %s"
+msgstr ""
+
+#: cinder/volume/drivers/san/solaris.py:174
+#, python-format
+msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/san/hp/hp_3par_common.py:257
+#, python-format
+msgid "SSH Command failed after '%(total_attempts)r' attempts: '%(command)s'"
+msgstr ""
+
+#: cinder/volume/drivers/san/hp/hp_3par_common.py:261
+#, python-format
+msgid "Error running ssh command: %s"
+msgstr ""
+
+#: cinder/volume/drivers/san/hp/hp_3par_common.py:446
+#: cinder/volume/drivers/san/hp/hp_3par_fc.py:104
+#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:103
+#, python-format
+msgid "CPG (%s) doesn't exist on array"
+msgstr ""
+
+#: cinder/volume/drivers/san/hp/hp_3par_common.py:493
+#, python-format
+msgid "Must specify a valid persona %(valid)s, value '%(persona)s' is invalid."
+msgstr ""
+
+#: cinder/volume/drivers/san/hp/hp_3par_common.py:532
+#, python-format
+msgid ""
+"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is "
+"invalid."
+msgstr ""
+
+#: cinder/volume/drivers/san/hp/hp_3par_common.py:569
+#, python-format
+msgid "Volume (%s) already exists on array"
+msgstr ""
+
+#: cinder/volume/drivers/san/hp/hp_3par_common.py:622
+#, python-format
+msgid "Unexpected state while cloning %s"
+msgstr ""
+
+#: cinder/volume/drivers/san/hp/hp_3par_fc.py:97
+#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:96
+msgid "Login to 3PAR array invalid"
+msgstr ""
+
+#: cinder/volume/drivers/san/hp/hp_3par_fc.py:218
+#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:237
+#, python-format
+msgid "The hostname must be called '%s'"
+msgstr ""
+
diff --git a/cinder/locale/pt_BR/LC_MESSAGES/cinder.po b/cinder/locale/pt_BR/LC_MESSAGES/cinder.po
new file mode 100644
index 0000000000..22b4551c9b
--- /dev/null
+++ b/cinder/locale/pt_BR/LC_MESSAGES/cinder.po
@@ -0,0 +1,5604 @@
+# Brazilian Portuguese translation for cinder
+# Copyright (c) 2011 Rosetta Contributors and Canonical Ltd 2011
+# This file is distributed under the same license as the cinder package.
+# FIRST AUTHOR <EMAIL@ADDRESS>, 2011.
+#
+msgid ""
+msgstr ""
+"Project-Id-Version: cinder\n"
+"Report-Msgid-Bugs-To: FULL NAME <EMAIL@ADDRESS>\n"
+"POT-Creation-Date: 2013-05-21 18:10+0000\n"
+"PO-Revision-Date: 2012-02-06 21:07+0000\n"
+"Last-Translator: Adriano Steffler \n"
+"Language-Team: Brazilian Portuguese <pt_BR@li.org>\n"
+"Plural-Forms: nplurals=2; plural=(n > 1)\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=utf-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Generated-By: Babel 0.9.6\n"
+
+#: cinder/context.py:61
+#, python-format
+msgid "Arguments dropped when creating context: %s"
+msgstr ""
+
+#: cinder/context.py:92
+#, python-format
+msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r"
+msgstr ""
+
+#: cinder/exception.py:63
+msgid "Unexpected error while running command."
+msgstr "Erro inesperado ao executar o comando."
+
+#: cinder/exception.py:66
+#, python-format
+msgid ""
+"%(description)s\n"
+"Command: %(cmd)s\n"
+"Exit code: %(exit_code)s\n"
+"Stdout: %(stdout)r\n"
+"Stderr: %(stderr)r"
+msgstr ""
+"%(description)s\n"
+"Comando: %(cmd)s\n"
+"Código de saída: %(exit_code)s\n"
+"Saída padrão: %(stdout)r\n"
+"Erro: %(stderr)r"
+
+#: cinder/exception.py:90
+msgid "DB exception wrapped."
+msgstr ""
+
+#: cinder/exception.py:104
+msgid "An unknown exception occurred."
+msgstr ""
+
+#: cinder/exception.py:125 cinder/openstack/common/rpc/common.py:86
+msgid "Exception in string format operation"
+msgstr ""
+
+#: cinder/exception.py:138
+msgid "Connection to glance failed"
+msgstr ""
+
+#: cinder/exception.py:142
+msgid "Not authorized."
+msgstr ""
+
+#: cinder/exception.py:147
+msgid "User does not have admin privileges"
+msgstr ""
+
+#: cinder/exception.py:151
+#, python-format
+msgid "Policy doesn't allow %(action)s to be performed."
+msgstr ""
+
+#: cinder/exception.py:155
+#, fuzzy, python-format
+msgid "Not authorized for image %(image_id)s."
+msgstr "sem método para mensagem: %s"
+
+#: cinder/exception.py:159
+msgid "Unacceptable parameters."
+msgstr ""
+
+#: cinder/exception.py:164
+msgid "Invalid snapshot"
+msgstr ""
+
+#: cinder/exception.py:168
+#, python-format
+msgid "Volume %(volume_id)s is still attached, detach volume first."
+msgstr ""
+
+#: cinder/exception.py:172
+msgid "Failed to load data into json format"
+msgstr ""
+
+#: cinder/exception.py:176
+msgid "The request is invalid."
+msgstr ""
+
+#: cinder/exception.py:180
+msgid "The results are invalid."
+msgstr ""
+
+#: cinder/exception.py:184
+msgid "Invalid input received"
+msgstr ""
+
+#: cinder/exception.py:188
+msgid "Invalid volume type"
+msgstr ""
+
+#: cinder/exception.py:192
+msgid "Invalid volume"
+msgstr ""
+
+#: cinder/exception.py:196
+#, python-format
+msgid "Invalid content type %(content_type)s."
+msgstr ""
+
+#: cinder/exception.py:200
+msgid "Invalid Parameter: Unicode is not supported by the current database."
+msgstr ""
+
+#: cinder/exception.py:207
+#, python-format
+msgid "%(err)s"
+msgstr ""
+
+#: cinder/exception.py:211
+msgid "Service is unavailable at this time."
+msgstr ""
+
+#: cinder/exception.py:215
+#, python-format
+msgid "Image %(image_id)s is unacceptable: %(reason)s"
+msgstr ""
+
+#: cinder/exception.py:219
+msgid "Expected a uuid but received %(uuid)."
+msgstr ""
+
+#: cinder/exception.py:223
+msgid "Resource could not be found."
+msgstr ""
+
+#: cinder/exception.py:229
+#, python-format
+msgid "Volume %(volume_id)s persistence file could not be found."
+msgstr ""
+
+#: cinder/exception.py:233
+#, python-format
+msgid "Volume %(volume_id)s could not be found."
+msgstr "" + +#: cinder/exception.py:237 +#, python-format +msgid "Unable to locate account %(account_name)s on Solidfire device" +msgstr "" + +#: cinder/exception.py:242 +#, python-format +msgid "Volume not found for instance %(instance_id)s." +msgstr "" + +#: cinder/exception.py:246 +#, python-format +msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:251 cinder/exception.py:264 +msgid "Invalid metadata" +msgstr "" + +#: cinder/exception.py:255 cinder/exception.py:268 +msgid "Invalid metadata size" +msgstr "" + +#: cinder/exception.py:259 +#, python-format +msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:272 +#, python-format +msgid "Volume type %(volume_type_id)s could not be found." +msgstr "" + +#: cinder/exception.py:276 +#, python-format +msgid "Volume type with name %(volume_type_name)s could not be found." +msgstr "" + +#: cinder/exception.py:281 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s has no extra specs with key " +"%(extra_specs_key)s." +msgstr "" + +#: cinder/exception.py:286 +#, python-format +msgid "Snapshot %(snapshot_id)s could not be found." +msgstr "" + +#: cinder/exception.py:290 +#, python-format +msgid "deleting volume %(volume_name)s that has snapshot" +msgstr "" + +#: cinder/exception.py:294 +#, python-format +msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" +msgstr "" + +#: cinder/exception.py:299 +#, python-format +msgid "No target id found for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:303 +#, fuzzy, python-format +msgid "Failed to create iscsi target for volume %(volume_id)s." +msgstr "Falha ao obter metadados para o ip: %s" + +#: cinder/exception.py:307 +#, python-format +msgid "Failed to attach iSCSI target for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:311 +#, python-format +msgid "Failed to remove iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:315 +#, python-format +msgid "No disk at %(location)s" +msgstr "" + +#: cinder/exception.py:319 +#, python-format +msgid "Invalid image href %(image_href)s." +msgstr "" + +#: cinder/exception.py:323 +#, python-format +msgid "Image %(image_id)s could not be found." +msgstr "" + +#: cinder/exception.py:327 +#, python-format +msgid "Service %(service_id)s could not be found." +msgstr "" + +#: cinder/exception.py:331 +#, python-format +msgid "Host %(host)s could not be found." +msgstr "" + +#: cinder/exception.py:335 +#, python-format +msgid "Scheduler Host Filter %(filter_name)s could not be found." +msgstr "" + +#: cinder/exception.py:339 +#, python-format +msgid "Scheduler Host Weigher %(weigher_name)s could not be found." +msgstr "" + +#: cinder/exception.py:343 +#, python-format +msgid "Could not find binary %(binary)s on host %(host)s." +msgstr "" + +#: cinder/exception.py:347 +#, python-format +msgid "Invalid reservation expiration %(expire)s." +msgstr "" + +#: cinder/exception.py:351 +#, python-format +msgid "" +"Change would make usage less than 0 for the following resources: " +"%(unders)s" +msgstr "" + +#: cinder/exception.py:356 +msgid "Quota could not be found" +msgstr "" + +#: cinder/exception.py:360 +#, python-format +msgid "Unknown quota resources %(unknown)s." +msgstr "" + +#: cinder/exception.py:364 +#, python-format +msgid "Quota for project %(project_id)s could not be found." 
+msgstr "" + +#: cinder/exception.py:368 +#, python-format +msgid "Quota class %(class_name)s could not be found." +msgstr "" + +#: cinder/exception.py:372 +#, python-format +msgid "Quota usage for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:376 +#, python-format +msgid "Quota reservation %(uuid)s could not be found." +msgstr "" + +#: cinder/exception.py:380 +#, python-format +msgid "Quota exceeded for resources: %(overs)s" +msgstr "" + +#: cinder/exception.py:384 +#, python-format +msgid "Migration %(migration_id)s could not be found." +msgstr "" + +#: cinder/exception.py:388 +#, python-format +msgid "Migration not found for instance %(instance_id)s with status %(status)s." +msgstr "" + +#: cinder/exception.py:393 +#, python-format +msgid "File %(file_path)s could not be found." +msgstr "" + +#: cinder/exception.py:397 +#, python-format +msgid "Class %(class_name)s could not be found: %(exception)s" +msgstr "" + +#: cinder/exception.py:401 +msgid "Action not allowed." +msgstr "" + +#: cinder/exception.py:410 +#, python-format +msgid "Key pair %(key_name)s already exists." +msgstr "" + +#: cinder/exception.py:414 +#, fuzzy, python-format +msgid "Volume Type %(id)s already exists." +msgstr "group %s já existe" + +#: cinder/exception.py:418 +msgid "Migration error" +msgstr "" + +#: cinder/exception.py:422 +#, python-format +msgid "Malformed message body: %(reason)s" +msgstr "" + +#: cinder/exception.py:426 +#, python-format +msgid "Could not find config at %(path)s" +msgstr "" + +#: cinder/exception.py:430 +#, python-format +msgid "Could not load paste app '%(name)s' from %(path)s" +msgstr "" + +#: cinder/exception.py:434 +#, python-format +msgid "No valid host was found. %(reason)s" +msgstr "" + +#: cinder/exception.py:438 +#, python-format +msgid "Host %(host)s is not up or doesn't exist." +msgstr "" + +#: cinder/exception.py:442 +msgid "Quota exceeded" +msgstr "" + +#: cinder/exception.py:449 +msgid "Requested volume or snapshot exceeds allowed Gigabytes quota" +msgstr "" + +#: cinder/exception.py:454 +msgid "Maximum volume/snapshot size exceeded" +msgstr "" + +#: cinder/exception.py:458 +#, python-format +msgid "Maximum number of volumes allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:462 +#, python-format +msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:466 +#, python-format +msgid "Detected more than one volume with name %(vol_name)s" +msgstr "" + +#: cinder/exception.py:470 +#, python-format +msgid "3PAR Host already exists: %(err)s. %(info)s" +msgstr "" + +#: cinder/exception.py:474 +#, python-format +msgid "Invalid 3PAR Domain: %(err)s" +msgstr "" + +#: cinder/exception.py:478 +#, python-format +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +msgstr "" + +#: cinder/exception.py:483 +msgid "Bad response from SolidFire API" +msgstr "" + +#: cinder/exception.py:487 +#, python-format +msgid "Error in SolidFire API response: data=%(data)s" +msgstr "" + +#: cinder/exception.py:491 +#, python-format +msgid "Unknown or unsupported command %(cmd)s" +msgstr "" + +#: cinder/exception.py:495 +#, python-format +msgid "Malformed response to command %(cmd)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:499 +#, python-format +msgid "Bad HTTP response status %(status)s" +msgstr "" + +#: cinder/exception.py:503 +#, python-format +msgid "Operation failed with status=%(status)s. 
Full dump: %(data)s" +msgstr "" + +#: cinder/exception.py:507 +#, fuzzy, python-format +msgid "Unable to create server object for initiator %(name)s" +msgstr "" +"Não é possível criar o VDI no SR %(sr_ref)s para a instância " +"%(instance_name)s" + +#: cinder/exception.py:511 +#, python-format +msgid "Unable to find server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:515 +msgid "Unable to find any active VPSA controller" +msgstr "" + +#: cinder/exception.py:519 +#, python-format +msgid "Failed to retrieve attachments for volume %(name)s" +msgstr "" + +#: cinder/exception.py:523 +#, python-format +msgid "Invalid attachment info for volume %(name)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:527 +#, python-format +msgid "Instance %(instance_id)s could not be found." +msgstr "" + +#: cinder/exception.py:531 +#, python-format +msgid "Bad or unexpected response from the storage volume backend API: %(data)s" +msgstr "" + +#: cinder/exception.py:536 +msgid "Unknown NFS exception" +msgstr "" + +#: cinder/exception.py:540 +msgid "No mounted NFS shares found" +msgstr "" + +#: cinder/exception.py:544 cinder/exception.py:556 +#, python-format +msgid "There is no share which can host %(volume_size)sG" +msgstr "" + +#: cinder/exception.py:548 +msgid "Unknown Gluster exception" +msgstr "" + +#: cinder/exception.py:552 +msgid "No mounted Gluster shares found" +msgstr "" + +#: cinder/exception.py:560 +#, python-format +msgid "" +"Glance metadata cannot be updated, key %(key)s exists for volume id " +"%(volume_id)s" +msgstr "" + +#: cinder/exception.py:565 +msgid "Failed to copy image to volume" +msgstr "" + +#: cinder/exception.py:569 +#, python-format +msgid "Backup %(backup_id)s could not be found." +msgstr "" + +#: cinder/exception.py:573 +#, fuzzy, python-format +msgid "Invalid backup: %(reason)s" +msgstr "Backend inválido: %s" + +#: cinder/exception.py:577 +msgid "Connection to swift failed" +msgstr "" + +#: cinder/flags.py:145 +msgid "Deploy v1 of the Cinder API. " +msgstr "" + +#: cinder/flags.py:148 +msgid "Deploy v2 of the Cinder API. " +msgstr "" + +#: cinder/manager.py:158 +#, python-format +msgid "Skipping %(full_task_name)s, %(ticks_to_skip)s ticks left until next run" +msgstr "" + +#: cinder/manager.py:164 +#, python-format +msgid "Running periodic task %(full_task_name)s" +msgstr "" + +#: cinder/manager.py:171 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "" + +#: cinder/manager.py:216 +msgid "Notifying Schedulers of capabilities ..." 
+msgstr "" + +#: cinder/policy.py:30 +msgid "JSON file representing policy" +msgstr "" + +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" +msgstr "" + +#: cinder/quota.py:704 +#, python-format +msgid "Created reservations %(reservations)s" +msgstr "" + +#: cinder/quota.py:726 +#, python-format +msgid "Failed to commit reservations %(reservations)s" +msgstr "" + +#: cinder/quota.py:747 +#, python-format +msgid "Failed to roll back reservations %(reservations)s" +msgstr "" + +#: cinder/service.py:124 +msgid "SIGTERM received" +msgstr "" + +#: cinder/service.py:174 cinder/openstack/common/service.py:164 +msgid "Parent process has died unexpectedly, exiting" +msgstr "" + +#: cinder/service.py:213 cinder/openstack/common/service.py:200 +msgid "Forking too fast, sleeping" +msgstr "" + +#: cinder/service.py:231 cinder/openstack/common/service.py:121 +#: cinder/openstack/common/service.py:218 +#, python-format +msgid "Caught %s, exiting" +msgstr "" + +#: cinder/service.py:236 cinder/openstack/common/service.py:223 +#, fuzzy +msgid "Unhandled exception" +msgstr "Exceção interna: %s" + +#: cinder/service.py:243 cinder/openstack/common/service.py:230 +#, python-format +msgid "Started child %d" +msgstr "" + +#: cinder/service.py:253 cinder/openstack/common/service.py:240 +#, python-format +msgid "Starting %d workers" +msgstr "" + +#: cinder/service.py:272 cinder/openstack/common/service.py:257 +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "" + +#: cinder/service.py:275 +#, python-format +msgid "Child %(pid)d exited with status %(code)d" +msgstr "" + +#: cinder/service.py:278 cinder/openstack/common/service.py:265 +#, python-format +msgid "pid %d not in child list" +msgstr "" + +#: cinder/service.py:286 +#, python-format +msgid "_wait_child %d" +msgstr "" + +#: cinder/service.py:302 +#, fuzzy, python-format +msgid "wait wrap.failed %s" +msgstr "NotFound lançado: %s" + +#: cinder/service.py:310 cinder/openstack/common/service.py:293 +#, python-format +msgid "Caught %s, stopping children" +msgstr "" + +#: cinder/service.py:321 cinder/openstack/common/service.py:304 +#, python-format +msgid "Waiting on %d children to exit" +msgstr "" + +#: cinder/service.py:353 +#, python-format +msgid "Starting %(topic)s node (version %(version_string)s)" +msgstr "" + +#: cinder/service.py:367 cinder/openstack/common/rpc/service.py:47 +#, python-format +msgid "Creating Consumer connection for Service %s" +msgstr "" + +#: cinder/service.py:458 +msgid "Service killed that has no database entry" +msgstr "Encerrado serviço que não tem entrada na base de dados" + +#: cinder/service.py:495 +msgid "The service database object disappeared, Recreating it." +msgstr "O objeto da base de dados do serviço desapareceu, Recriando." + +#: cinder/service.py:510 +msgid "Recovered model server connection!" +msgstr "Recuperada conexão servidor de modelo." 
+ +#: cinder/service.py:516 +msgid "model server went away" +msgstr "servidor de modelo perdido" + +#: cinder/service.py:608 +msgid "Full set of FLAGS:" +msgstr "Conjunto completo de FLAGS:" + +#: cinder/service.py:615 +#, python-format +msgid "%(flag)s : FLAG SET " +msgstr "" + +#: cinder/utils.py:96 +#, python-format +msgid "Fetching %s" +msgstr "Buscando %s" + +#: cinder/utils.py:147 cinder/openstack/common/processutils.py:122 +#, python-format +msgid "Got unknown keyword args to utils.execute: %r" +msgstr "" + +#: cinder/utils.py:153 +msgid "" +"The root_helper option (which lets you specify a root wrapper different " +"from cinder-rootwrap, and defaults to using sudo) is now deprecated. You " +"should use the rootwrap_config option instead." +msgstr "" + +#: cinder/utils.py:169 cinder/openstack/common/processutils.py:137 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "Executando comando (subprocesso): %s" + +#: cinder/utils.py:186 cinder/utils.py:267 +#: cinder/openstack/common/processutils.py:162 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:224 +#, python-format +msgid "Result was %s" +msgstr "Resultado foi %s" + +#: cinder/utils.py:199 cinder/openstack/common/processutils.py:174 +#, python-format +msgid "%r failed. Retrying." +msgstr "" + +#: cinder/utils.py:241 cinder/volume/drivers/san/hp/hp_3par_common.py:197 +#, python-format +msgid "Running cmd (SSH): %s" +msgstr "Executando o comando (SSH): %s" + +#: cinder/utils.py:243 +msgid "Environment not supported over SSH" +msgstr "" + +#: cinder/utils.py:247 +msgid "process_input not supported over SSH" +msgstr "" + +#: cinder/utils.py:316 cinder/volume/drivers/huawei/huawei_iscsi.py:91 +msgid "Specify a password or private_key" +msgstr "" + +#: cinder/utils.py:332 cinder/volume/drivers/huawei/huawei_iscsi.py:99 +#, python-format +msgid "Error connecting via ssh: %s" +msgstr "" + +#: cinder/utils.py:373 +#, python-format +msgid "debug in callback: %s" +msgstr "depuração em retorno de chamada: %s" + +#: cinder/utils.py:532 +#, python-format +msgid "Link Local address is not found.:%s" +msgstr "Endereço para Link Local não encontrado: %s" + +#: cinder/utils.py:535 +#, python-format +msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +msgstr "Não foi possível atribuir um IP para o Link Local de %(interface)s :%(ex)s" + +#: cinder/utils.py:569 +#, python-format +msgid "Invalid backend: %s" +msgstr "Backend inválido: %s" + +#: cinder/utils.py:580 +#, python-format +msgid "backend %s" +msgstr "backend %s" + +#: cinder/utils.py:630 +msgid "in looping call" +msgstr "" + +#: cinder/utils.py:813 +#, python-format +msgid "Expected object of type: %s" +msgstr "" + +#: cinder/utils.py:903 +#, python-format +msgid "timefunc: '%(name)s' took %(total_time).2f secs" +msgstr "" + +#: cinder/utils.py:1105 +#, python-format +msgid "Could not remove tmpdir: %s" +msgstr "" + +#: cinder/utils.py:1213 +#, python-format +msgid "Unknown byte multiplier: %s" +msgstr "" + +#: cinder/wsgi.py:121 +#, fuzzy, python-format +msgid "Unable to find cert_file : %s" +msgstr "Impossível localizar uma porta aberta" + +#: cinder/wsgi.py:124 +#, fuzzy, python-format +msgid "Unable to find ca_file : %s" +msgstr "Impossível localizar uma porta aberta" + +#: cinder/wsgi.py:127 +#, fuzzy, python-format +msgid "Unable to find key_file : %s" +msgstr "Não é possível destruir o VBD %s" + +#: cinder/wsgi.py:130 +msgid "" +"When running server in SSL mode, you must specify both a cert_file and " +"key_file option value in your configuration file" +msgstr 
"" + +#: cinder/wsgi.py:163 +#, python-format +msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" +msgstr "" + +#: cinder/wsgi.py:207 +#, python-format +msgid "Started %(name)s on %(_host)s:%(_port)s" +msgstr "" + +#: cinder/wsgi.py:226 +msgid "Stopping WSGI server." +msgstr "" + +#: cinder/wsgi.py:240 +msgid "WSGI server has stopped." +msgstr "" + +#: cinder/wsgi.py:309 +msgid "You must implement __call__" +msgstr "" + +#: cinder/api/auth.py:26 +msgid "" +"cinder.api.auth:CinderKeystoneContext is deprecated. Please use " +"cinder.api.middleware.auth:CinderKeystoneContext instead." +msgstr "" + +#: cinder/api/auth.py:34 +msgid "" +"cinder.api.auth:pipeline_factory is deprecated. Please use " +"cinder.api.middleware.auth:pipeline_factory instead." +msgstr "" + +#: cinder/api/common.py:63 cinder/api/common.py:97 cinder/volume/api.py:359 +msgid "limit param must be an integer" +msgstr "" + +#: cinder/api/common.py:66 cinder/api/common.py:101 cinder/volume/api.py:356 +msgid "limit param must be positive" +msgstr "" + +#: cinder/api/common.py:91 +msgid "offset param must be an integer" +msgstr "" + +#: cinder/api/common.py:105 +msgid "offset param must be positive" +msgstr "" + +#: cinder/api/common.py:133 +#, python-format +msgid "marker [%s] not found" +msgstr "" + +#: cinder/api/common.py:160 +#, python-format +msgid "href %s does not contain version" +msgstr "" + +#: cinder/api/extensions.py:184 +msgid "Initializing extension manager." +msgstr "" + +#: cinder/api/extensions.py:199 +#, python-format +msgid "Loaded extension: %s" +msgstr "" + +#: cinder/api/extensions.py:237 +#, python-format +msgid "Ext name: %s" +msgstr "" + +#: cinder/api/extensions.py:238 +#, python-format +msgid "Ext alias: %s" +msgstr "" + +#: cinder/api/extensions.py:239 +#, python-format +msgid "Ext description: %s" +msgstr "" + +#: cinder/api/extensions.py:241 +#, python-format +msgid "Ext namespace: %s" +msgstr "" + +#: cinder/api/extensions.py:242 +#, python-format +msgid "Ext updated: %s" +msgstr "" + +#: cinder/api/extensions.py:244 +#, python-format +msgid "Exception loading extension: %s" +msgstr "" + +#: cinder/api/extensions.py:258 +#, python-format +msgid "Loading extension %s" +msgstr "" + +#: cinder/api/extensions.py:264 +#, python-format +msgid "Calling extension factory %s" +msgstr "" + +#: cinder/api/extensions.py:278 +#, python-format +msgid "osapi_volume_extension is set to deprecated path: %s" +msgstr "" + +#: cinder/api/extensions.py:280 +#, python-format +msgid "" +"Please set your flag or cinder.conf settings for osapi_volume_extension " +"to: %s" +msgstr "" + +#: cinder/api/extensions.py:289 +#, python-format +msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:357 +#, python-format +msgid "Failed to load extension %(classpath)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:381 +#, python-format +msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgstr "" + +#: cinder/api/sizelimit.py:25 +msgid "" +"cinder.api.sizelimit:RequestBodySizeLimiter is deprecated. 
Please use " +"cinder.api.middleware.sizelimit:RequestBodySizeLimiter instead" +msgstr "" + +#: cinder/api/xmlutil.py:268 +msgid "element is not a child" +msgstr "" + +#: cinder/api/xmlutil.py:417 +msgid "root element selecting a list" +msgstr "" + +#: cinder/api/xmlutil.py:742 +#, python-format +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgstr "" + +#: cinder/api/xmlutil.py:861 +msgid "subclasses must implement construct()!" +msgstr "" + +#: cinder/api/contrib/admin_actions.py:80 +#, python-format +msgid "Updating %(resource)s '%(id)s' with '%(update)r'" +msgstr "" + +#: cinder/api/contrib/backups.py:128 +#, python-format +msgid "show called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:140 +#, python-format +msgid "delete called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:143 +#, python-format +msgid "Delete backup with id: %s" +msgstr "" + +#: cinder/api/contrib/backups.py:185 +#, python-format +msgid "Creating new backup %s" +msgstr "" + +#: cinder/api/contrib/backups.py:195 cinder/api/contrib/backups.py:230 +msgid "Incorrect request body format" +msgstr "" + +#: cinder/api/contrib/backups.py:201 +#, python-format +msgid "Creating backup of volume %(volume_id)s in container %(container)s" +msgstr "" + +#: cinder/api/contrib/backups.py:221 +#, python-format +msgid "Restoring backup %(backup_id)s (%(body)s)" +msgstr "" + +#: cinder/api/contrib/backups.py:234 +#, python-format +msgid "Restoring backup %(backup_id)s to volume %(volume_id)s" +msgstr "" + +#: cinder/api/contrib/extended_snapshot_attributes.py:61 +msgid "Snapshot not found." +msgstr "" + +#: cinder/api/contrib/hosts.py:83 cinder/api/openstack/wsgi.py:158 +msgid "cannot understand XML" +msgstr "" + +#: cinder/api/contrib/hosts.py:133 +#, python-format +msgid "Host '%s' could not be found." +msgstr "" + +#: cinder/api/contrib/hosts.py:162 +#, python-format +msgid "Invalid status: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:165 +#, python-format +msgid "Invalid update setting: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:177 +#, python-format +msgid "Setting host %(host)s to %(state)s." +msgstr "" + +#: cinder/api/contrib/hosts.py:202 +msgid "Describe-resource is admin only functionality" +msgstr "" + +#: cinder/api/contrib/hosts.py:210 +msgid "Host not found" +msgstr "" + +#: cinder/api/contrib/quotas.py:63 +msgid "Quota limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:102 +msgid "Request body empty" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:106 +#: cinder/api/v1/snapshot_metadata.py:77 cinder/api/v1/volume_metadata.py:77 +#: cinder/api/v2/snapshot_metadata.py:77 +msgid "Request body and URI mismatch" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:109 +#: cinder/api/v1/snapshot_metadata.py:81 cinder/api/v1/volume_metadata.py:81 +#: cinder/api/v2/snapshot_metadata.py:81 +msgid "Request body contains too many items" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:159 +msgid "Invalid request body" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:163 +msgid "No image_name was specified in request." 
+msgstr "" + +#: cinder/api/middleware/fault.py:45 +#, python-format +msgid "Caught error: %s" +msgstr "Capturado o erro: %s" + +#: cinder/api/middleware/fault.py:54 cinder/api/openstack/wsgi.py:886 +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "" + +#: cinder/api/middleware/sizelimit.py:54 cinder/api/middleware/sizelimit.py:63 +#: cinder/api/middleware/sizelimit.py:77 +msgid "Request is too large." +msgstr "" + +#: cinder/api/openstack/__init__.py:74 +msgid "Must specify an ExtensionManager class" +msgstr "" + +#: cinder/api/openstack/__init__.py:85 +#, python-format +msgid "Extended resource: %s" +msgstr "" + +#: cinder/api/openstack/__init__.py:110 +#, python-format +msgid "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" +msgstr "" + +#: cinder/api/openstack/__init__.py:115 +#, python-format +msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgstr "" + +#: cinder/api/openstack/__init__.py:128 +msgid "" +"cinder.api.openstack:FaultWrapper is deprecated. Please use " +"cinder.api.middleware.fault:FaultWrapper instead." +msgstr "" + +#: cinder/api/openstack/urlmap.py:25 +msgid "" +"cinder.api.openstack.urlmap:urlmap_factory is deprecated. Please use " +"cinder.api.urlmap:urlmap_factory instead." +msgstr "" + +#: cinder/api/openstack/wsgi.py:134 cinder/api/openstack/wsgi.py:537 +msgid "cannot understand JSON" +msgstr "" + +#: cinder/api/openstack/wsgi.py:542 +msgid "too many body keys" +msgstr "" + +#: cinder/api/openstack/wsgi.py:581 +#, python-format +msgid "Exception handling resource: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:586 +#, python-format +msgid "Fault thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:589 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:697 +msgid "Unrecognized Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:701 +msgid "No Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:705 +msgid "Empty body provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:816 +#, python-format +msgid "There is no such action: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:819 cinder/api/openstack/wsgi.py:832 +#: cinder/api/v1/snapshot_metadata.py:55 cinder/api/v1/snapshot_metadata.py:73 +#: cinder/api/v1/snapshot_metadata.py:98 cinder/api/v1/snapshot_metadata.py:123 +#: cinder/api/v1/volume_metadata.py:55 cinder/api/v1/volume_metadata.py:73 +#: cinder/api/v1/volume_metadata.py:98 cinder/api/v1/volume_metadata.py:123 +#: cinder/api/v2/snapshot_metadata.py:55 cinder/api/v2/snapshot_metadata.py:73 +#: cinder/api/v2/snapshot_metadata.py:98 cinder/api/v2/snapshot_metadata.py:123 +msgid "Malformed request body" +msgstr "" + +#: cinder/api/openstack/wsgi.py:829 +msgid "Unsupported Content-Type" +msgstr "" + +#: cinder/api/openstack/wsgi.py:841 +msgid "Malformed request url" +msgstr "" + +#: cinder/api/openstack/wsgi.py:889 +#, python-format +msgid "%(url)s returned a fault: %(e)s" +msgstr "" + +#: cinder/api/openstack/volume/__init__.py:25 +msgid "" +"cinder.api.openstack.volume:APIRouter is deprecated. Please use " +"cinder.api.v1.router:APIRouter instead." +msgstr "" + +#: cinder/api/openstack/volume/versions.py:26 +msgid "" +"cinder.api.openstack.volume.versions.Versions is deprecated. Please use " +"cinder.api.versions.Versions instead." 
+msgstr "" + +#: cinder/api/v1/limits.py:144 cinder/api/v2/limits.py:144 +#, python-format +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." +msgstr "" + +#: cinder/api/v1/limits.py:270 cinder/api/v2/limits.py:270 +msgid "This request was rate-limited." +msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:39 cinder/api/v1/snapshot_metadata.py:119 +#: cinder/api/v1/snapshot_metadata.py:158 cinder/api/v2/snapshot_metadata.py:39 +#: cinder/api/v2/snapshot_metadata.py:119 +#: cinder/api/v2/snapshot_metadata.py:158 +#, fuzzy +msgid "snapshot does not exist" +msgstr "instância %s: fazendo um snapshot" + +#: cinder/api/v1/snapshot_metadata.py:141 +#: cinder/api/v1/snapshot_metadata.py:151 cinder/api/v1/volume_metadata.py:141 +#: cinder/api/v1/volume_metadata.py:151 cinder/api/v2/snapshot_metadata.py:141 +#: cinder/api/v2/snapshot_metadata.py:151 +msgid "Metadata item was not found" +msgstr "" + +#: cinder/api/v1/snapshots.py:123 cinder/api/v2/snapshots.py:123 +#, python-format +msgid "Delete snapshot with id: %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:173 cinder/api/v2/snapshots.py:179 +#, python-format +msgid "Create snapshot from volume %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:177 cinder/api/v2/snapshots.py:188 +#, python-format +msgid "Invalid value '%s' for force. " +msgstr "" + +#: cinder/api/v1/volume_metadata.py:39 cinder/api/v1/volume_metadata.py:119 +#: cinder/api/v1/volume_metadata.py:158 +msgid "volume does not exist" +msgstr "" + +#: cinder/api/v1/volumes.py:106 +#, python-format +msgid "vol=%s" +msgstr "" + +#: cinder/api/v1/volumes.py:236 cinder/api/v2/volumes.py:153 +#, python-format +msgid "Delete volume with id: %s" +msgstr "" + +#: cinder/api/v1/volumes.py:278 cinder/api/v1/volumes.py:282 +#: cinder/api/v2/volumes.py:209 cinder/api/v2/volumes.py:213 +msgid "Invalid imageRef provided." +msgstr "" + +#: cinder/api/v1/volumes.py:339 cinder/api/v2/volumes.py:272 +#, python-format +msgid "Create volume of %s GB" +msgstr "Criar volume de %s GB" + +#: cinder/api/v1/volumes.py:418 +#, python-format +msgid "Removing options '%(bad_options)s' from query" +msgstr "" + +#: cinder/api/v2/volumes.py:359 +#, python-format +msgid "Removing options '%s' from query" +msgstr "" + +#: cinder/backup/api.py:53 +#, fuzzy +msgid "Backup status must be available or error" +msgstr "O status do volume parece estar disponível" + +#: cinder/backup/api.py:78 +#, fuzzy +msgid "Volume to be backed up must be available" +msgstr "O status do volume parece estar disponível" + +#: cinder/backup/api.py:112 +#, fuzzy +msgid "Backup status must be available" +msgstr "O status do volume parece estar disponível" + +#: cinder/backup/api.py:117 +msgid "Backup to be restored has invalid size" +msgstr "" + +#: cinder/backup/api.py:126 +#, python-format +msgid "Creating volume of %(size)s GB for restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/api.py:140 +#, python-format +msgid "" +"volume size %(volume_size)d is too small to restore backup of size " +"%(size)d." 
+msgstr "" + +#: cinder/backup/api.py:145 +#, fuzzy +msgid "Volume to be restored to must be available" +msgstr "O status do volume parece estar disponível" + +#: cinder/backup/api.py:151 +msgid "Volume to be restored to is smaller than the backup to be restored" +msgstr "" + +#: cinder/backup/api.py:155 +#, python-format +msgid "Overwriting volume %(volume_id)s with restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/manager.py:79 +msgid "Cleaning up incomplete backup operations" +msgstr "" + +#: cinder/backup/manager.py:83 +#, python-format +msgid "Resetting volume %s to available (was backing-up)" +msgstr "" + +#: cinder/backup/manager.py:87 +#, python-format +msgid "Resetting volume %s to error_restoring (was restoring-backup)" +msgstr "" + +#: cinder/backup/manager.py:98 +#, python-format +msgid "Resetting backup %s to error (was creating)" +msgstr "" + +#: cinder/backup/manager.py:104 +#, python-format +msgid "Resetting backup %s to available (was restoring)" +msgstr "" + +#: cinder/backup/manager.py:109 +#, python-format +msgid "Resuming delete on backup: %s" +msgstr "" + +#: cinder/backup/manager.py:119 +#, python-format +msgid "create_backup started, backup: %(backup_id)s for volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:128 +#, python-format +msgid "" +"create_backup aborted, expected volume status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:137 +#, python-format +msgid "" +"create_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:160 +#, python-format +msgid "create_backup finished. backup: %s" +msgstr "" + +#: cinder/backup/manager.py:166 +#, python-format +msgid "" +"restore_backup started, restoring backup: %(backup_id)s to volume: " +"%(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:175 +#, python-format +msgid "" +"restore_backup aborted, expected volume status %(expected_status)s but " +"got %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:183 +#, python-format +msgid "" +"restore_backup aborted, expected backup status %(expected_status)s but " +"got %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:199 +#, python-format +msgid "" +"restore_backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]" +msgstr "" + +#: cinder/backup/manager.py:220 +#, python-format +msgid "" +"restore_backup finished, backup: %(backup_id)s restored to volume: " +"%(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:228 +#, python-format +msgid "delete_backup started, backup: %s" +msgstr "" + +#: cinder/backup/manager.py:234 +#, python-format +msgid "" +"delete_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:244 +#, python-format +msgid "" +"delete_backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]" +msgstr "" + +#: cinder/backup/manager.py:264 +#, python-format +msgid "delete_backup finished, backup %s deleted" +msgstr "" + +#: cinder/backup/services/swift.py:96 +#, python-format +msgid "unsupported compression algorithm: %s" +msgstr "" + +#: cinder/backup/services/swift.py:117 +#, python-format +msgid "_check_container_exists: container: %s" +msgstr "" + +#: 
cinder/backup/services/swift.py:122 +#, python-format +msgid "container %s does not exist" +msgstr "" + +#: cinder/backup/services/swift.py:127 +#, python-format +msgid "container %s exists" +msgstr "" + +#: cinder/backup/services/swift.py:133 +#, python-format +msgid "_create_container started, container: %(container)s,backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:148 +#, python-format +msgid "_generate_swift_object_name_prefix: %s" +msgstr "" + +#: cinder/backup/services/swift.py:159 +#, python-format +msgid "generated object list: %s" +msgstr "" + +#: cinder/backup/services/swift.py:169 +#, python-format +msgid "" +"_write_metadata started, container name: %(container)s, metadata " +"filename: %(filename)s" +msgstr "" + +#: cinder/backup/services/swift.py:184 +#, python-format +msgid "" +"error writing metadata file to swift, MD5 of metadata file in swift " +"[%(etag)s] is not the same as MD5 of metadata file sent to swift " +"[%(md5)s]" +msgstr "" + +#: cinder/backup/services/swift.py:188 +msgid "_write_metadata finished" +msgstr "" + +#: cinder/backup/services/swift.py:193 +#, python-format +msgid "" +"_read_metadata started, container name: %(container)s, metadata filename:" +" %(filename)s" +msgstr "" + +#: cinder/backup/services/swift.py:197 +#, python-format +msgid "_read_metadata finished (%s)" +msgstr "" + +#: cinder/backup/services/swift.py:207 +#, python-format +msgid "volume size %d is invalid." +msgstr "" + +#: cinder/backup/services/swift.py:221 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to swift, volume size: " +"%(volume_size_bytes)d, swift object names prefix %(object_prefix)s, " +"availability zone: %(availability_zone)s" +msgstr "" + +#: cinder/backup/services/swift.py:237 +msgid "reading chunk of data from volume" +msgstr "" + +#: cinder/backup/services/swift.py:244 +#, python-format +msgid "" +"compressed %(data_size_bytes)d bytes of data to %(comp_size_bytes)d bytes" +" using %(algorithm)s" +msgstr "" + +#: cinder/backup/services/swift.py:248 +msgid "not compressing data" +msgstr "" + +#: cinder/backup/services/swift.py:252 +msgid "About to put_object" +msgstr "" + +#: cinder/backup/services/swift.py:257 +#, python-format +msgid "swift MD5 for %(object_name)s: %(etag)s" +msgstr "" + +#: cinder/backup/services/swift.py:260 +#, python-format +msgid "backup MD5 for %(object_name)s: %(md5)s" +msgstr "" + +#: cinder/backup/services/swift.py:262 +#, python-format +msgid "" +"error writing object to swift, MD5 of object in swift %(etag)s is not the" +" same as MD5 of object sent to swift %(md5)s" +msgstr "" + +#: cinder/backup/services/swift.py:268 +msgid "Calling eventlet.sleep(0)" +msgstr "" + +#: cinder/backup/services/swift.py:276 +#, python-format +msgid "backup %s finished." +msgstr "" + +#: cinder/backup/services/swift.py:281 +#, python-format +msgid "v1 swift volume backup restore of %s started" +msgstr "" + +#: cinder/backup/services/swift.py:287 +#, python-format +msgid "metadata_object_names = %s" +msgstr "" + +#: cinder/backup/services/swift.py:293 +msgid "" +"restore_backup aborted, actual swift object list in swift does not match " +"object list stored in metadata" +msgstr "" + +#: cinder/backup/services/swift.py:299 +#, python-format +msgid "" +"restoring object from swift. 
backup: %(backup_id)s, container: " +"%(container)s, swift object name: %(object_name)s, volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:309 +#, python-format +msgid "decompressing data using %s algorithm" +msgstr "" + +#: cinder/backup/services/swift.py:323 +#, python-format +msgid "v1 swift volume backup restore of %s finished" +msgstr "" + +#: cinder/backup/services/swift.py:331 +#, python-format +msgid "" +"starting restore of backup %(object_prefix)s from swift container: " +"%(container)s, to volume %(volume_id)s, backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:339 +#, python-format +msgid "Restoring swift backup version %s" +msgstr "" + +#: cinder/backup/services/swift.py:344 +#, python-format +msgid "No support to restore swift backup version %s" +msgstr "" + +#: cinder/backup/services/swift.py:348 +#, python-format +msgid "restore %(backup_id)s to %(volume_id)s finished." +msgstr "" + +#: cinder/backup/services/swift.py:362 +msgid "swift error while listing objects, continuing with delete" +msgstr "" + +#: cinder/backup/services/swift.py:371 +#, python-format +msgid "swift error while deleting object %s, continuing with delete" +msgstr "" + +#: cinder/backup/services/swift.py:374 +#, python-format +msgid "deleted swift object: %(swift_object_name)s in container: %(container)s" +msgstr "" + +#: cinder/backup/services/swift.py:380 +#, python-format +msgid "delete %s finished" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:152 +#, python-format +msgid "Creating iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:171 cinder/brick/iscsi/iscsi.py:264 +#: cinder/brick/iscsi/iscsi.py:400 cinder/brick/iscsi/iscsi.py:409 +#, python-format +msgid "Failed to create iscsi target for volume id:%(vol_id)s." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:181 +#, python-format +msgid "" +"Failed to create iscsi target for volume id:%(vol_id)s. Please ensure " +"your tgtd config file contains 'include %(volumes_dir)s/*'" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:192 +#, python-format +msgid "Removing iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:209 cinder/brick/iscsi/iscsi.py:426 +#, python-format +msgid "Failed to remove iscsi target for volume id:%(vol_id)s." 
+msgstr "" + +#: cinder/brick/iscsi/iscsi.py:218 cinder/brick/iscsi/iscsi.py:434 +msgid "valid iqn needed for show_target" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:270 +#, python-format +msgid "Removing iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:356 +msgid "rtstool is not installed correctly" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:376 +#, python-format +msgid "Creating iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:416 +#, fuzzy, python-format +msgid "Removing iscsi_target: %s" +msgstr "Reiniciando a instância %s" + +#: cinder/brick/iscsi/iscsi.py:455 +#, python-format +msgid "Failed to add initiator iqn %s to target" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:35 +#, fuzzy, python-format +msgid "Unable to find Volume Group: %s" +msgstr "Não é possível desconectar o volume %s" + +#: cinder/brick/local_dev/lvm.py:41 +#, fuzzy, python-format +msgid "Failed to create Volume Group: %s" +msgstr "Não é possível localizar o volume %s" + +#: cinder/brick/local_dev/lvm.py:75 +msgid "Error creating Volume Group" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:76 +#, python-format +msgid "Cmd :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:77 +#, python-format +msgid "StdOut :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:78 +#, python-format +msgid "StdErr :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:82 +#, fuzzy, python-format +msgid "Unable to locate Volume Group %s" +msgstr "Não é possível localizar o volume %s" + +#: cinder/brick/local_dev/lvm.py:265 +#, fuzzy, python-format +msgid "Unable to find VG: %s" +msgstr "Não é possível desconectar o volume %s" + +#: cinder/brick/local_dev/lvm.py:284 +msgid "" +"Requested to setup thin provisioning, however current LVM version does " +"not support it." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:337 +#, fuzzy, python-format +msgid "Unable to find LV: %s" +msgstr "Não é possível desconectar o volume %s" + +#: cinder/common/sqlalchemyutils.py:68 +msgid "Id not in sort_keys; is sort_keys unique?" +msgstr "" + +#: cinder/common/sqlalchemyutils.py:116 +msgid "Unknown sort direction, must be 'desc' or 'asc'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:51 +msgid "Use of empty request context is deprecated" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:178 +#, python-format +msgid "Unrecognized read_deleted value '%s'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:786 +#, python-format +msgid "Change will make usage less than 0 for the following resources: %(unders)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1797 cinder/db/sqlalchemy/api.py:1823 +#, python-format +msgid "No backend config with id %(sm_backend_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1880 +#, python-format +msgid "No sm_flavor called %(sm_flavor)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1924 +#, python-format +msgid "No sm_volume with id %(volume_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1987 +#, python-format +msgid "No backup with id %(backup_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:74 +msgid "version should be an integer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:101 +msgid "Upgrade DB using Essex release first." +msgstr "" + +#: cinder/db/sqlalchemy/session.py:71 +#, python-format +msgid "Got mysql server has gone away: %s" +msgstr "" + +#: cinder/db/sqlalchemy/session.py:132 +#, python-format +msgid "SQL connection failed. %s attempts left." 
+msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:243 +msgid "Exception while creating table." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:272 +msgid "Downgrade from initial Cinder install is unsupported." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:85 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:123 +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:82 +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:47 +#, python-format +msgid "Table |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:135 +msgid "quota_classes table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:142 +msgid "quota_usages table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:149 +msgid "reservations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:62 +msgid "Exception while creating table 'volume_glance_metedata'" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:77 +msgid "volume_glance_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:94 +msgid "backups table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:60 +msgid "snapshot_metadata table not dropped" +msgstr "" + +#: cinder/image/glance.py:151 +#, python-format +msgid "" +"Error contacting glance server '%(host)s:%(port)s' for '%(method)s', " +"%(extra)s." +msgstr "" + +#: cinder/image/image_utils.py:116 +msgid "Snapshot list encountered but no header found!" +msgstr "" + +#: cinder/image/image_utils.py:224 +msgid "'qemu-img info' parsing failed." +msgstr "" + +#: cinder/image/image_utils.py:231 +#, python-format +msgid "fmt=%(fmt)s backed by:%(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:248 +#, python-format +msgid "Converted to raw, but format is now %s" +msgstr "" + +#: cinder/image/image_utils.py:278 +#, python-format +msgid "Converted to %(f1)s, but format is now %(f2)s" +msgstr "" + +#: cinder/openstack/common/exception.py:104 +msgid "Uncaught exception" +msgstr "" + +#: cinder/openstack/common/excutils.py:48 +#, python-format +msgid "Original exception being dropped: %s" +msgstr "" + +#: cinder/openstack/common/lockutils.py:102 +#, python-format +msgid "Could not release the acquired lock `%s`" +msgstr "" + +#: cinder/openstack/common/lockutils.py:188 +#, python-format +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:199 +#, python-format +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:226 +#, python-format +msgid "Got file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:234 +#, python-format +msgid "Released file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." 
+msgstr "" + +#: cinder/openstack/common/log.py:226 +#, fuzzy, python-format +msgid "Deprecated: %s" +msgstr "recebido %s" + +#: cinder/openstack/common/log.py:329 +#, python-format +msgid "Error loading logging config %(log_config)s: %(err_msg)s" +msgstr "" + +#: cinder/openstack/common/log.py:379 +#, python-format +msgid "syslog facility must be one of: %s" +msgstr "" + +#: cinder/openstack/common/log.py:537 +#, python-format +msgid "Fatal call to deprecated config: %(msg)s" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:84 +#, python-format +msgid "task run outlasted interval by %s sec" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:91 +msgid "in fixed duration looping call" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:131 +#, python-format +msgid "Dynamic looping call sleeping for %.02f seconds" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:138 +msgid "in dynamic looping call" +msgstr "" + +#: cinder/openstack/common/policy.py:149 +#, python-format +msgid "" +"Inheritance-based rules are deprecated; use the default brain instead of " +"%s." +msgstr "" + +#: cinder/openstack/common/policy.py:163 +#, python-format +msgid "Failed to understand rule %(match)r" +msgstr "" + +#: cinder/openstack/common/policy.py:173 +#, python-format +msgid "Inheritance-based rules are deprecated; update _check_%s" +msgstr "" + +#: cinder/openstack/common/policy.py:180 +#, python-format +msgid "No handler for matches of kind %s" +msgstr "" + +#: cinder/openstack/common/service.py:112 +#: cinder/openstack/common/service.py:275 +#, fuzzy +msgid "Full set of CONF:" +msgstr "Conjunto completo de FLAGS:" + +#: cinder/openstack/common/service.py:261 +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "" + +#: cinder/openstack/common/strutils.py:72 +#, python-format +msgid "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:125 +#, python-format +msgid "%s not in valid priorities" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:141 +#, python-format +msgid "" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:171 +#, python-format +msgid "Failed to load notifier %s. These notifications will not be sent." +msgstr "" + +#: cinder/openstack/common/notifier/rabbit_notifier.py:27 +msgid "The rabbit_notifier is now deprecated. Please use rpc_notifier instead." +msgstr "" + +#: cinder/openstack/common/notifier/rpc_notifier.py:45 +#: cinder/openstack/common/notifier/rpc_notifier2.py:51 +#, python-format +msgid "Could not send notification to %(topic)s. Payload=%(message)s" +msgstr "" + +#: cinder/openstack/common/rpc/__init__.py:106 +#, python-format +msgid "" +"A RPC is being made while holding a lock. The locks currently held are " +"%(locks)s. This is probably a bug. Please report it. Include the " +"following: [%(stack)s]." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:75 +msgid "Pool creating new connection" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:200 +#, python-format +msgid "no calling threads waiting for msg_id : %s, message : %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:208 +#, python-format +msgid "" +"Number of call waiters is greater than warning threshhold: %d. There " +"could be a MulticallProxyWaiter leak." 
+msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:291 +#, python-format +msgid "unpacked context: %s" +msgstr "conteúdo descompactado: %s" + +#: cinder/openstack/common/rpc/amqp.py:337 +#, python-format +msgid "UNIQUE_ID is %s." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:406 +#, python-format +msgid "received %s" +msgstr "recebido %s" + +#: cinder/openstack/common/rpc/amqp.py:414 +#, python-format +msgid "no method for message: %s" +msgstr "sem método para mensagem: %s" + +#: cinder/openstack/common/rpc/amqp.py:415 +#, python-format +msgid "No method for message: %s" +msgstr "Sem método para mensagem: %s" + +#: cinder/openstack/common/rpc/amqp.py:443 +#: cinder/openstack/common/rpc/impl_zmq.py:286 +#, python-format +msgid "Expected exception during message handling (%s)" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:451 +#: cinder/openstack/common/rpc/impl_zmq.py:292 +msgid "Exception during message handling" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:586 +#, python-format +msgid "Making synchronous call on %s ..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:589 +#, python-format +msgid "MSG_ID is %s" +msgstr "MSG_ID é %s" + +#: cinder/openstack/common/rpc/amqp.py:623 +#, python-format +msgid "Making asynchronous cast on %s..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:632 +msgid "Making asynchronous fanout cast..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:660 +#, python-format +msgid "Sending %(event_type)s on %(topic)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:74 +msgid "An unknown RPC related exception occurred." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:104 +#, python-format +msgid "" +"Remote error: %(exc_type)s %(value)s\n" +"%(traceback)s." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:121 +#, python-format +msgid "" +"Timeout while waiting on RPC response - topic: \"%(topic)s\", RPC method:" +" \"%(method)s\" info: \"%(info)s\"" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:137 +#: cinder/openstack/common/rpc/common.py:138 +#: cinder/openstack/common/rpc/common.py:139 +msgid "" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:143 +#, python-format +msgid "Found duplicate message(%(msg_id)s). Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:147 +msgid "Invalid reuse of an RPC connection." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:151 +#, python-format +msgid "Specified RPC version, %(version)s, not supported by this endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:156 +#, python-format +msgid "" +"Specified RPC envelope version, %(version)s, not supported by this " +"endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:280 +#, python-format +msgid "Failed to sanitize %(item)s. Key error %(err)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:302 +#, python-format +msgid "Returning exception %s to caller" +msgstr "Retornando exceção %s ao método de origem" + +#: cinder/openstack/common/rpc/impl_kombu.py:169 +#: cinder/openstack/common/rpc/impl_qpid.py:133 +msgid "Failed to process message... skipping it." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:480 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:502 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:539 +#, python-format +msgid "" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:555 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:609 +#: cinder/openstack/common/rpc/impl_qpid.py:403 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:627 +#: cinder/openstack/common/rpc/impl_qpid.py:418 +#, python-format +msgid "Timed out waiting for RPC response: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:631 +#: cinder/openstack/common/rpc/impl_qpid.py:422 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:670 +#: cinder/openstack/common/rpc/impl_qpid.py:457 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:351 +#, python-format +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:357 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:370 +msgid "Re-established AMQP queues" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:430 +msgid "Error processing message. Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:97 +msgid "JSON serialization failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:104 +#, python-format +msgid "Deserializing: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:140 +#, python-format +msgid "Connecting to %(addr)s with %(type)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:141 +#, python-format +msgid "-> Subscribed to %(subscribe)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:142 +#, python-format +msgid "-> bind: %(bind)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:150 +msgid "Could not open socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:162 +#, python-format +msgid "Subscribing to %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:204 +msgid "You cannot recv on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:209 +msgid "You cannot send on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:273 +#, fuzzy, python-format +msgid "Running func with context: %s" +msgstr "conteúdo descompactado: %s" + +#: cinder/openstack/common/rpc/impl_zmq.py:311 +msgid "Sending reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:345 +msgid "RPC message did not include method." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:380 +#, fuzzy +msgid "Registering reactor" +msgstr "Removendo o registro da imagem %s" + +#: cinder/openstack/common/rpc/impl_zmq.py:392 +msgid "In reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:407 +msgid "Out reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:411 +msgid "Consuming socket" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:453 +#, python-format +msgid "CONSUMER GOT %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:465 +#, python-format +msgid "Creating proxy for topic: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:471 +msgid "Topic contained dangerous characters." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:496 +#, python-format +msgid "ROUTER RELAY-OUT SUCCEEDED %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:505 +msgid "Topic socket file creation failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:510 +#, python-format +msgid "ROUTER RELAY-OUT QUEUED %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:513 +#, python-format +msgid "Local per-topic backlog buffer full for topic %(topic)s. Dropping message." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:532 +#, python-format +msgid "Could not create IPC directory %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:542 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:576 +#, python-format +msgid "CONSUMER RECEIVED DATA: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:578 +#, python-format +msgid "ROUTER RELAY-OUT %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:600 +msgid "ZMQ Envelope version unsupported or unknown." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:628 +msgid "Skipping topic registration. Already registered." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:635 +#, python-format +msgid "Consumer is a zmq.%s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:687 +msgid "Creating payload" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:700 +msgid "Creating queue socket for reply waiter" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:713 +msgid "Sending cast" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:716 +msgid "Cast sent; Waiting reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:719 +#, fuzzy, python-format +msgid "Received message: %s" +msgstr "recebido %s" + +#: cinder/openstack/common/rpc/impl_zmq.py:720 +msgid "Unpacking response" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:729 +msgid "Unsupported or unknown ZMQ envelope returned." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:736 +msgid "RPC Message Invalid." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:760 +#, python-format +msgid "%(msg)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:763 +#, python-format +msgid "Sending message(s) to: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:767 +msgid "No matchmaker results. Not casting." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:770 +msgid "No match from matchmaker." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:53 +msgid "Match not found by MatchMaker." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:89 +#, fuzzy +msgid "Matchmaker does not implement registration or heartbeat." 
+msgstr "xenapi.fake não tem uma implementação para %s" + +#: cinder/openstack/common/rpc/matchmaker.py:239 +#, python-format +msgid "Matchmaker unregistered: %s, %s" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:250 +msgid "Register before starting heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:343 +#: cinder/openstack/common/rpc/matchmaker.py:361 +#, python-format +msgid "No key defining hosts for topic '%s', see ringfile" +msgstr "" + +#: cinder/scheduler/chance.py:51 cinder/scheduler/simple.py:90 +msgid "Is the appropriate service running?" +msgstr "" + +#: cinder/scheduler/chance.py:56 +msgid "Could not find another host" +msgstr "" + +#: cinder/scheduler/driver.py:89 +msgid "Must implement a fallback schedule" +msgstr "" + +#: cinder/scheduler/driver.py:93 +msgid "Must implement schedule_create_volume" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:113 +msgid "Invalid value for 'scheduler_max_attempts', must be >=1" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:131 +#, python-format +msgid "" +"Error scheduling %(volume_id)s from last vol-service: %(last_host)s : " +"%(exc)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:160 +#, python-format +msgid "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:208 +#, python-format +msgid "Filtered %(hosts)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:214 +#, python-format +msgid "Choosing %(best_host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:240 +#, python-format +msgid "Ignoring %(service_name)s service update from %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:244 +#, python-format +msgid "Received %(service_name)s service update from %(host)s." +msgstr "" + +#: cinder/scheduler/host_manager.py:266 +msgid "service is down or disabled." +msgstr "" + +#: cinder/scheduler/manager.py:119 +#, python-format +msgid "Failed to schedule_%(method)s: %(ex)s" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:69 +#, python-format +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:78 +#, python-format +msgid "Could not decode scheduler options: '%(e)s'" +msgstr "" + +#: cinder/scheduler/simple.py:79 +msgid "Not enough allocatable volume gigabytes remaining" +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:37 +msgid "Free capacity not set: volume node info collection broken." +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:51 +#, python-format +msgid "" +"Insufficient free space for volume creation (requested / avail): " +"%(requested)s/%(available)s" +msgstr "" + +#: cinder/scheduler/filters/retry_filter.py:41 +#, python-format +msgid "Host %(host)s %(pass_msg)s. 
Previously tried hosts: %(hosts)s"
+msgstr ""
+
+#: cinder/tests/fake_driver.py:45 cinder/volume/driver.py:506
+#, python-format
+msgid "FAKE ISCSI: %s"
+msgstr ""
+
+#: cinder/tests/fake_driver.py:93 cinder/tests/fake_driver.py:98
+#, python-format
+msgid "LoggingVolumeDriver: %s"
+msgstr ""
+
+#: cinder/tests/fake_utils.py:72
+#, python-format
+msgid "Faking execution of cmd (subprocess): %s"
+msgstr ""
+
+#: cinder/tests/fake_utils.py:80
+#, python-format
+msgid "Faked command matched %s"
+msgstr ""
+
+#: cinder/tests/fake_utils.py:96
+#, python-format
+msgid "Faked command raised an exception %s"
+msgstr ""
+
+#: cinder/tests/fake_utils.py:101
+#, python-format
+msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'"
+msgstr ""
+
+#: cinder/tests/test_misc.py:59
+#, python-format
+msgid ""
+"The following migrations are missing a downgrade:\n"
+"\t%s"
+msgstr ""
+
+#: cinder/tests/test_storwize_svc.py:243
+#, python-format
+msgid "unrecognized argument %s"
+msgstr ""
+
+#: cinder/tests/test_storwize_svc.py:1217
+#, python-format
+msgid "Run CLI command: %s"
+msgstr ""
+
+#: cinder/tests/test_storwize_svc.py:1220
+#, python-format
+msgid ""
+"CLI output:\n"
+" stdout: %(stdout)s\n"
+" stderr: %(stderr)s"
+msgstr ""
+
+#: cinder/tests/test_storwize_svc.py:1225
+#, python-format
+msgid ""
+"CLI Exception output:\n"
+" stdout: %(out)s\n"
+" stderr: %(err)s"
+msgstr ""
+
+#: cinder/tests/test_volume_types.py:58
+#, python-format
+msgid "Given data: %s"
+msgstr ""
+
+#: cinder/tests/test_volume_types.py:59
+#, python-format
+msgid "Result data: %s"
+msgstr ""
+
+#: cinder/tests/api/contrib/test_backups.py:638
+msgid "Invalid input"
+msgstr ""
+
+#: cinder/tests/integrated/test_login.py:31
+#, python-format
+msgid "volume: %s"
+msgstr "volume: %s"
+
+#: cinder/tests/integrated/api/client.py:37
+#, python-format
+msgid ""
+"%(message)s\n"
+"Status Code: %(_status)s\n"
+"Body: %(_body)s"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:46
+msgid "Authentication error"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:54
+msgid "Authorization error"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:62
+msgid "Item not found"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:104
+#, python-format
+msgid "Doing %(method)s on %(relative_url)s"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:106
+#, python-format
+msgid "Body: %s"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:124
+#, python-format
+msgid "%(auth_uri)s => code %(http_status)s"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:150
+#, python-format
+msgid "%(relative_uri)s => code %(http_status)s"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:160
+msgid "Unexpected status code"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:167
+#, python-format
+msgid "Decoding JSON: %s"
+msgstr ""
+
+#: cinder/volume/api.py:96
+msgid "May specify only one of snapshot, imageRef or source volume"
+msgstr ""
+
+#: cinder/volume/api.py:103 cinder/volume/api.py:449
+#: cinder/volume/manager.py:554
+msgid "status must be available"
+msgstr ""
+
+#: cinder/volume/api.py:108
+msgid "Volume size cannot be lesser than the Snapshot size"
+msgstr ""
+
+#: cinder/volume/api.py:117
+msgid "Unable to clone volumes that are in an error state"
+msgstr ""
+
+#: cinder/volume/api.py:123
+msgid "Clones currently must be >= original volume size."
+msgstr "" + +#: cinder/volume/api.py:140 +#, python-format +msgid "Volume size '%s' must be an integer and greater than 0" +msgstr "" + +#: cinder/volume/api.py:150 +msgid "Size of specified image is larger than volume size." +msgstr "" + +#: cinder/volume/api.py:154 +msgid "Image minDisk size is larger than the volume size." +msgstr "" + +#: cinder/volume/api.py:168 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/api.py:177 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create volume (%(d_consumed)d " +"volumes already consumed)" +msgstr "" + +#: cinder/volume/api.py:308 +msgid "Failed to update quota for deleting volume" +msgstr "" + +#: cinder/volume/api.py:316 +#, fuzzy +msgid "Volume status must be available or error" +msgstr "O status do volume parece estar disponível" + +#: cinder/volume/api.py:321 +#, python-format +msgid "Volume still has %d dependent snapshots" +msgstr "" + +#: cinder/volume/api.py:374 cinder/volume/api.py:432 +#: cinder/volume/volume_types.py:65 +#, python-format +msgid "Searching by: %s" +msgstr "" + +#: cinder/volume/api.py:452 +msgid "already attached" +msgstr "" + +#: cinder/volume/api.py:459 +msgid "already detached" +msgstr "" + +#: cinder/volume/api.py:470 +#, fuzzy +msgid "Volume status must be available to reserve" +msgstr "O status do volume parece estar disponível" + +#: cinder/volume/api.py:519 +msgid "must be available" +msgstr "" + +#: cinder/volume/api.py:537 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG snapshot " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/api.py:546 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create snapshot (%(d_consumed)d " +"snapshots already consumed)" +msgstr "" + +#: cinder/volume/api.py:595 +#, fuzzy +msgid "Volume Snapshot status must be available or error" +msgstr "O status do volume parece estar disponível" + +#: cinder/volume/api.py:623 +msgid "Metadata property key blank" +msgstr "" + +#: cinder/volume/api.py:627 +msgid "Metadata property key greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:631 +msgid "Metadata property value greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:716 +#, fuzzy +msgid "Volume status must be available/in-use." +msgstr "O status do volume parece estar disponível" + +#: cinder/volume/api.py:719 +msgid "Volume status is in-use." +msgstr "" + +#: cinder/volume/driver.py:94 cinder/volume/drivers/netapp/nfs.py:253 +#, python-format +msgid "Recovering from a failed execute. Try number %s" +msgstr "" + +#: cinder/volume/driver.py:221 cinder/volume/drivers/emc/emc_smis_iscsi.py:119 +msgid "ISCSI provider_location not stored, using discovery" +msgstr "" + +#: cinder/volume/driver.py:269 +#, fuzzy, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "Não é possível localizar o volume %s" + +#: cinder/volume/driver.py:273 cinder/volume/drivers/emc/emc_smis_iscsi.py:162 +#, python-format +msgid "ISCSI Discovery: Found %s" +msgstr "" + +#: cinder/volume/driver.py:361 +#, python-format +msgid "copy_image_to_volume %s." +msgstr "" + +#: cinder/volume/driver.py:378 +#, python-format +msgid "copy_volume_to_image %s." 
+msgstr "" + +#: cinder/volume/driver.py:437 +#, python-format +msgid "iSCSI device not found at %s" +msgstr "" + +#: cinder/volume/driver.py:439 +#, python-format +msgid "" +"ISCSI volume not yet found at: %(host_device)s. Will rescan & retry. Try" +" number: %(tries)s" +msgstr "" + +#: cinder/volume/driver.py:451 +#, python-format +msgid "Found iSCSI node %(host_device)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/volume/driver.py:469 cinder/volume/manager.py:689 +#: cinder/volume/drivers/lvm.py:560 cinder/volume/drivers/lvm.py:676 +#: cinder/volume/drivers/storwize_svc.py:1344 +#: cinder/volume/drivers/emc/emc_smis_common.py:855 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:241 +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1503 +#: cinder/volume/drivers/netapp/iscsi.py:1114 +#: cinder/volume/drivers/netapp/iscsi.py:1484 +#: cinder/volume/drivers/netapp/iscsi.py:2236 +#: cinder/volume/drivers/netapp/iscsi.py:2515 +#: cinder/volume/drivers/nexenta/volume.py:317 +msgid "Updating volume status" +msgstr "" + +#: cinder/volume/driver.py:544 +msgid "Driver must implement initialize_connection" +msgstr "" + +#: cinder/volume/manager.py:122 +#, python-format +msgid "Driver path %s is deprecated, update your configuration to the new path." +msgstr "" + +#: cinder/volume/manager.py:144 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "Re-exportando %s volumes" + +#: cinder/volume/manager.py:149 +#, python-format +msgid "volume %s stuck in a downloading state" +msgstr "" + +#: cinder/volume/manager.py:154 +#, python-format +msgid "volume %s: skipping export" +msgstr "volume %s: ignorando export" + +#: cinder/volume/manager.py:156 +msgid "Resuming any in progress delete operations" +msgstr "" + +#: cinder/volume/manager.py:159 +#, python-format +msgid "Resuming delete on volume: %s" +msgstr "" + +#: cinder/volume/manager.py:219 +#, python-format +msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +msgstr "volume %(vol_name)s: criando lv com tamanho %(vol_size)sG" + +#: cinder/volume/manager.py:228 +#, fuzzy, python-format +msgid "volume %s: creating from snapshot" +msgstr "volume %s: criando o export" + +#: cinder/volume/manager.py:232 +#, fuzzy, python-format +msgid "volume %s: creating from existing volume" +msgstr "volume %s: criando o export" + +#: cinder/volume/manager.py:236 +#, fuzzy, python-format +msgid "volume %s: creating from image" +msgstr "volume %s: criando" + +#: cinder/volume/manager.py:245 +#, python-format +msgid "volume %s: creating" +msgstr "volume %s: criando" + +#: cinder/volume/manager.py:277 +#, python-format +msgid "volume %s: creating export" +msgstr "volume %s: criando o export" + +#: cinder/volume/manager.py:286 +#, fuzzy, python-format +msgid "volume %s: create failed" +msgstr "volume %s: criando" + +#: cinder/volume/manager.py:316 +#, python-format +msgid "volume %s: created successfully" +msgstr "volume %s: criado com sucesso" + +#: cinder/volume/manager.py:324 +#, fuzzy, python-format +msgid "Error: %s" +msgstr "Capturado o erro: %s" + +#: cinder/volume/manager.py:350 +#, python-format +msgid "volume %s: Error trying to reschedule create" +msgstr "" + +#: cinder/volume/manager.py:368 +msgid "Retry info not present, will not reschedule" +msgstr "" + +#: cinder/volume/manager.py:372 +msgid "No request spec, will not reschedule" +msgstr "" + +#: cinder/volume/manager.py:377 +#, python-format +msgid "volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d" +msgstr "" + +#: cinder/volume/manager.py:406 cinder/volume/manager.py:419 
+#, python-format
+msgid "volume %s: deleting"
+msgstr "volume %s: removendo"
+
+#: cinder/volume/manager.py:412
+msgid "volume is not local to this node"
+msgstr "o volume não é local a este node"
+
+#: cinder/volume/manager.py:417
+#, python-format
+msgid "volume %s: removing export"
+msgstr "volume %s: removendo export"
+
+#: cinder/volume/manager.py:422
+#, python-format
+msgid "volume %s: volume is busy"
+msgstr ""
+
+#: cinder/volume/manager.py:441
+msgid "Failed to update usages deleting volume"
+msgstr ""
+
+#: cinder/volume/manager.py:445
+#, python-format
+msgid "volume %s: deleted successfully"
+msgstr "volume %s: remoção realizada com sucesso"
+
+#: cinder/volume/manager.py:460
+#, python-format
+msgid "snapshot %s: creating"
+msgstr ""
+
+#: cinder/volume/manager.py:466
+#, python-format
+msgid "snapshot %(snap_name)s: creating"
+msgstr ""
+
+#: cinder/volume/manager.py:484
+#, python-format
+msgid "snapshot %s: created successfully"
+msgstr ""
+
+#: cinder/volume/manager.py:492 cinder/volume/manager.py:502
+#, python-format
+msgid "snapshot %s: deleting"
+msgstr ""
+
+#: cinder/volume/manager.py:505
+#, python-format
+msgid "snapshot %s: snapshot is busy"
+msgstr "snapshot %s: o snapshot está ocupado"
+
+#: cinder/volume/manager.py:530
+msgid "Failed to update usages deleting snapshot"
+msgstr ""
+
+#: cinder/volume/manager.py:533
+#, python-format
+msgid "snapshot %s: deleted successfully"
+msgstr ""
+
+#: cinder/volume/manager.py:551
+msgid "being attached by another instance"
+msgstr ""
+
+#: cinder/volume/manager.py:608
+#, python-format
+msgid "Downloaded image %(image_id)s to %(volume_id)s successfully"
+msgstr ""
+
+#: cinder/volume/manager.py:626
+#, python-format
+msgid "Uploaded volume %(volume_id)s to image (%(image_id)s) successfully"
+msgstr ""
+
+#: cinder/volume/manager.py:702
+msgid "Clear capabilities"
+msgstr ""
+
+#: cinder/volume/manager.py:706
+#, python-format
+msgid "Notification {%s} received"
+msgstr ""
+
+#: cinder/volume/volume_types.py:41
+#, python-format
+msgid "DB error: %s"
+msgstr ""
+
+#: cinder/volume/volume_types.py:50 cinder/volume/volume_types.py:97
+msgid "id cannot be None"
+msgstr ""
+
+#: cinder/volume/volume_types.py:109
+msgid "name cannot be None"
+msgstr ""
+
+#: cinder/volume/volume_types.py:128
+#, python-format
+msgid ""
+"Default volume type is not found, please check default_volume_type "
+"config: %s"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:91
+msgid "Running with CoraidDriver for ESM EtherCLoud"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:102
+#, python-format
+msgid "Update session cookie %(session)s"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:108 cinder/volume/drivers/coraid.py:183
+#, python-format
+msgid "Message : %(message)s"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:125
+#, python-format
+msgid "Error while trying to set group: %(message)s"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:128
+#, python-format
+msgid "Unable to find group: %(group)s"
+msgstr "Não é possível localizar o grupo: %(group)s"
+
+#: cinder/volume/drivers/coraid.py:161
+msgid "ESM urlOpen error"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:166
+msgid "JSON Error"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:170
+msgid "Request without URL"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:175
+#, python-format
+msgid "Configure data : %s"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:177
+#, python-format
+msgid "Configure response : %s"
+msgstr "Resposta da configuração: %s"
+
+#: 
cinder/volume/drivers/coraid.py:199
+#, python-format
+msgid "Unable to retrive volume infos for volume %(volname)s"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:289
+msgid "Cannot login on Coraid ESM"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:308
+#, python-format
+msgid "Fail to create volume %(volname)s"
+msgstr "Falha ao criar o volume %(volname)s"
+
+#: cinder/volume/drivers/coraid.py:321
+#, python-format
+msgid "Failed to delete volume %(volname)s"
+msgstr "Falha ao excluir o volume %(volname)s"
+
+#: cinder/volume/drivers/coraid.py:335
+#, python-format
+msgid "Failed to Create Snapshot %(snapname)s"
+msgstr "Falha ao criar o snapshot %(snapname)s"
+
+#: cinder/volume/drivers/coraid.py:347
+#, python-format
+msgid "Failed to Delete Snapshot %(snapname)s"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:362
+#, python-format
+msgid "Failed to Create Volume from Snapshot %(snapname)s"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:383
+#, python-format
+msgid ""
+"Failed to Initialize Connection. Volume Name: %(volname)s Shelf: "
+"%(shelf)s, Lun: %(lun)s"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:65
+#, python-format
+msgid "There's no Gluster config file configured (%s)"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:70
+#, python-format
+msgid "Gluster config file at %(config)s doesn't exist"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:80
+msgid "mount.glusterfs is not installed"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:98 cinder/volume/drivers/nfs.py:168
+#, python-format
+msgid "casted to %s"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:108 cinder/volume/drivers/nfs.py:178
+#, python-format
+msgid "Volume %s does not have provider_location specified, skipping"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:168 cinder/volume/drivers/nfs.py:237
+#, python-format
+msgid "Exception during mounting %s"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:247 cinder/volume/drivers/nfs.py:321
+#, python-format
+msgid "%s is already mounted"
+msgstr "%s já está montado"
+
+#: cinder/volume/drivers/lvm.py:82
+#, python-format
+msgid "volume group %s doesn't exist"
+msgstr ""
+
+#: cinder/volume/drivers/lvm.py:197
+#, python-format
+msgid "Size for volume: %s not found, skipping secure delete."
+msgstr ""
+
+#: cinder/volume/drivers/lvm.py:204
+#, python-format
+msgid "Performing secure delete on volume: %s"
+msgstr ""
+
+#: cinder/volume/drivers/lvm.py:218
+#, python-format
+msgid "Error unrecognized volume_clear option: %s"
+msgstr ""
+
+#: cinder/volume/drivers/lvm.py:238
+#, python-format
+msgid "snapshot: %s not found, skipping delete operations"
+msgstr ""
+
+#: cinder/volume/drivers/lvm.py:268 cinder/volume/drivers/lvm.py:656
+#, python-format
+msgid "Creating clone of volume: %s"
+msgstr "Criando clone do volume: %s"
+
+#: cinder/volume/drivers/lvm.py:345
+#, python-format
+msgid "Skipping ensure_export. No iscsi_target provision for volume: %s"
+msgstr ""
+
+#: cinder/volume/drivers/lvm.py:364
+#, python-format
+msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %s"
+msgstr ""
+
+#: cinder/volume/drivers/lvm.py:378
+msgid "Detected inconsistency in provider_location id"
+msgstr ""
+
+#: cinder/volume/drivers/lvm.py:430
+#, python-format
+msgid "Symbolic link %s not found"
+msgstr ""
+
+#: cinder/volume/drivers/lvm.py:510 cinder/volume/drivers/lvm.py:524
+#, python-format
+msgid "Skipping remove_export. 
No iscsi_target provisioned for volume: %s"
+msgstr ""
+
+#: cinder/volume/drivers/lvm.py:542
+#, python-format
+msgid ""
+"Skipping remove_export. No iscsi_target is presently exported for volume:"
+" %s"
+msgstr ""
+
+#: cinder/volume/drivers/lvm.py:583
+msgid "Error retrieving volume status: "
+msgstr ""
+
+#: cinder/volume/drivers/nfs.py:141
+#, python-format
+msgid "There's no NFS config file configured (%s)"
+msgstr ""
+
+#: cinder/volume/drivers/nfs.py:146
+#, python-format
+msgid "NFS config file at %(config)s doesn't exist"
+msgstr ""
+
+#: cinder/volume/drivers/rbd.py:64
+#, python-format
+msgid "rbd has no pool %s"
+msgstr ""
+
+#: cinder/volume/drivers/rbd.py:87 cinder/volume/drivers/sheepdog.py:134
+msgid "error refreshing volume stats"
+msgstr ""
+
+#: cinder/volume/drivers/rbd.py:220
+msgid "Not stored in rbd"
+msgstr ""
+
+#: cinder/volume/drivers/rbd.py:224
+msgid "Blank components"
+msgstr ""
+
+#: cinder/volume/drivers/rbd.py:227
+msgid "Not an rbd snapshot"
+msgstr "Não é um snapshot rbd"
+
+#: cinder/volume/drivers/rbd.py:242
+#, python-format
+msgid "%s is in a different ceph cluster"
+msgstr ""
+
+#: cinder/volume/drivers/rbd.py:253
+#, python-format
+msgid "Unable to read image %s"
+msgstr "Não é possível ler a imagem %s"
+
+#: cinder/volume/drivers/scality.py:63
+msgid "Value required for 'scality_sofs_config'"
+msgstr ""
+
+#: cinder/volume/drivers/scality.py:74
+#, python-format
+msgid "Cannot access 'scality_sofs_config': %s"
+msgstr ""
+
+#: cinder/volume/drivers/scality.py:80
+msgid "Cannot execute /sbin/mount.sofs"
+msgstr ""
+
+#: cinder/volume/drivers/scality.py:101
+msgid "Cannot mount Scality SOFS, check syslog for errors"
+msgstr ""
+
+#: cinder/volume/drivers/scality.py:135
+#, python-format
+msgid "Cannot find volume dir for Scality SOFS at '%s'"
+msgstr ""
+
+#: cinder/volume/drivers/sheepdog.py:47
+#, python-format
+msgid "Sheepdog is not working: %s"
+msgstr ""
+
+#: cinder/volume/drivers/sheepdog.py:52
+msgid "Sheepdog is not working"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:129
+#, python-format
+msgid "Payload for SolidFire API call: %s"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:146
+#, python-format
+msgid "Call to json.loads() raised an exception: %s"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:152
+#, python-format
+msgid "Results of SolidFire API call: %s"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:156
+#, python-format
+msgid "Clone operation encountered: %s"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:158
+#, python-format
+msgid "Waiting for outstanding operation before retrying snapshot: %s"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:164
+#, python-format
+msgid "Detected xDBVersionMismatch, retry %s of 5"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:171
+#, python-format
+msgid "API response: %s"
+msgstr "Resposta da API: %s"
+
+#: cinder/volume/drivers/solidfire.py:191
+#, python-format
+msgid "Found solidfire account: %s"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:220
+#, python-format
+msgid "solidfire account: %s does not exist, create it..."
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:281
+#, python-format
+msgid "Failed to retrieve volume SolidFire-ID: %s in get_by_account!"
+msgstr "" + +#: cinder/volume/drivers/solidfire.py:338 +#, fuzzy +msgid "Failed to get model update from clone" +msgstr "Falha ao obter metadados para o ip: %s" + +#: cinder/volume/drivers/solidfire.py:364 +#, python-format +msgid "More than one valid preset was detected, using %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:397 +#, python-format +msgid "Mapped SolidFire volumeID %(sfid)s to cinder ID %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:406 +#, python-format +msgid "Volume %s, not found on SF Cluster." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:409 +#, python-format +msgid "Found %(count)s volumes mapped to id: %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:476 +msgid "Enter SolidFire delete_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:480 +#, python-format +msgid "Account for Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:482 +msgid "This usually means the volume was never succesfully created." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:497 +#, python-format +msgid "Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:500 +msgid "Leaving SolidFire delete_volume" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:504 +msgid "Executing SolidFire ensure_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:509 +msgid "Executing SolidFire create_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:558 +msgid "Updating cluster status info" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:566 +#, fuzzy +msgid "Failed to get updated stats" +msgstr "Começando a terminar instâncias" + +#: cinder/volume/drivers/storwize_svc.py:173 +#, python-format +msgid "WWPN on node %(node)s: %(wwpn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:179 +msgid "enter: do_setup" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:190 +#, python-format +msgid "pool %s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:206 +msgid "Failed to get license information." 
+msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:265 +msgid "do_setup: No configured nodes" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:267 +msgid "leave: do_setup" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:289 +msgid "enter: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:295 +#: cinder/volume/drivers/netapp/iscsi.py:169 +#: cinder/volume/drivers/netapp/iscsi.py:1182 +#: cinder/volume/drivers/netapp/iscsi.py:1535 +#: cinder/volume/drivers/netapp/nfs.py:109 +#: cinder/volume/drivers/netapp/nfs.py:328 +#: cinder/volume/drivers/netapp/nfs.py:379 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:118 +#, python-format +msgid "%s is not set" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:301 +msgid "" +"Password or SSH private key is required for authentication: set either " +"san_password or san_private_key option" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:309 +#, python-format +msgid "" +"Illegal value %d specified for storwize_svc_flashcopy_timeout: valid " +"values are between 0 and 600" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:317 +msgid "leave: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:331 +#, python-format +msgid "ensure_export: Volume %s not found on storage" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:356 +#, python-format +msgid "enter: _get_chap_secret_for_host: host name %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:392 +#, python-format +msgid "" +"leave: _get_chap_secret_for_host: host name %(host_name)s with secret " +"%(chap_secret)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:415 +msgid "_create_host: Cannot clean host name. Host name is not unicode or string" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:483 +#, python-format +msgid "enter: _get_host_from_connector: prefix %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:511 +#, python-format +msgid "leave: _get_host_from_connector: host %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:523 +#, python-format +msgid "enter: _create_host: host %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:538 +msgid "_create_host: No connector ports" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:551 +#, python-format +msgid "leave: _create_host: host %(host)s - %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:574 +#, python-format +msgid "enter: _map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:609 +msgid "" +"storwize_svc_multihostmap_enabled is set to Flase, Not allow multi host " +"mapping" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:619 +#, fuzzy, python-format +msgid "volume %s mapping to multi host" +msgstr "volume %s: ignorando export" + +#: cinder/volume/drivers/storwize_svc.py:625 +#, python-format +msgid "" +"leave: _map_vol_to_host: LUN %(result_lun)s, volume %(volume_name)s, host" +" %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:635 +#, python-format +msgid "enter: _delete_host: host %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:643 +#, python-format +msgid "leave: _delete_host: host %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:675 +#, python-format +msgid "enter: initialize_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:691 +msgid "_create_host failed to return the host name." 
+msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:702 +#, python-format +msgid "initialize_connection: Failed to get attributes for volume %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:709 +#, python-format +msgid "Did not find expected column name in lsvdisk: %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:711 +#, python-format +msgid "initialize_connection: Missing volume attribute for volume %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:728 +#, python-format +msgid "" +"initialize_connection: No node found in I/O group %(gid)s for volume " +"%(vol)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:736 +#, python-format +msgid "initialize_connection: Did not find a preferred node for volume %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:769 +#, python-format +msgid "" +"initialize_connection: Failed to collect return properties for volume " +"%(vol)s and connector %(conn)s.\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:774 +#, python-format +msgid "" +"leave: initialize_connection:\n" +" volume: %(vol)s\n" +" connector %(conn)s\n" +" properties: %(prop)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:792 +#, python-format +msgid "enter: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:802 +msgid "_get_host_from_connector failed to return the host name for connector" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:817 +#, python-format +msgid "" +"terminate_connection: No mapping of volume %(vol_name)s to host " +"%(host_name)s found" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:825 +#, python-format +msgid "leave: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:883 +msgid "protocol must be specified as ' iSCSI' or ' FC'" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:907 +#, python-format +msgid "enter: _create_vdisk: vdisk %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:945 +#, python-format +msgid "" +"_create_vdisk %(name)s - did not find success message in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:950 +#, python-format +msgid "leave: _create_vdisk: volume %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:962 +#: cinder/volume/drivers/storwize_svc.py:976 +#, python-format +msgid "" +"create FC mapping from %(source)s to %(target)s - did not find success " +"message in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:988 +#: cinder/volume/drivers/storwize_svc.py:998 +#, python-format +msgid "" +"create FC mapping from %(source)s to %(target)s - did not find mapping id" +" in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1012 +#, python-format +msgid "" +"_prepare_fc_map: Failed to prepare FlashCopy from %(source)s to " +"%(target)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1039 +#, python-format +msgid "" +"Unexecpted mapping status %(status)s for mapping %(id)s. Attributes: " +"%(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1050 +#, python-format +msgid "" +"Mapping %(id)s prepare failed to complete within the allotted %(to)d " +"seconds timeout. Terminating." 
+msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1055 +#, python-format +msgid "" +"_prepare_fc_map: Failed to start FlashCopy from %(source)s to %(target)s " +"with exception %(ex)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1062 +#, python-format +msgid "_prepare_fc_map: %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1069 +#, python-format +msgid "" +"_start_fc_map: Failed to start FlashCopy from %(source)s to %(target)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1080 +#, python-format +msgid "" +"enter: _run_flashcopy: execute FlashCopy from source %(source)s to target" +" %(target)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1092 +#, python-format +msgid "leave: _run_flashcopy: FlashCopy started from %(source)s to %(target)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1100 +#, python-format +msgid "enter: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1107 +#, python-format +msgid "_create_copy: Source vdisk %s does not exist" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1119 +#, python-format +msgid "" +"_create_copy: cannot get source vdisk %(src)s capacity from vdisk " +"attributes %(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1129 +#, python-format +msgid "leave: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1134 +#, python-format +msgid "enter: _get_flashcopy_mapping_attributes: mapping %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1156 +#, python-format +msgid "" +"leave: _get_flashcopy_mapping_attributes: mapping %(fc_map_id)s, " +"attributes %(attributes)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1164 +#, python-format +msgid "enter: _is_vdisk_defined: vdisk %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1166 +#, python-format +msgid "leave: _is_vdisk_defined: vdisk %(vol)s with %(str)s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1194 +#, python-format +msgid "enter: _delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1199 +#, python-format +msgid "warning: Tried to delete vdisk %s but it does not exist." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1225 +#, python-format +msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1258 +#, python-format +msgid "leave: _delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1283 +msgid "create_volume_from_snapshot: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1297 +msgid "create_cloned_volume: Source and destination size differ." 
+msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1361 +msgid "_update_volume_status: Could not get system name" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1373 +msgid "Could not get pool data from the storage" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1374 +msgid "_update_volume_status: Could not get storage pool data" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1412 +msgid "" +"Illegal value specified for storwize_svc_vol_rsize: set to either a " +"percentage (0-100) or -1" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1418 +msgid "" +"Illegal value specified for storwize_svc_vol_warning: set to a percentage" +" (0-100)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1425 +msgid "" +"Illegal value specified for storwize_svc_vol_grainsize: set to either 32," +" 64, 128, or 256" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1432 +msgid "System does not support compression" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1437 +msgid "If compression is set to True, rsize must also be set (not equal to -1)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1443 +#, python-format +msgid "" +"Illegal value %(prot)s specified for storwize_svc_connection_protocol: " +"valid values are %(enabled)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1452 +msgid "" +"Multipath is currently only supported for FC connections and not iSCSI. " +"(This is a Nova limitation.)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1464 +#, python-format +msgid "enter: _execute_command_and_parse_attributes: command %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1471 +#, fuzzy, python-format +msgid "" +"CLI Exception output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" +"%(description)s\n" +"Comando: %(cmd)s\n" +"Código de saída: %(exit_code)s\n" +"Saída padrão: %(stdout)r\n" +"Erro: %(stderr)r" + +#: cinder/volume/drivers/storwize_svc.py:1488 +#, python-format +msgid "" +"leave: _execute_command_and_parse_attributes:\n" +"command: %(cmd)s\n" +"attributes: %(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1507 +#, python-format +msgid "" +"_get_hdr_dic: attribute headers and values do not match.\n" +" Headers: %(header)s\n" +" Values: %(row)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1517 +#, python-format +msgid "" +"%(fun)s: Failed with unexpected CLI output.\n" +" Command: %(cmd)s\n" +"stdout: %(out)s\n" +"stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1531 +#, python-format +msgid "" +"%(fun)s: Failed with unexpected CLI output.\n" +" Command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1539 +#, python-format +msgid "Did not find expected column in %(fun)s: %(hdr)s" +msgstr "" + +#: cinder/volume/drivers/windows.py:142 +#, fuzzy, python-format +msgid "Creating folder %s " +msgstr "Apagando usuário %s" + +#: cinder/volume/drivers/windows.py:207 +#: cinder/volume/drivers/nexenta/volume.py:203 +#, python-format +msgid "Ignored target creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/windows.py:213 +#, fuzzy, python-format +msgid "Disk not found: %s" +msgstr "Endereço para Link Local não encontrado: %s" + +#: cinder/volume/drivers/zadara.py:218 +#, python-format +msgid "Sending %(method)s to %(url)s. Body \"%(body)s\"" +msgstr "" + +#: cinder/volume/drivers/zadara.py:240 +#, python-format +msgid "Operation completed. 
%(data)s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:350 +#, python-format +msgid "Volume %(name)s could not be found. It might be already deleted" +msgstr "" + +#: cinder/volume/drivers/zadara.py:447 +#, python-format +msgid "Attach properties: %(properties)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:42 +msgid "" +"Module PyWBEM not installed. Install PyWBEM using the python-pywbem " +"package." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:78 +msgid "Entering create_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:82 +#, fuzzy, python-format +msgid "Create Volume: %(volume)s Size: %(size)lu" +msgstr "volume %(vol_name)s: criando lv com tamanho %(vol_size)sG" + +#: cinder/volume/drivers/emc/emc_smis_common.py:90 +#, python-format +msgid "Create Volume: %(volume)s Storage type: %(storage_type)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:97 +#, python-format +msgid "" +"Create Volume: %(volume)s Pool: %(pool)s Storage System: " +"%(storage_system)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:106 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Storage Configuration Service not " +"found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:114 +#, python-format +msgid "" +"Create Volume: %(name)s Method: CreateOrModifyElementFromStoragePool " +"ConfigServicie: %(service)s ElementName: %(name)s InPool: %(pool)s " +"ElementType: 5 Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:129 +#, python-format +msgid "Create Volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:136 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:143 +#, python-format +msgid "Leaving create_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:151 +msgid "Entering create_volume_from_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:156 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:166 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Snapshot Instance: %(snapshotinstance)s Storage " +"System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:176 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Create Volume from Snapshot is NOT supported on VMAX." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:187 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Cannot find Replication Service to create volume from " +"snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:196 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Method: CreateElementReplica ReplicationService: " +"%(service)s ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:217 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s " +"Snapshot:%(snapshotname)s. 
Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:229 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Successfully clone volume from snapshot. Finding the " +"clone relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:240 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Remove the clone relationship. Method: " +"ModifyReplicaSynchronization ReplicationService: %(service)s Operation: " +"8 Synchronization: %(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:256 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:265 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:277 +#, python-format +msgid "" +"Leaving create_volume_from_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:286 +msgid "Entering create_cloned_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:291 +#, python-format +msgid "" +"Create a Clone from Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:301 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Source Instance: %(src_instance)s Storage System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:311 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Cannot find Replication Service to create cloned volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:320 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Method: CreateElementReplica ReplicationService: %(service)s " +"ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:341 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source " +"Volume:%(srcname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:353 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Successfully cloned volume from source volume. Finding the clone " +"relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:364 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Remove the clone relationship. Method: ModifyReplicaSynchronization " +"ReplicationService: %(service)s Operation: 8 Synchronization: " +"%(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:380 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:389 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Return code: %(rc)lu. 
Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:401 +#, python-format +msgid "" +"Leaving create_cloned_volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:410 +msgid "Entering delete_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:412 +#, python-format +msgid "Delete Volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:419 +#, python-format +msgid "Volume %(name)s not found on the array. No volume to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:429 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Storage Configuration Service not " +"found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:437 +#, python-format +msgid "Delete Volume: %(name)s DeviceID: %(deviceid)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:441 +#, python-format +msgid "" +"Delete Volume: %(name)s Method: EMCReturnToStoragePool ConfigServic: " +"%(service)s TheElement: %(vol_instance)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:454 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:463 +#, python-format +msgid "Leaving delete_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:470 +msgid "Entering create_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:474 +#, python-format +msgid "Create snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:486 +#, python-format +msgid "Device ID: %(deviceid)s: Storage System: %(storagesystem)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:493 +#: cinder/volume/drivers/emc/emc_smis_common.py:495 +#: cinder/volume/drivers/emc/emc_smis_common.py:565 +#, python-format +msgid "Cannot find Replication Service to create snapshot for volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:500 +#, python-format +msgid "" +"Create Snapshot: Method: CreateElementReplica: Target: %(snapshot)s " +"Source: %(volume)s Replication Service: %(service)s ElementName: " +"%(elementname)s Sync Type: 7 SourceElement: %(sourceelement)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:516 +#, python-format +msgid "" +"Create Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:525 +#, python-format +msgid "Error Create Snapshot: (snapshot)s Volume: %(volume)s Error: %(errordesc)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:533 +#, python-format +msgid "" +"Leaving create_snapshot: Snapshot: %(snapshot)s Volume: %(volume)s " +"Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:539 +msgid "Entering delete_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:543 +#, python-format +msgid "Delete Snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:549 +#, python-format +msgid "" +"Delete Snapshot: %(snapshot)s: volume: %(volume)s. Finding " +"StorageSychronization_SV_SV." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:557 +#, python-format +msgid "" +"Snapshot: %(snapshot)s: volume: %(volume)s not found on the array. No " +"snapshot to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:572 +#, python-format +msgid "" +"Delete Snapshot: Target: %(snapshot)s Source: %(volume)s. Method: " +"ModifyReplicaSynchronization: Replication Service: %(service)s " +"Operation: 19 Synchronization: %(sync_name)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:588 +#, python-format +msgid "" +"Delete Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:597 +#, python-format +msgid "" +"Error Delete Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:609 +#, python-format +msgid "" +"Leaving delete_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:619 +#, fuzzy, python-format +msgid "Create export: %(volume)s" +msgstr "Re-exportando %s volumes" + +#: cinder/volume/drivers/emc/emc_smis_common.py:624 +#, python-format +msgid "create_export: Volume: %(volume)s Device ID: %(device_id)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:646 +#, python-format +msgid "" +"ExposePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(lun_name)s" +" InitiatorPortIDs: %(initiator)s DeviceAccesses: 2" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:661 +#, python-format +msgid "ExposePaths parameter LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:672 +#, fuzzy, python-format +msgid "Error mapping volume %s." +msgstr "Desanexar volume %s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:676 +#, fuzzy, python-format +msgid "ExposePaths for volume %s completed successfully." +msgstr "volume %s: remoção realizada com sucesso" + +#: cinder/volume/drivers/emc/emc_smis_common.py:692 +#, python-format +msgid "" +"HidePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(device_id)s " +"LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:705 +#, python-format +msgid "Error unmapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:709 +#, fuzzy, python-format +msgid "HidePaths for volume %s completed successfully." +msgstr "volume %s: remoção realizada com sucesso" + +#: cinder/volume/drivers/emc/emc_smis_common.py:722 +#, python-format +msgid "" +"AddMembers: ConfigServicie: %(service)s MaskingGroup: %(masking_group)s" +" Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:735 +#, python-format +msgid "Error mapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:740 +#, fuzzy, python-format +msgid "AddMembers for volume %s completed successfully." +msgstr "volume %s: remoção realizada com sucesso" + +#: cinder/volume/drivers/emc/emc_smis_common.py:753 +#, python-format +msgid "" +"RemoveMembers: ConfigServicie: %(service)s MaskingGroup: " +"%(masking_group)s Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:766 +#, python-format +msgid "Error unmapping volume %(vol)s. 
%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:771 +#, fuzzy, python-format +msgid "RemoveMembers for volume %s completed successfully." +msgstr "volume %s: remoção realizada com sucesso" + +#: cinder/volume/drivers/emc/emc_smis_common.py:777 +#, python-format +msgid "Map volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:786 +#: cinder/volume/drivers/emc/emc_smis_common.py:816 +#, python-format +msgid "Cannot find Controller Configuration Service for storage system %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:800 +#, python-format +msgid "Unmap volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:806 +#, python-format +msgid "Volume %s is not mapped. No volume to unmap." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:830 +#, python-format +msgid "Initialize connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:836 +#, python-format +msgid "Volume %s is already mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:848 +#, python-format +msgid "Terminate connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:880 +#, python-format +msgid "Found Storage Type: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:883 +msgid "Storage type not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:899 +#, python-format +msgid "Found Masking View: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:902 +msgid "Masking View not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:924 +msgid "Ecom user not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:944 +#, python-format +msgid "Ecom IP: %(ecomIp)s Port: %(ecomPort)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:947 +msgid "Ecom server not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:954 +#, fuzzy +msgid "Cannot connect to ECOM server" +msgstr "Reconectado à fila" + +#: cinder/volume/drivers/emc/emc_smis_common.py:966 +#, python-format +msgid "Found Replication Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:979 +#, python-format +msgid "Found Storage Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:992 +#, python-format +msgid "Found Controller Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1005 +#, python-format +msgid "Found Storage Hardware ID Management Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1049 +#, python-format +msgid "Pool %(storage_type)s is not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1055 +#, python-format +msgid "Storage system not found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1061 +#, python-format +msgid "Pool: %(pool)s SystemName: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1077 +#, python-format +msgid "Pool name: %(poolname)s System name: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1109 +#, python-format +msgid "Volume %(volumename)s not found on the array." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1112 +#, python-format +msgid "Volume name: %(volumename)s Volume instance: %(vol_instance)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1125 +#, python-format +msgid "Source: %(volumename)s Target: %(snapshotname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1148 +#, python-format +msgid "" +"Source: %(volumename)s Target: %(snapshotname)s. Storage Synchronized " +"not found. " +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1153 +#, python-format +msgid "" +"Storage system: %(storage_system)s Storage Synchronized instance: " +"%(sync)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1179 +#, python-format +msgid "Error finding %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1183 +#, python-format +msgid "Found %(name)s: %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1243 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage_system)s " +"and initiator %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1282 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage volume %(vol)s and initiator" +" %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1295 +#, python-format +msgid "" +"Volume %(name)s not found on the array. Cannot determine if there are " +"volumes mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1307 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage)s and " +"%(connector)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1319 +#, python-format +msgid "" +"Found %(numVolumesMapped)d volumes on storage system %(storage)s mapped " +"to %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1354 +#, python-format +msgid "Available device number on %(storage)s: %(device)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1397 +#, python-format +msgid "Device number not found for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1402 +#, python-format +msgid "Found device number %(device)d for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1412 +#, python-format +msgid "Device info: %(data)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1434 +#, python-format +msgid "Masking view: %(view)s DeviceMaskingGroup: %(masking)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1456 +#, python-format +msgid "Found Storage Processor System: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1484 +#, python-format +msgid "" +"iSCSIProtocolEndpoint for storage system %(storage_system)s and SP %(sp)s" +" is %(endpoint)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1513 +msgid "Error finding Storage Hardware ID Service." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1519 +#, python-format +msgid "" +"EMCGetTargetEndpoints: Service: %(service)s Storage HardwareIDs: " +"%(hardwareids)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1531 +msgid "Error finding Target WWNs." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1541 +#, python-format +msgid "Add target WWN: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1543 +#, python-format +msgid "Target WWNs: %s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1559 +#, python-format +msgid "Storage Hardware IDs for %(wwpns)s is %(foundInstances)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:158 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:167 +#, fuzzy, python-format +msgid "Cannot find device number for volume %s" +msgstr "Não é possível localizar o volume %s" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:197 +#, fuzzy, python-format +msgid "Found iSCSI endpoint: %s" +msgstr "NotFound lançado: %s" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:204 +#, python-format +msgid "ISCSI endpoint not found for SP %(sp)s on storage system %(storage)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:221 +#, python-format +msgid "ISCSI properties: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:113 +msgid "read timed out" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:138 +msgid "do_setup." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:144 +msgid "check_for_setup_error." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:149 +msgid "check_for_setup_error: Can not get device type." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:153 +#, python-format +msgid "check_for_setup_error: Device type is:%(type)s, version is:%(version)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:160 +msgid "" +"check_for_setup_error: Product version not right. Please make sure the " +"product version is V1." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:171 +msgid "_get_device_type: Storage Pool must be configured." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:180 +#, python-format +msgid "create_volume:volume name: %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:194 +#, python-format +msgid "delete_volume: volume name: %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:201 +#, python-format +msgid "delete_volume:No need to delete volume. Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:210 +#, python-format +msgid "create_export: volume name:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:214 +#, python-format +msgid "create_export:Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:234 +#, python-format +msgid "initialize_connection: volume name: %(volume)s. initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:249 +#, python-format +msgid "" +"initialize_connection:Failed to find target ip for " +"initiator:%(initiatorname)s, please check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:259 +#, python-format +msgid "initialize_connection:Failed to find target iSCSI iqn. Target IP:%(ip)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:297 +#, python-format +msgid "" +"initialize_connection:host name: %(host)s, initiator name: %(ini)s, " +"hostport name: %(port)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:307 +#, python-format +msgid "" +"initialize_connection:Failed to find the given volume. volume " +"name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:359 +#, python-format +msgid "terminate_connection:volume name: %(volume)s, initiator name: %(ini)s." 
+msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:368 +#, python-format +msgid "terminate_connection:Host does not exist. Host name:%(host)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:377 +#, python-format +msgid "terminate_connection:volume does not exist. volume name:%(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:396 +#, python-format +msgid "" +"terminate_connection:No map between host and volume. Host " +"name:%(hostname)s, volume name:%(volumename)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:414 +#, python-format +msgid "" +"terminate_connection:No initiator is added to the host. Host " +"name:%(hostname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:427 +#, python-format +msgid "create_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:434 +msgid "create_snapshot:Device does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:440 +msgid "create_snapshot:Resource pool needs 1GB valid size at least." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:447 +#, python-format +msgid "create_snapshot:Volume does not exist. Volume name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:456 +#, python-format +msgid "create_snapshot:Snapshot does not exist. Snapshot name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:468 +#, python-format +msgid "delete_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:475 +msgid "delete_snapshot:Device does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:484 +#, python-format +msgid "delete_snapshot:Snapshot does not exist. snapshot name:%(snap)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:497 +#, python-format +msgid "" +"create_volume_from_snapshot:snapshot name:%(snapshot)s, volume " +"name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:505 +#, python-format +msgid "" +"create_volume_from_snapshot:Device does not support create volume from " +"snapshot. Volume name:%(volume)s, snapshot name:%(snapshot)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:516 +#, python-format +msgid "" +"create_volume_from_snapshot:Snapshot does not exist. Snapshot " +"name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:567 +msgid "Config file is wrong. Controler IP, UserName and UserPassword must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:573 +#, python-format +msgid "_check_conf_file: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:584 +#, python-format +msgid "_read_xml:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:612 +#, python-format +msgid "Write login information to xml error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:616 +#, python-format +msgid "_get_login_info error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:638 +#, python-format +msgid "" +"Config file is wrong. LUNType must be \"Thin\" or \"Thick\". " +"LUNType:%(type)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:665 +msgid "_get_lun_set_info:Use default prefetch type. Prefetch type:Intelligent." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:676 +msgid "" +"_get_lun_set_info:No available pools! 
Please check whether storage pool " +"is created." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:686 +#, python-format +msgid "_get_lun_set_info:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:736 +msgid "" +"_get_maximum_pool:maxpoolid is None. Please check config file and make " +"sure the \"Name\" in \"StoragePool\" is right." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:754 +#, python-format +msgid "_get_iscsi_info:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:764 +#, python-format +msgid "CLI command:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:831 +#, python-format +msgid "_execute_cli:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:838 +#, python-format +msgid "" +"_name_translate:Name in cinder: %(old)s, new name in storage system: " +"%(new)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:872 +#, python-format +msgid "" +"_create_hostgroup:Failed to Create hostgroup. Hostgroup name: %(name)s. " +"out:%(out)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:901 +#, python-format +msgid "" +"_add_host:Failed to add host to hostgroup. host name:%(host)s hostgroup " +"id:%(hostgroup)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:927 +#, python-format +msgid "" +"_add_initiator:Failed to add initiator. initiator name:%(name)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:941 +#, python-format +msgid "" +"_delete_initiator:ERROE:Failed to delete initiator. initiator " +"name:%(name)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:991 +#, python-format +msgid "" +"_add_hostport:Failed to add hostport. port name:%(port)s port " +"information:%(info)s host id:%(host)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1009 +#, python-format +msgid "_delete_hostport:Failed to delete host port. port id:%(portid)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1016 +#, python-format +msgid "_get_tgt_iqn:iSCSI IP is %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1049 +#, python-format +msgid "_get_tgt_iqn:iSCSI target iqn is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1086 +#, python-format +msgid "" +"_map_lun:Failed to add hostmap. hostid:%(host)s lunid:%(lun)s " +"hostlunid:%(hostlunid)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1122 +#, python-format +msgid "" +"_delete_map:There are IOs accessing the system. Retry to delete host map." +" map id:%(mapid)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1128 +#, python-format +msgid "_delete_map:Failed to delete host map. mapid:%(mapid)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1142 +#, python-format +msgid "_delete_host: Failed delete host. host id:%(hostid)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1223 +#, python-format +msgid "" +"_active_snapshot:Failed to active snapshot. snapshot id:%(name)s. " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1237 +#, python-format +msgid "" +"_disable_snapshot:Failed to disable snapshot. snapshot id:%(id)s. " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1251 +#, python-format +msgid "" +"_delete_snapshot:Failed to delete snapshot. snapshot id:%(id)s. 
" +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1308 +#, python-format +msgid "_create_volume:Failed to Create volume. volume name:%(name)s. out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1321 +#, python-format +msgid "_delete_volume:Failed to delete volume. Volume name:%(name)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1338 +#, python-format +msgid "" +"_create_luncopy:Failed to Create LUNcopy. LUNcopy name:%(name)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1352 +#, python-format +msgid "" +"_start_luncopy:Failed to start LUNcopy. LUNcopy id:%(luncopyid)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1382 +#, python-format +msgid "" +"_wait_for_luncopy:LUNcopy status is not normal. LUNcopy " +"name:%(luncopyname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1415 +#, python-format +msgid "" +"_delete_luncopy:Failed to delete LUNcopy. LUNcopy id:%(luncopyid)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1430 +#, python-format +msgid "" +"_create_snapshot:Failed to Create snapshot. Snapshot name:%(name)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1468 +#, python-format +msgid "" +"_change_lun_controller:Failed to change lun owning controller. lun " +"id:%(lunid)s. new controller:%(controller)s. out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1484 +msgid "_is_resource_pool_enough:Resource pool for snapshot not be added." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:123 +#, python-format +msgid "API %(name)s failed: %(reason)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:134 +#: cinder/volume/drivers/netapp/iscsi.py:1167 +#, python-format +msgid "Using WSDL: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:144 +#, python-format +msgid "Using DFM server: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:149 +#, python-format +msgid "Using storage service: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:154 +#, python-format +msgid "Using storage service prefix: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:160 +#, fuzzy, python-format +msgid "Using vfiler: %s" +msgstr "NotFound lançado: %s" + +#: cinder/volume/drivers/netapp/iscsi.py:173 +msgid "Either netapp_storage_service or netapp_storage_service_prefix must be set" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:204 +#, fuzzy +msgid "Connected to DFM server" +msgstr "Reconectado à fila" + +#: cinder/volume/drivers/netapp/iscsi.py:282 +#, python-format +msgid "Discovered %(dataset_count)s datasets and %(lun_count)s LUNs" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:317 +#, fuzzy, python-format +msgid "Job failed: %s" +msgstr "NotFound lançado: %s" + +#: cinder/volume/drivers/netapp/iscsi.py:346 +msgid "" +"Attempt to use volume_type without specifying " +"netapp_storage_service_prefix flag." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:350 +msgid "" +"You must set the netapp_storage_service flag in order to create volumes " +"with no volume_type." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:420 +msgid "Failed to provision dataset member" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:435 +msgid "No LUN was created by the provision job" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:463 +#: cinder/volume/drivers/netapp/iscsi.py:1250 +#, python-format +msgid "No entry in LUN table for volume %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:481 +msgid "Failed to remove and delete dataset LUN member" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:497 +msgid "Failed to remove and delete dataset Qtree member" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:538 +#, python-format +msgid "No entry in LUN table for volume %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:558 +#, fuzzy, python-format +msgid "Failed to get LUN details for LUN ID %s" +msgstr "Falha ao obter metadados para o ip: %s" + +#: cinder/volume/drivers/netapp/iscsi.py:575 +#, fuzzy, python-format +msgid "Failed to get host details for host ID %s" +msgstr "Falha ao obter metadados para o ip: %s" + +#: cinder/volume/drivers/netapp/iscsi.py:812 +#: cinder/volume/drivers/netapp/iscsi.py:859 +#, fuzzy, python-format +msgid "No LUN ID for volume %s" +msgstr "Não é possível localizar o volume %s" + +#: cinder/volume/drivers/netapp/iscsi.py:821 +#, fuzzy, python-format +msgid "Failed to get target portal for filer: %s" +msgstr "Falha ao obter metadados para o ip: %s" + +#: cinder/volume/drivers/netapp/iscsi.py:826 +#, fuzzy, python-format +msgid "Failed to get target IQN for filer: %s" +msgstr "Falha ao obter metadados para o ip: %s" + +#: cinder/volume/drivers/netapp/iscsi.py:930 +msgid "" +"Failed to get LUN list. Is the DFM host time-synchronized with Cinder " +"host?" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1042 +#: cinder/volume/drivers/netapp/iscsi.py:1381 +#: cinder/volume/drivers/netapp/iscsi.py:1699 +#: cinder/volume/drivers/netapp/nfs.py:74 +#, python-format +msgid "" +"Cannot create volume of size %(vol_size)s from snapshot of size " +"%(snap_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1054 +#, python-format +msgid "" +"Cannot create volume of type %(new_type)s from snapshot of type " +"%(old_type)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1075 +#: cinder/volume/drivers/netapp/nfs.py:273 +#, python-format +msgid "" +"Cannot create clone of size %(vol_size)s from volume of size " +"%(src_vol_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1086 +#, python-format +msgid "Cannot create clone of type %(new_type)s from volume of type %(old_type)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1143 +#, python-format +msgid "No metadata property %(prop)s defined for the LUN %(name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1216 +#: cinder/volume/drivers/netapp/iscsi.py:1561 +msgid "Success getting LUN list from server" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1238 +#: cinder/volume/drivers/netapp/iscsi.py:1576 +#, python-format +msgid "Created LUN with name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1254 +#: cinder/volume/drivers/netapp/iscsi.py:1369 +#: cinder/volume/drivers/netapp/iscsi.py:1593 +#, python-format +msgid "Destroyed LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1291 +#, python-format +msgid "Mapped LUN %(handle)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1298 +#, python-format +msgid "" +"Succesfully fetched 
target details for LUN %(handle)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1303 +#: cinder/volume/drivers/netapp/iscsi.py:1637 +#, fuzzy, python-format +msgid "Failed to get LUN target details for the LUN %s" +msgstr "Falha ao obter metadados para o ip: %s" + +#: cinder/volume/drivers/netapp/iscsi.py:1307 +#: cinder/volume/drivers/netapp/iscsi.py:1648 +#, fuzzy, python-format +msgid "Failed to get target portal for the LUN %s" +msgstr "Falha ao obter metadados para o ip: %s" + +#: cinder/volume/drivers/netapp/iscsi.py:1311 +#: cinder/volume/drivers/netapp/iscsi.py:1651 +#, fuzzy, python-format +msgid "Failed to get target IQN for the LUN %s" +msgstr "Falha ao obter metadados para o ip: %s" + +#: cinder/volume/drivers/netapp/iscsi.py:1344 +#, python-format +msgid "Unmapped LUN %(handle)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1365 +#, python-format +msgid "No entry in LUN table for snapshot %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1409 +#: cinder/volume/drivers/netapp/iscsi.py:1907 +msgid "Object is not a NetApp LUN." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1419 +#: cinder/volume/drivers/netapp/iscsi.py:2188 +#, python-format +msgid "Cloned LUN with new name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1439 +#, fuzzy, python-format +msgid "Could not find handle for LUN named %s" +msgstr "Não é possível localizar o volume %s" + +#: cinder/volume/drivers/netapp/iscsi.py:1458 +#: cinder/volume/drivers/netapp/iscsi.py:1936 +#, python-format +msgid "" +"Cannot clone volume of size %(vol_size)s from src volume of size " +"%(src_vol_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1518 +#, python-format +msgid "Using NetApp filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1585 +#, python-format +msgid "No entry in LUN table for volume/snapshot %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1628 +#, python-format +msgid "Mapped LUN %(name)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1632 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(name)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1688 +#, fuzzy, python-format +msgid "Snapshot %s deletion successful" +msgstr "volume %s: remoção realizada com sucesso" + +#: cinder/volume/drivers/netapp/iscsi.py:1717 +#, python-format +msgid "Unmapped LUN %(name)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1735 +#, fuzzy, python-format +msgid "Failed to get vol with required size for volume: %s" +msgstr "Falha ao obter metadados para o ip: %s" + +#: cinder/volume/drivers/netapp/iscsi.py:1811 +#, python-format +msgid "Error mapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1829 +#, python-format +msgid "Error unmapping lun. 
Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1923 +#, python-format +msgid "Could not find attribute for LUN named %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2057 +#, python-format +msgid "No iscsi service found for vserver %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2192 +#, python-format +msgid "No clonned lun named %s found on the filer" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2377 +#, python-format +msgid "Error finding luns for volume %(vol)s. Verify volume exists." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2481 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s completed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2484 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s failed" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:312 +#, python-format +msgid "" +"Cloning with params ip %(host_ip)s, exp_path\n" +" %(export_path)s, vol %(volume_name)s,\n" +" clone_name %(clone_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:461 +#, python-format +msgid "No interface found on cluster for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:487 +#, python-format +msgid "" +"No volume on cluster with vserver\n" +" %(vserver)s and junction path " +"%(junction)s\n" +" " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:493 +#, python-format +msgid "" +"Cloning with params volume %(volume)s,src %(src_path)s,\n" +" dest %(dest_path)s, vserver %(vserver)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:547 +#, python-format +msgid "No storage path found for export path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:555 +#, python-format +msgid "Cloning with src %(src_path)s, dest %(dest_path)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/__init__.py:27 +msgid "Nexenta SA returned the error" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:64 +#, python-format +msgid "Sending JSON data: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:69 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:75 +msgid "No headers in server response" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:76 +msgid "Bad response from server" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:79 +#, fuzzy, python-format +msgid "Got response: %s" +msgstr "resposta %s" + +#: cinder/volume/drivers/nexenta/volume.py:99 +#, python-format +msgid "Volume %s does not exist in Nexenta SA" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:211 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:220 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:228 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:238 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:274 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:282 +#, python-format +msgid "" +"Got error trying to delete target 
%(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:74 +#, python-format +msgid "CLIQ command returned %s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:80 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:88 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:118 +#, python-format +msgid "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. " +"Result=%(_xml)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:171 +#, python-format +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:235 +msgid "local_path not supported" +msgstr "" + +#: cinder/volume/drivers/san/san.py:148 +#, fuzzy, python-format +msgid "Error running SSH command: %s" +msgstr "Erro inesperado ao executar o comando." + +#: cinder/volume/drivers/san/san.py:169 +msgid "Specify san_password or san_private_key" +msgstr "" + +#: cinder/volume/drivers/san/san.py:173 +msgid "san_ip must be set" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:80 +#, python-format +msgid "Cannot parse list-view output: %s" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:174 +#, python-format +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:257 +#, python-format +msgid "SSH Command failed after '%(total_attempts)r' attempts: '%(command)s'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:261 +#, fuzzy, python-format +msgid "Error running ssh command: %s" +msgstr "Erro inesperado ao executar o comando." + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:446 +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:104 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:103 +#, python-format +msgid "CPG (%s) doesn't exist on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:493 +#, python-format +msgid "Must specify a valid persona %(valid)s, value '%(persona)s' is invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:532 +#, python-format +msgid "" +"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " +"invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:569 +#, fuzzy, python-format +msgid "Volume (%s) already exists on array" +msgstr "group %s já existe" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:622 +#, fuzzy, python-format +msgid "Unexpected state while cloning %s" +msgstr "Erro inesperado ao executar o comando." + +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:97 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:96 +msgid "Login to 3PAR array invalid" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:218 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:237 +#, python-format +msgid "The hostname must be called '%s'" +msgstr "" + diff --git a/cinder/locale/ru/LC_MESSAGES/cinder.po b/cinder/locale/ru/LC_MESSAGES/cinder.po new file mode 100644 index 0000000000..bb1482b9fa --- /dev/null +++ b/cinder/locale/ru/LC_MESSAGES/cinder.po @@ -0,0 +1,5660 @@ +# Russian translation for cinder +# Copyright (c) 2011 Rosetta Contributors and Canonical Ltd 2011 +# This file is distributed under the same license as the cinder package. +# FIRST AUTHOR , 2011. 
+# +msgid "" +msgstr "" +"Project-Id-Version: cinder\n" +"Report-Msgid-Bugs-To: FULL NAME \n" +"POT-Creation-Date: 2013-05-21 18:10+0000\n" +"PO-Revision-Date: 2012-03-25 09:34+0000\n" +"Last-Translator: Eugene Marshal \n" +"Language-Team: Russian \n" +"Plural-Forms: nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && " +"n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 0.9.6\n" + +#: cinder/context.py:61 +#, python-format +msgid "Arguments dropped when creating context: %s" +msgstr "" + +#: cinder/context.py:92 +#, python-format +msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" +msgstr "" + +#: cinder/exception.py:63 +msgid "Unexpected error while running command." +msgstr "Неожиданная ошибка при выполнении команды." + +#: cinder/exception.py:66 +#, python-format +msgid "" +"%(description)s\n" +"Command: %(cmd)s\n" +"Exit code: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" +msgstr "" +"%(description)s\n" +"Команда: %(cmd)s\n" +"Код выхода: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" + +#: cinder/exception.py:90 +msgid "DB exception wrapped." +msgstr "" + +#: cinder/exception.py:104 +msgid "An unknown exception occurred." +msgstr "Обнаружено неизвестное исключение." + +#: cinder/exception.py:125 cinder/openstack/common/rpc/common.py:86 +msgid "Exception in string format operation" +msgstr "" + +#: cinder/exception.py:138 +msgid "Connection to glance failed" +msgstr "Сбой соединения с glance" + +#: cinder/exception.py:142 +msgid "Not authorized." +msgstr "Не авторизировано." + +#: cinder/exception.py:147 +msgid "User does not have admin privileges" +msgstr "Пользователь не имеет административных привилегий" + +#: cinder/exception.py:151 +#, python-format +msgid "Policy doesn't allow %(action)s to be performed." +msgstr "Политика не допускает выполнения %(action)s." + +#: cinder/exception.py:155 +#, fuzzy, python-format +msgid "Not authorized for image %(image_id)s." +msgstr "Ядро не найдено для образа %(image_id)s." + +#: cinder/exception.py:159 +msgid "Unacceptable parameters." +msgstr "Недопустимые параметры." + +#: cinder/exception.py:164 +msgid "Invalid snapshot" +msgstr "Недопустимый снимок" + +#: cinder/exception.py:168 +#, fuzzy, python-format +msgid "Volume %(volume_id)s is still attached, detach volume first." +msgstr "Том %(volume_id)s никуда не присоединён" + +#: cinder/exception.py:172 +msgid "Failed to load data into json format" +msgstr "Ошибка загрузки данных в формат json" + +#: cinder/exception.py:176 +msgid "The request is invalid." +msgstr "Недопустимый запрос." + +#: cinder/exception.py:180 +#, fuzzy +msgid "The results are invalid." +msgstr "Недопустимый запрос." + +#: cinder/exception.py:184 +msgid "Invalid input received" +msgstr "" + +#: cinder/exception.py:188 +msgid "Invalid volume type" +msgstr "Недопустимый тип тома" + +#: cinder/exception.py:192 +msgid "Invalid volume" +msgstr "Недопустимый том" + +#: cinder/exception.py:196 +#, python-format +msgid "Invalid content type %(content_type)s." +msgstr "Недопустимый тип содержимого %(content_type)s." + +#: cinder/exception.py:200 +msgid "Invalid Parameter: Unicode is not supported by the current database." +msgstr "" + +#: cinder/exception.py:207 +#, python-format +msgid "%(err)s" +msgstr "" + +#: cinder/exception.py:211 +msgid "Service is unavailable at this time." +msgstr "В данный момент служба недоступна." 
+
+#: cinder/exception.py:215
+#, python-format
+msgid "Image %(image_id)s is unacceptable: %(reason)s"
+msgstr "Образ %(image_id)s недопустим: %(reason)s"
+
+#: cinder/exception.py:219
+msgid "Expected a uuid but received %(uuid)s."
+msgstr ""
+
+#: cinder/exception.py:223
+msgid "Resource could not be found."
+msgstr "Ресурс не может быть найден."
+
+#: cinder/exception.py:229
+#, fuzzy, python-format
+msgid "Volume %(volume_id)s persistence file could not be found."
+msgstr "Том %(volume_id)s не найден."
+
+#: cinder/exception.py:233
+#, python-format
+msgid "Volume %(volume_id)s could not be found."
+msgstr "Том %(volume_id)s не найден."
+
+#: cinder/exception.py:237
+#, python-format
+msgid "Unable to locate account %(account_name)s on Solidfire device"
+msgstr ""
+
+#: cinder/exception.py:242
+#, python-format
+msgid "Volume not found for instance %(instance_id)s."
+msgstr "Не найден том для копии %(instance_id)s."
+
+#: cinder/exception.py:246
+#, python-format
+msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s."
+msgstr "Том %(volume_id)s не имеет метаданных с ключом %(metadata_key)s."
+
+#: cinder/exception.py:251 cinder/exception.py:264
+msgid "Invalid metadata"
+msgstr "Недопустимые метаданные"
+
+#: cinder/exception.py:255 cinder/exception.py:268
+#, fuzzy
+msgid "Invalid metadata size"
+msgstr "Неправильный ключ метаданных"
+
+#: cinder/exception.py:259
+#, fuzzy, python-format
+msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s."
+msgstr "Копия %(instance_id)s не имеет метаданных с ключом %(metadata_key)s."
+
+#: cinder/exception.py:272
+#, python-format
+msgid "Volume type %(volume_type_id)s could not be found."
+msgstr "Тип тома %(volume_type_id)s не может быть найден."
+
+#: cinder/exception.py:276
+#, python-format
+msgid "Volume type with name %(volume_type_name)s could not be found."
+msgstr "Тип тома под названием %(volume_type_name)s не может быть найден."
+
+#: cinder/exception.py:281
+#, python-format
+msgid ""
+"Volume Type %(volume_type_id)s has no extra specs with key "
+"%(extra_specs_key)s."
+msgstr ""
+"Тип тома %(volume_type_id)s не имеет дополнительных особенностей с ключом"
+" %(extra_specs_key)s."
+
+#: cinder/exception.py:286
+#, python-format
+msgid "Snapshot %(snapshot_id)s could not be found."
+msgstr "Снимок %(snapshot_id)s не может быть найден."
+
+#: cinder/exception.py:290
+#, python-format
+msgid "deleting volume %(volume_name)s that has snapshot"
+msgstr "удаление тома %(volume_name)s, который имеет снимок"
+
+#: cinder/exception.py:294
+#, python-format
+msgid "deleting snapshot %(snapshot_name)s that has dependent volumes"
+msgstr ""
+
+#: cinder/exception.py:299
+#, python-format
+msgid "No target id found for volume %(volume_id)s."
+msgstr ""
+
+#: cinder/exception.py:303
+#, fuzzy, python-format
+msgid "Failed to create iscsi target for volume %(volume_id)s."
+msgstr "Выполнена попытка удаления несуществующей консоли %(console_id)s."
+
+#: cinder/exception.py:307
+#, python-format
+msgid "Failed to attach iSCSI target for volume %(volume_id)s."
+msgstr ""
+
+#: cinder/exception.py:311
+#, fuzzy, python-format
+msgid "Failed to remove iscsi target for volume %(volume_id)s."
+msgstr "Выполнена попытка удаления несуществующей консоли %(console_id)s."
+
+#: cinder/exception.py:315
+#, python-format
+msgid "No disk at %(location)s"
+msgstr "Отсутствует диск в %(location)s"
+
+#: cinder/exception.py:319
+#, python-format
+msgid "Invalid image href %(image_href)s."
+msgstr "Недопустимый образ href %(image_href)s." + +#: cinder/exception.py:323 +#, python-format +msgid "Image %(image_id)s could not be found." +msgstr "Образ %(image_id)s не найден." + +#: cinder/exception.py:327 +#, python-format +msgid "Service %(service_id)s could not be found." +msgstr "Служба %(service_id)s не найдена." + +#: cinder/exception.py:331 +#, python-format +msgid "Host %(host)s could not be found." +msgstr "Узел %(host)s не найден." + +#: cinder/exception.py:335 +#, python-format +msgid "Scheduler Host Filter %(filter_name)s could not be found." +msgstr "" + +#: cinder/exception.py:339 +#, fuzzy, python-format +msgid "Scheduler Host Weigher %(weigher_name)s could not be found." +msgstr "Узел сompute %(host)s не найден." + +#: cinder/exception.py:343 +#, python-format +msgid "Could not find binary %(binary)s on host %(host)s." +msgstr "" + +#: cinder/exception.py:347 +#, python-format +msgid "Invalid reservation expiration %(expire)s." +msgstr "" + +#: cinder/exception.py:351 +#, python-format +msgid "" +"Change would make usage less than 0 for the following resources: " +"%(unders)s" +msgstr "" + +#: cinder/exception.py:356 +msgid "Quota could not be found" +msgstr "Квота не найдена" + +#: cinder/exception.py:360 +#, python-format +msgid "Unknown quota resources %(unknown)s." +msgstr "" + +#: cinder/exception.py:364 +#, python-format +msgid "Quota for project %(project_id)s could not be found." +msgstr "Квота проекта %(project_id)s не найдена." + +#: cinder/exception.py:368 +#, fuzzy, python-format +msgid "Quota class %(class_name)s could not be found." +msgstr "Класс %(class_name)s не найден: %(exception)s" + +#: cinder/exception.py:372 +#, fuzzy, python-format +msgid "Quota usage for project %(project_id)s could not be found." +msgstr "Квота проекта %(project_id)s не найдена." + +#: cinder/exception.py:376 +#, fuzzy, python-format +msgid "Quota reservation %(uuid)s could not be found." +msgstr "Пользователь %(user_id)s не найден." + +#: cinder/exception.py:380 +#, fuzzy, python-format +msgid "Quota exceeded for resources: %(overs)s" +msgstr "Превышена квота" + +#: cinder/exception.py:384 +#, python-format +msgid "Migration %(migration_id)s could not be found." +msgstr "Перемещение %(migration_id)s не найдено." + +#: cinder/exception.py:388 +#, python-format +msgid "Migration not found for instance %(instance_id)s with status %(status)s." +msgstr "Перемещение не найдено для копии %(instance_id)s в состоянии %(status)s." + +#: cinder/exception.py:393 +#, python-format +msgid "File %(file_path)s could not be found." +msgstr "Файл %(file_path)s не может быть найден." + +#: cinder/exception.py:397 +#, python-format +msgid "Class %(class_name)s could not be found: %(exception)s" +msgstr "Класс %(class_name)s не найден: %(exception)s" + +#: cinder/exception.py:401 +msgid "Action not allowed." +msgstr "Действие не разрешено." + +#: cinder/exception.py:410 +#, python-format +msgid "Key pair %(key_name)s already exists." +msgstr "" + +#: cinder/exception.py:414 +#, fuzzy, python-format +msgid "Volume Type %(id)s already exists." +msgstr "Тип тома %(name)s уже существует." 
+ +#: cinder/exception.py:418 +msgid "Migration error" +msgstr "Ошибка перемещения" + +#: cinder/exception.py:422 +#, python-format +msgid "Malformed message body: %(reason)s" +msgstr "Неправильное тело сообщения: %(reason)s" + +#: cinder/exception.py:426 +#, python-format +msgid "Could not find config at %(path)s" +msgstr "Невозможно найти конфигурацию по адресу %(path)s" + +#: cinder/exception.py:430 +#, python-format +msgid "Could not load paste app '%(name)s' from %(path)s" +msgstr "" + +#: cinder/exception.py:434 +#, python-format +msgid "No valid host was found. %(reason)s" +msgstr "Допустимый узел не найден. %(reason)s" + +#: cinder/exception.py:438 +#, python-format +msgid "Host %(host)s is not up or doesn't exist." +msgstr "Узел %(host)s не работает или не существует." + +#: cinder/exception.py:442 +msgid "Quota exceeded" +msgstr "Превышена квота" + +#: cinder/exception.py:449 +msgid "Requested volume or snapshot exceeds allowed Gigabytes quota" +msgstr "" + +#: cinder/exception.py:454 +msgid "Maximum volume/snapshot size exceeded" +msgstr "" + +#: cinder/exception.py:458 +#, python-format +msgid "Maximum number of volumes allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:462 +#, python-format +msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:466 +#, python-format +msgid "Detected more than one volume with name %(vol_name)s" +msgstr "" + +#: cinder/exception.py:470 +#, python-format +msgid "3PAR Host already exists: %(err)s. %(info)s" +msgstr "" + +#: cinder/exception.py:474 +#, python-format +msgid "Invalid 3PAR Domain: %(err)s" +msgstr "" + +#: cinder/exception.py:478 +#, python-format +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +msgstr "" +"Невозможно создать volume_type с именем %(name)s и спецификациями " +"%(extra_specs)s" + +#: cinder/exception.py:483 +msgid "Bad response from SolidFire API" +msgstr "" + +#: cinder/exception.py:487 +#, python-format +msgid "Error in SolidFire API response: data=%(data)s" +msgstr "" + +#: cinder/exception.py:491 +#, python-format +msgid "Unknown or unsupported command %(cmd)s" +msgstr "" + +#: cinder/exception.py:495 +#, fuzzy, python-format +msgid "Malformed response to command %(cmd)s: %(reason)s" +msgstr "Неправильное тело сообщения: %(reason)s" + +#: cinder/exception.py:499 +#, fuzzy, python-format +msgid "Bad HTTP response status %(status)s" +msgstr "Недопустимое состояние сервера: %(status)s" + +#: cinder/exception.py:503 +#, python-format +msgid "Operation failed with status=%(status)s. Full dump: %(data)s" +msgstr "" + +#: cinder/exception.py:507 +#, fuzzy, python-format +msgid "Unable to create server object for initiator %(name)s" +msgstr "Невозможно создать VDI на SR %(sr_ref)s для копии %(instance_name)s" + +#: cinder/exception.py:511 +#, fuzzy, python-format +msgid "Unable to find server object for initiator %(name)s" +msgstr "Невозможно найти узел для копии %s" + +#: cinder/exception.py:515 +msgid "Unable to find any active VPSA controller" +msgstr "" + +#: cinder/exception.py:519 +#, python-format +msgid "Failed to retrieve attachments for volume %(name)s" +msgstr "" + +#: cinder/exception.py:523 +#, python-format +msgid "Invalid attachment info for volume %(name)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:527 +#, python-format +msgid "Instance %(instance_id)s could not be found." +msgstr "Копия %(instance_id)s не найдена." 
+ +#: cinder/exception.py:531 +#, python-format +msgid "Bad or unexpected response from the storage volume backend API: %(data)s" +msgstr "" + +#: cinder/exception.py:536 +#, fuzzy +msgid "Unknown NFS exception" +msgstr "Обнаружено неизвестное исключение." + +#: cinder/exception.py:540 +msgid "No mounted NFS shares found" +msgstr "" + +#: cinder/exception.py:544 cinder/exception.py:556 +#, python-format +msgid "There is no share which can host %(volume_size)sG" +msgstr "" + +#: cinder/exception.py:548 +#, fuzzy +msgid "Unknown Gluster exception" +msgstr "Обнаружено неизвестное исключение." + +#: cinder/exception.py:552 +msgid "No mounted Gluster shares found" +msgstr "" + +#: cinder/exception.py:560 +#, python-format +msgid "" +"Glance metadata cannot be updated, key %(key)s exists for volume id " +"%(volume_id)s" +msgstr "" + +#: cinder/exception.py:565 +msgid "Failed to copy image to volume" +msgstr "" + +#: cinder/exception.py:569 +#, fuzzy, python-format +msgid "Backup %(backup_id)s could not be found." +msgstr "Группа LDAP %(group_id)s не найдена." + +#: cinder/exception.py:573 +#, fuzzy, python-format +msgid "Invalid backup: %(reason)s" +msgstr "Недопустимый внутренний интерфейс: %s" + +#: cinder/exception.py:577 +#, fuzzy +msgid "Connection to swift failed" +msgstr "Сбой соединения с glance" + +#: cinder/flags.py:145 +msgid "Deploy v1 of the Cinder API. " +msgstr "" + +#: cinder/flags.py:148 +msgid "Deploy v2 of the Cinder API. " +msgstr "" + +#: cinder/manager.py:158 +#, python-format +msgid "Skipping %(full_task_name)s, %(ticks_to_skip)s ticks left until next run" +msgstr "" +"Пропуск %(full_task_name)s, %(ticks_to_skip)s раз осталось, для " +"произведения следующего запуска" + +#: cinder/manager.py:164 +#, python-format +msgid "Running periodic task %(full_task_name)s" +msgstr "Запуск повторяющегося задания %(full_task_name)s" + +#: cinder/manager.py:171 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "Ошибка во время %(full_task_name)s: %(e)s" + +#: cinder/manager.py:216 +msgid "Notifying Schedulers of capabilities ..." 
+msgstr "" + +#: cinder/policy.py:30 +msgid "JSON file representing policy" +msgstr "" + +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" +msgstr "" + +#: cinder/quota.py:704 +#, python-format +msgid "Created reservations %(reservations)s" +msgstr "" + +#: cinder/quota.py:726 +#, python-format +msgid "Failed to commit reservations %(reservations)s" +msgstr "" + +#: cinder/quota.py:747 +#, python-format +msgid "Failed to roll back reservations %(reservations)s" +msgstr "" + +#: cinder/service.py:124 +msgid "SIGTERM received" +msgstr "" + +#: cinder/service.py:174 cinder/openstack/common/service.py:164 +msgid "Parent process has died unexpectedly, exiting" +msgstr "" + +#: cinder/service.py:213 cinder/openstack/common/service.py:200 +msgid "Forking too fast, sleeping" +msgstr "" + +#: cinder/service.py:231 cinder/openstack/common/service.py:121 +#: cinder/openstack/common/service.py:218 +#, fuzzy, python-format +msgid "Caught %s, exiting" +msgstr "снимок %s: удаление" + +#: cinder/service.py:236 cinder/openstack/common/service.py:223 +#, fuzzy +msgid "Unhandled exception" +msgstr "Вложенное исключение: %s" + +#: cinder/service.py:243 cinder/openstack/common/service.py:230 +#, python-format +msgid "Started child %d" +msgstr "" + +#: cinder/service.py:253 cinder/openstack/common/service.py:240 +#, fuzzy, python-format +msgid "Starting %d workers" +msgstr "установка сетевого узла" + +#: cinder/service.py:272 cinder/openstack/common/service.py:257 +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "" + +#: cinder/service.py:275 +#, python-format +msgid "Child %(pid)d exited with status %(code)d" +msgstr "" + +#: cinder/service.py:278 cinder/openstack/common/service.py:265 +#, python-format +msgid "pid %d not in child list" +msgstr "" + +#: cinder/service.py:286 +#, python-format +msgid "_wait_child %d" +msgstr "" + +#: cinder/service.py:302 +#, python-format +msgid "wait wrap.failed %s" +msgstr "" + +#: cinder/service.py:310 cinder/openstack/common/service.py:293 +#, python-format +msgid "Caught %s, stopping children" +msgstr "" + +#: cinder/service.py:321 cinder/openstack/common/service.py:304 +#, python-format +msgid "Waiting on %d children to exit" +msgstr "" + +#: cinder/service.py:353 +#, fuzzy, python-format +msgid "Starting %(topic)s node (version %(version_string)s)" +msgstr "Запуск узла сети (версия %(vcs_string)s) %(topic)s" + +#: cinder/service.py:367 cinder/openstack/common/rpc/service.py:47 +#, python-format +msgid "Creating Consumer connection for Service %s" +msgstr "" + +#: cinder/service.py:458 +msgid "Service killed that has no database entry" +msgstr "Служба завершила работу из-за отсутствия записи базы данных" + +#: cinder/service.py:495 +msgid "The service database object disappeared, Recreating it." +msgstr "Объект сервиса в базе данных отсутствует, Повторное создание." + +#: cinder/service.py:510 +msgid "Recovered model server connection!" 
+msgstr "" + +#: cinder/service.py:516 +msgid "model server went away" +msgstr "" + +#: cinder/service.py:608 +msgid "Full set of FLAGS:" +msgstr "Полный набор ФЛАГОВ:" + +#: cinder/service.py:615 +#, python-format +msgid "%(flag)s : FLAG SET " +msgstr "" + +#: cinder/utils.py:96 +#, python-format +msgid "Fetching %s" +msgstr "Получение %s" + +#: cinder/utils.py:147 cinder/openstack/common/processutils.py:122 +#, python-format +msgid "Got unknown keyword args to utils.execute: %r" +msgstr "Приняты неизвестные аргументы ключевого слова для utils.execute: %r" + +#: cinder/utils.py:153 +msgid "" +"The root_helper option (which lets you specify a root wrapper different " +"from cinder-rootwrap, and defaults to using sudo) is now deprecated. You " +"should use the rootwrap_config option instead." +msgstr "" + +#: cinder/utils.py:169 cinder/openstack/common/processutils.py:137 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "Выполнение команды (субпроцесс): %s" + +#: cinder/utils.py:186 cinder/utils.py:267 +#: cinder/openstack/common/processutils.py:162 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:224 +#, python-format +msgid "Result was %s" +msgstr "Результат %s" + +#: cinder/utils.py:199 cinder/openstack/common/processutils.py:174 +#, python-format +msgid "%r failed. Retrying." +msgstr "%r ошибка. Выполняется повтор." + +#: cinder/utils.py:241 cinder/volume/drivers/san/hp/hp_3par_common.py:197 +#, python-format +msgid "Running cmd (SSH): %s" +msgstr "Выполнение команды (SSH): %s" + +#: cinder/utils.py:243 +msgid "Environment not supported over SSH" +msgstr "Среда не поддерживается с использованием SSH" + +#: cinder/utils.py:247 +msgid "process_input not supported over SSH" +msgstr "process_input не поддерживается с использованием SSH" + +#: cinder/utils.py:316 cinder/volume/drivers/huawei/huawei_iscsi.py:91 +#, fuzzy +msgid "Specify a password or private_key" +msgstr "Задайте san_password или san_private_key" + +#: cinder/utils.py:332 cinder/volume/drivers/huawei/huawei_iscsi.py:99 +#, fuzzy, python-format +msgid "Error connecting via ssh: %s" +msgstr "Подключение к libvirt: %s" + +#: cinder/utils.py:373 +#, python-format +msgid "debug in callback: %s" +msgstr "отладка в обратном вызове: %s" + +#: cinder/utils.py:532 +#, python-format +msgid "Link Local address is not found.:%s" +msgstr "" + +#: cinder/utils.py:535 +#, python-format +msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +msgstr "" + +#: cinder/utils.py:569 +#, python-format +msgid "Invalid backend: %s" +msgstr "Недопустимый внутренний интерфейс: %s" + +#: cinder/utils.py:580 +#, python-format +msgid "backend %s" +msgstr "внутренний интерфейс %s" + +#: cinder/utils.py:630 +msgid "in looping call" +msgstr "" + +#: cinder/utils.py:813 +#, python-format +msgid "Expected object of type: %s" +msgstr "Ожидался объект типа: %s" + +#: cinder/utils.py:903 +#, python-format +msgid "timefunc: '%(name)s' took %(total_time).2f secs" +msgstr "timefunc: '%(name)s' заняла %(total_time).2f с." 
+ +#: cinder/utils.py:1105 +#, fuzzy, python-format +msgid "Could not remove tmpdir: %s" +msgstr "Ошибка удаления контейнера: %s" + +#: cinder/utils.py:1213 +#, python-format +msgid "Unknown byte multiplier: %s" +msgstr "" + +#: cinder/wsgi.py:121 +#, fuzzy, python-format +msgid "Unable to find cert_file : %s" +msgstr "Невозможно найти адрес %r" + +#: cinder/wsgi.py:124 +#, fuzzy, python-format +msgid "Unable to find ca_file : %s" +msgstr "Невозможно найти адрес %r" + +#: cinder/wsgi.py:127 +#, fuzzy, python-format +msgid "Unable to find key_file : %s" +msgstr "Невозможно найти адрес %r" + +#: cinder/wsgi.py:130 +msgid "" +"When running server in SSL mode, you must specify both a cert_file and " +"key_file option value in your configuration file" +msgstr "" + +#: cinder/wsgi.py:163 +#, python-format +msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" +msgstr "" + +#: cinder/wsgi.py:207 +#, fuzzy, python-format +msgid "Started %(name)s on %(_host)s:%(_port)s" +msgstr "Выполняется %(name)s на %(host)s:%(port)s" + +#: cinder/wsgi.py:226 +msgid "Stopping WSGI server." +msgstr "Выполняется останов сервера WSGI." + +#: cinder/wsgi.py:240 +msgid "WSGI server has stopped." +msgstr "Сервер WSGI был остановлен." + +#: cinder/wsgi.py:309 +msgid "You must implement __call__" +msgstr "" + +#: cinder/api/auth.py:26 +msgid "" +"cinder.api.auth:CinderKeystoneContext is deprecated. Please use " +"cinder.api.middleware.auth:CinderKeystoneContext instead." +msgstr "" + +#: cinder/api/auth.py:34 +msgid "" +"cinder.api.auth:pipeline_factory is deprecated. Please use " +"cinder.api.middleware.auth:pipeline_factory instead." +msgstr "" + +#: cinder/api/common.py:63 cinder/api/common.py:97 cinder/volume/api.py:359 +msgid "limit param must be an integer" +msgstr "" + +#: cinder/api/common.py:66 cinder/api/common.py:101 cinder/volume/api.py:356 +msgid "limit param must be positive" +msgstr "" + +#: cinder/api/common.py:91 +msgid "offset param must be an integer" +msgstr "" + +#: cinder/api/common.py:105 +msgid "offset param must be positive" +msgstr "" + +#: cinder/api/common.py:133 +#, python-format +msgid "marker [%s] not found" +msgstr "маркер [%s] не найден" + +#: cinder/api/common.py:160 +#, python-format +msgid "href %s does not contain version" +msgstr "href %s не содержит версию" + +#: cinder/api/extensions.py:184 +msgid "Initializing extension manager." +msgstr "Инициализация диспетчера расширений." 
+ +#: cinder/api/extensions.py:199 +#, python-format +msgid "Loaded extension: %s" +msgstr "Загруженное расширение: %s" + +#: cinder/api/extensions.py:237 +#, python-format +msgid "Ext name: %s" +msgstr "" + +#: cinder/api/extensions.py:238 +#, python-format +msgid "Ext alias: %s" +msgstr "" + +#: cinder/api/extensions.py:239 +#, python-format +msgid "Ext description: %s" +msgstr "" + +#: cinder/api/extensions.py:241 +#, python-format +msgid "Ext namespace: %s" +msgstr "" + +#: cinder/api/extensions.py:242 +#, python-format +msgid "Ext updated: %s" +msgstr "" + +#: cinder/api/extensions.py:244 +#, python-format +msgid "Exception loading extension: %s" +msgstr "" + +#: cinder/api/extensions.py:258 +#, python-format +msgid "Loading extension %s" +msgstr "Загрузка расширения %s" + +#: cinder/api/extensions.py:264 +#, python-format +msgid "Calling extension factory %s" +msgstr "" + +#: cinder/api/extensions.py:278 +#, python-format +msgid "osapi_volume_extension is set to deprecated path: %s" +msgstr "" + +#: cinder/api/extensions.py:280 +#, python-format +msgid "" +"Please set your flag or cinder.conf settings for osapi_volume_extension " +"to: %s" +msgstr "" + +#: cinder/api/extensions.py:289 +#, python-format +msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgstr "Ошибка загрузки расширения %(ext_factory)s: %(exc)s" + +#: cinder/api/extensions.py:357 +#, python-format +msgid "Failed to load extension %(classpath)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:381 +#, python-format +msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgstr "Ошибка загрузки расширения %(ext_name)s: %(exc)s" + +#: cinder/api/sizelimit.py:25 +msgid "" +"cinder.api.sizelimit:RequestBodySizeLimiter is deprecated. Please use " +"cinder.api.middleware.sizelimit:RequestBodySizeLimiter instead" +msgstr "" + +#: cinder/api/xmlutil.py:268 +msgid "element is not a child" +msgstr "элемент не является потомком" + +#: cinder/api/xmlutil.py:417 +msgid "root element selecting a list" +msgstr "" + +#: cinder/api/xmlutil.py:742 +#, python-format +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgstr "" + +#: cinder/api/xmlutil.py:861 +msgid "subclasses must implement construct()!" +msgstr "" + +#: cinder/api/contrib/admin_actions.py:80 +#, python-format +msgid "Updating %(resource)s '%(id)s' with '%(update)r'" +msgstr "" + +#: cinder/api/contrib/backups.py:128 +#, python-format +msgid "show called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:140 +#, python-format +msgid "delete called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:143 +#, fuzzy, python-format +msgid "Delete backup with id: %s" +msgstr "Удалить снимок с идентификатором: %s" + +#: cinder/api/contrib/backups.py:185 +#, fuzzy, python-format +msgid "Creating new backup %s" +msgstr "Создание SR %s" + +#: cinder/api/contrib/backups.py:195 cinder/api/contrib/backups.py:230 +msgid "Incorrect request body format" +msgstr "Неправильный формат тела запроса" + +#: cinder/api/contrib/backups.py:201 +#, python-format +msgid "Creating backup of volume %(volume_id)s in container %(container)s" +msgstr "" + +#: cinder/api/contrib/backups.py:221 +#, python-format +msgid "Restoring backup %(backup_id)s (%(body)s)" +msgstr "" + +#: cinder/api/contrib/backups.py:234 +#, python-format +msgid "Restoring backup %(backup_id)s to volume %(volume_id)s" +msgstr "" + +#: cinder/api/contrib/extended_snapshot_attributes.py:61 +#, fuzzy +msgid "Snapshot not found." 
+msgstr "Узел не найден" + +#: cinder/api/contrib/hosts.py:83 cinder/api/openstack/wsgi.py:158 +msgid "cannot understand XML" +msgstr "" + +#: cinder/api/contrib/hosts.py:133 +#, fuzzy, python-format +msgid "Host '%s' could not be found." +msgstr "Узел %(host)s не найден." + +#: cinder/api/contrib/hosts.py:162 +#, python-format +msgid "Invalid status: '%s'" +msgstr "Недопустимое состояние: '%s'" + +#: cinder/api/contrib/hosts.py:165 +#, python-format +msgid "Invalid update setting: '%s'" +msgstr "Недопустимый параметр обновления: '%s'" + +#: cinder/api/contrib/hosts.py:177 +#, python-format +msgid "Setting host %(host)s to %(state)s." +msgstr "Перевод узла %(host)s в %(state)s." + +#: cinder/api/contrib/hosts.py:202 +msgid "Describe-resource is admin only functionality" +msgstr "" + +#: cinder/api/contrib/hosts.py:210 +msgid "Host not found" +msgstr "Узел не найден" + +#: cinder/api/contrib/quotas.py:63 +msgid "Quota limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:102 +#, fuzzy +msgid "Request body empty" +msgstr "Неправильный формат тела запроса" + +#: cinder/api/contrib/types_extra_specs.py:106 +#: cinder/api/v1/snapshot_metadata.py:77 cinder/api/v1/volume_metadata.py:77 +#: cinder/api/v2/snapshot_metadata.py:77 +msgid "Request body and URI mismatch" +msgstr "Тело запроса и URI не совпадают" + +#: cinder/api/contrib/types_extra_specs.py:109 +#: cinder/api/v1/snapshot_metadata.py:81 cinder/api/v1/volume_metadata.py:81 +#: cinder/api/v2/snapshot_metadata.py:81 +msgid "Request body contains too many items" +msgstr "Тело запроса содержит избыточное количество объектов" + +#: cinder/api/contrib/volume_actions.py:159 +msgid "Invalid request body" +msgstr "Недопустимый запрос тела" + +#: cinder/api/contrib/volume_actions.py:163 +msgid "No image_name was specified in request." +msgstr "" + +#: cinder/api/middleware/fault.py:45 +#, python-format +msgid "Caught error: %s" +msgstr "Обнаружена ошибка: %s" + +#: cinder/api/middleware/fault.py:54 cinder/api/openstack/wsgi.py:886 +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "%(url)s возвратил с HTTP %(status)d" + +#: cinder/api/middleware/sizelimit.py:54 cinder/api/middleware/sizelimit.py:63 +#: cinder/api/middleware/sizelimit.py:77 +msgid "Request is too large." +msgstr "" + +#: cinder/api/openstack/__init__.py:74 +msgid "Must specify an ExtensionManager class" +msgstr "" + +#: cinder/api/openstack/__init__.py:85 +#, python-format +msgid "Extended resource: %s" +msgstr "Расширенный ресурс: %s" + +#: cinder/api/openstack/__init__.py:110 +#, python-format +msgid "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" +msgstr "" +"Расширение %(ext_name)s: Невозможно расширить ресурс %(collection)s: Нет " +"такого ресурса" + +#: cinder/api/openstack/__init__.py:115 +#, python-format +msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgstr "Расширение %(ext_name)s расширение ресурса: %(collection)s" + +#: cinder/api/openstack/__init__.py:128 +msgid "" +"cinder.api.openstack:FaultWrapper is deprecated. Please use " +"cinder.api.middleware.fault:FaultWrapper instead." +msgstr "" + +#: cinder/api/openstack/urlmap.py:25 +msgid "" +"cinder.api.openstack.urlmap:urlmap_factory is deprecated. Please use " +"cinder.api.urlmap:urlmap_factory instead." 
+msgstr "" + +#: cinder/api/openstack/wsgi.py:134 cinder/api/openstack/wsgi.py:537 +msgid "cannot understand JSON" +msgstr "" + +#: cinder/api/openstack/wsgi.py:542 +msgid "too many body keys" +msgstr "" + +#: cinder/api/openstack/wsgi.py:581 +#, fuzzy, python-format +msgid "Exception handling resource: %s" +msgstr "Расширенный ресурс: %s" + +#: cinder/api/openstack/wsgi.py:586 +#, python-format +msgid "Fault thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:589 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:697 +msgid "Unrecognized Content-Type provided in request" +msgstr "В запросе предоставлен не распознанный тип-содержимого" + +#: cinder/api/openstack/wsgi.py:701 +msgid "No Content-Type provided in request" +msgstr "Тип содержимого не предоставлен в запросе" + +#: cinder/api/openstack/wsgi.py:705 +msgid "Empty body provided in request" +msgstr "Пустое тело предоставлено в запросе" + +#: cinder/api/openstack/wsgi.py:816 +#, python-format +msgid "There is no such action: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:819 cinder/api/openstack/wsgi.py:832 +#: cinder/api/v1/snapshot_metadata.py:55 cinder/api/v1/snapshot_metadata.py:73 +#: cinder/api/v1/snapshot_metadata.py:98 cinder/api/v1/snapshot_metadata.py:123 +#: cinder/api/v1/volume_metadata.py:55 cinder/api/v1/volume_metadata.py:73 +#: cinder/api/v1/volume_metadata.py:98 cinder/api/v1/volume_metadata.py:123 +#: cinder/api/v2/snapshot_metadata.py:55 cinder/api/v2/snapshot_metadata.py:73 +#: cinder/api/v2/snapshot_metadata.py:98 cinder/api/v2/snapshot_metadata.py:123 +msgid "Malformed request body" +msgstr "Неправильное тело запроса" + +#: cinder/api/openstack/wsgi.py:829 +msgid "Unsupported Content-Type" +msgstr "Не поддерживаемый тип содержимого" + +#: cinder/api/openstack/wsgi.py:841 +msgid "Malformed request url" +msgstr "Неправильный запрос url" + +#: cinder/api/openstack/wsgi.py:889 +#, python-format +msgid "%(url)s returned a fault: %(e)s" +msgstr "%(url)s возвратил ошибку: %(e)s" + +#: cinder/api/openstack/volume/__init__.py:25 +msgid "" +"cinder.api.openstack.volume:APIRouter is deprecated. Please use " +"cinder.api.v1.router:APIRouter instead." +msgstr "" + +#: cinder/api/openstack/volume/versions.py:26 +msgid "" +"cinder.api.openstack.volume.versions.Versions is deprecated. Please use " +"cinder.api.versions.Versions instead." +msgstr "" + +#: cinder/api/v1/limits.py:144 cinder/api/v2/limits.py:144 +#, python-format +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." +msgstr "" +"Только %(value)s %(verb)s запрос(ов) могут быть сделаны для %(uri)s, " +"каждые %(unit_string)s." + +#: cinder/api/v1/limits.py:270 cinder/api/v2/limits.py:270 +msgid "This request was rate-limited." 
+msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:39 cinder/api/v1/snapshot_metadata.py:119 +#: cinder/api/v1/snapshot_metadata.py:158 cinder/api/v2/snapshot_metadata.py:39 +#: cinder/api/v2/snapshot_metadata.py:119 +#: cinder/api/v2/snapshot_metadata.py:158 +#, fuzzy +msgid "snapshot does not exist" +msgstr "Копия не существует" + +#: cinder/api/v1/snapshot_metadata.py:141 +#: cinder/api/v1/snapshot_metadata.py:151 cinder/api/v1/volume_metadata.py:141 +#: cinder/api/v1/volume_metadata.py:151 cinder/api/v2/snapshot_metadata.py:141 +#: cinder/api/v2/snapshot_metadata.py:151 +msgid "Metadata item was not found" +msgstr "Элемент метаданных не найден" + +#: cinder/api/v1/snapshots.py:123 cinder/api/v2/snapshots.py:123 +#, python-format +msgid "Delete snapshot with id: %s" +msgstr "Удалить снимок с идентификатором: %s" + +#: cinder/api/v1/snapshots.py:173 cinder/api/v2/snapshots.py:179 +#, python-format +msgid "Create snapshot from volume %s" +msgstr "Создать снимок из тома %s" + +#: cinder/api/v1/snapshots.py:177 cinder/api/v2/snapshots.py:188 +#, python-format +msgid "Invalid value '%s' for force. " +msgstr "" + +#: cinder/api/v1/volume_metadata.py:39 cinder/api/v1/volume_metadata.py:119 +#: cinder/api/v1/volume_metadata.py:158 +#, fuzzy +msgid "volume does not exist" +msgstr "Сервер не существует" + +#: cinder/api/v1/volumes.py:106 +#, python-format +msgid "vol=%s" +msgstr "vol=%s" + +#: cinder/api/v1/volumes.py:236 cinder/api/v2/volumes.py:153 +#, python-format +msgid "Delete volume with id: %s" +msgstr "Удалить том с идентификатором: %s" + +#: cinder/api/v1/volumes.py:278 cinder/api/v1/volumes.py:282 +#: cinder/api/v2/volumes.py:209 cinder/api/v2/volumes.py:213 +msgid "Invalid imageRef provided." +msgstr "" + +#: cinder/api/v1/volumes.py:339 cinder/api/v2/volumes.py:272 +#, python-format +msgid "Create volume of %s GB" +msgstr "Создание раздела %s ГБ" + +#: cinder/api/v1/volumes.py:418 +#, fuzzy, python-format +msgid "Removing options '%(bad_options)s' from query" +msgstr "Удаление параметров '%(unk_opt_str)s' из запроса" + +#: cinder/api/v2/volumes.py:359 +#, fuzzy, python-format +msgid "Removing options '%s' from query" +msgstr "Удаление параметров '%(unk_opt_str)s' из запроса" + +#: cinder/backup/api.py:53 +#, fuzzy +msgid "Backup status must be available or error" +msgstr "Состояние тома должно быть доступно" + +#: cinder/backup/api.py:78 +#, fuzzy +msgid "Volume to be backed up must be available" +msgstr "Состояние тома должно быть доступно" + +#: cinder/backup/api.py:112 +#, fuzzy +msgid "Backup status must be available" +msgstr "Состояние тома должно быть доступно" + +#: cinder/backup/api.py:117 +msgid "Backup to be restored has invalid size" +msgstr "" + +#: cinder/backup/api.py:126 +#, python-format +msgid "Creating volume of %(size)s GB for restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/api.py:140 +#, python-format +msgid "" +"volume size %(volume_size)d is too small to restore backup of size " +"%(size)d." 
+msgstr "" + +#: cinder/backup/api.py:145 +#, fuzzy +msgid "Volume to be restored to must be available" +msgstr "Состояние тома должно быть доступно" + +#: cinder/backup/api.py:151 +msgid "Volume to be restored to is smaller than the backup to be restored" +msgstr "" + +#: cinder/backup/api.py:155 +#, python-format +msgid "Overwriting volume %(volume_id)s with restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/manager.py:79 +msgid "Cleaning up incomplete backup operations" +msgstr "" + +#: cinder/backup/manager.py:83 +#, python-format +msgid "Resetting volume %s to available (was backing-up)" +msgstr "" + +#: cinder/backup/manager.py:87 +#, python-format +msgid "Resetting volume %s to error_restoring (was restoring-backup)" +msgstr "" + +#: cinder/backup/manager.py:98 +#, python-format +msgid "Resetting backup %s to error (was creating)" +msgstr "" + +#: cinder/backup/manager.py:104 +#, python-format +msgid "Resetting backup %s to available (was restoring)" +msgstr "" + +#: cinder/backup/manager.py:109 +#, python-format +msgid "Resuming delete on backup: %s" +msgstr "" + +#: cinder/backup/manager.py:119 +#, python-format +msgid "create_backup started, backup: %(backup_id)s for volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:128 +#, python-format +msgid "" +"create_backup aborted, expected volume status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:137 +#, python-format +msgid "" +"create_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:160 +#, python-format +msgid "create_backup finished. backup: %s" +msgstr "" + +#: cinder/backup/manager.py:166 +#, python-format +msgid "" +"restore_backup started, restoring backup: %(backup_id)s to volume: " +"%(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:175 +#, python-format +msgid "" +"restore_backup aborted, expected volume status %(expected_status)s but " +"got %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:183 +#, python-format +msgid "" +"restore_backup aborted, expected backup status %(expected_status)s but " +"got %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:199 +#, python-format +msgid "" +"restore_backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]" +msgstr "" + +#: cinder/backup/manager.py:220 +#, python-format +msgid "" +"restore_backup finished, backup: %(backup_id)s restored to volume: " +"%(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:228 +#, python-format +msgid "delete_backup started, backup: %s" +msgstr "" + +#: cinder/backup/manager.py:234 +#, python-format +msgid "" +"delete_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:244 +#, python-format +msgid "" +"delete_backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]" +msgstr "" + +#: cinder/backup/manager.py:264 +#, python-format +msgid "delete_backup finished, backup %s deleted" +msgstr "" + +#: cinder/backup/services/swift.py:96 +#, fuzzy, python-format +msgid "unsupported compression algorithm: %s" +msgstr "неподдерживаемый раздел: %s" + +#: cinder/backup/services/swift.py:117 +#, python-format +msgid "_check_container_exists: container: %s" +msgstr "" + +#: 
cinder/backup/services/swift.py:122 +#, fuzzy, python-format +msgid "container %s does not exist" +msgstr "Копия не существует" + +#: cinder/backup/services/swift.py:127 +#, fuzzy, python-format +msgid "container %s exists" +msgstr "Копия не существует" + +#: cinder/backup/services/swift.py:133 +#, python-format +msgid "_create_container started, container: %(container)s,backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:148 +#, python-format +msgid "_generate_swift_object_name_prefix: %s" +msgstr "" + +#: cinder/backup/services/swift.py:159 +#, fuzzy, python-format +msgid "generated object list: %s" +msgstr "Ожидался объект типа: %s" + +#: cinder/backup/services/swift.py:169 +#, python-format +msgid "" +"_write_metadata started, container name: %(container)s, metadata " +"filename: %(filename)s" +msgstr "" + +#: cinder/backup/services/swift.py:184 +#, python-format +msgid "" +"error writing metadata file to swift, MD5 of metadata file in swift " +"[%(etag)s] is not the same as MD5 of metadata file sent to swift " +"[%(md5)s]" +msgstr "" + +#: cinder/backup/services/swift.py:188 +msgid "_write_metadata finished" +msgstr "" + +#: cinder/backup/services/swift.py:193 +#, python-format +msgid "" +"_read_metadata started, container name: %(container)s, metadata filename:" +" %(filename)s" +msgstr "" + +#: cinder/backup/services/swift.py:197 +#, python-format +msgid "_read_metadata finished (%s)" +msgstr "" + +#: cinder/backup/services/swift.py:207 +#, fuzzy, python-format +msgid "volume size %d is invalid." +msgstr "Недопустимый запрос." + +#: cinder/backup/services/swift.py:221 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to swift, volume size: " +"%(volume_size_bytes)d, swift object names prefix %(object_prefix)s, " +"availability zone: %(availability_zone)s" +msgstr "" + +#: cinder/backup/services/swift.py:237 +msgid "reading chunk of data from volume" +msgstr "" + +#: cinder/backup/services/swift.py:244 +#, python-format +msgid "" +"compressed %(data_size_bytes)d bytes of data to %(comp_size_bytes)d bytes" +" using %(algorithm)s" +msgstr "" + +#: cinder/backup/services/swift.py:248 +msgid "not compressing data" +msgstr "" + +#: cinder/backup/services/swift.py:252 +msgid "About to put_object" +msgstr "" + +#: cinder/backup/services/swift.py:257 +#, python-format +msgid "swift MD5 for %(object_name)s: %(etag)s" +msgstr "" + +#: cinder/backup/services/swift.py:260 +#, python-format +msgid "backup MD5 for %(object_name)s: %(md5)s" +msgstr "" + +#: cinder/backup/services/swift.py:262 +#, python-format +msgid "" +"error writing object to swift, MD5 of object in swift %(etag)s is not the" +" same as MD5 of object sent to swift %(md5)s" +msgstr "" + +#: cinder/backup/services/swift.py:268 +msgid "Calling eventlet.sleep(0)" +msgstr "" + +#: cinder/backup/services/swift.py:276 +#, python-format +msgid "backup %s finished." +msgstr "" + +#: cinder/backup/services/swift.py:281 +#, python-format +msgid "v1 swift volume backup restore of %s started" +msgstr "" + +#: cinder/backup/services/swift.py:287 +#, python-format +msgid "metadata_object_names = %s" +msgstr "" + +#: cinder/backup/services/swift.py:293 +msgid "" +"restore_backup aborted, actual swift object list in swift does not match " +"object list stored in metadata" +msgstr "" + +#: cinder/backup/services/swift.py:299 +#, python-format +msgid "" +"restoring object from swift. 
backup: %(backup_id)s, container: " +"%(container)s, swift object name: %(object_name)s, volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:309 +#, python-format +msgid "decompressing data using %s algorithm" +msgstr "" + +#: cinder/backup/services/swift.py:323 +#, python-format +msgid "v1 swift volume backup restore of %s finished" +msgstr "" + +#: cinder/backup/services/swift.py:331 +#, python-format +msgid "" +"starting restore of backup %(object_prefix)s from swift container: " +"%(container)s, to volume %(volume_id)s, backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:339 +#, python-format +msgid "Restoring swift backup version %s" +msgstr "" + +#: cinder/backup/services/swift.py:344 +#, python-format +msgid "No support to restore swift backup version %s" +msgstr "" + +#: cinder/backup/services/swift.py:348 +#, python-format +msgid "restore %(backup_id)s to %(volume_id)s finished." +msgstr "" + +#: cinder/backup/services/swift.py:362 +msgid "swift error while listing objects, continuing with delete" +msgstr "" + +#: cinder/backup/services/swift.py:371 +#, python-format +msgid "swift error while deleting object %s, continuing with delete" +msgstr "" + +#: cinder/backup/services/swift.py:374 +#, python-format +msgid "deleted swift object: %(swift_object_name)s in container: %(container)s" +msgstr "" + +#: cinder/backup/services/swift.py:380 +#, fuzzy, python-format +msgid "delete %s finished" +msgstr "_удалить: %s" + +#: cinder/brick/iscsi/iscsi.py:152 +#, python-format +msgid "Creating iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:171 cinder/brick/iscsi/iscsi.py:264 +#: cinder/brick/iscsi/iscsi.py:400 cinder/brick/iscsi/iscsi.py:409 +#, python-format +msgid "Failed to create iscsi target for volume id:%(vol_id)s." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:181 +#, python-format +msgid "" +"Failed to create iscsi target for volume id:%(vol_id)s. Please ensure " +"your tgtd config file contains 'include %(volumes_dir)s/*'" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:192 +#, python-format +msgid "Removing iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:209 cinder/brick/iscsi/iscsi.py:426 +#, fuzzy, python-format +msgid "Failed to remove iscsi target for volume id:%(vol_id)s." +msgstr "Выполнена попытка удаления несуществующей консоли %(console_id)s." 
+ +#: cinder/brick/iscsi/iscsi.py:218 cinder/brick/iscsi/iscsi.py:434 +msgid "valid iqn needed for show_target" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:270 +#, python-format +msgid "Removing iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:356 +msgid "rtstool is not installed correctly" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:376 +#, python-format +msgid "Creating iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:416 +#, fuzzy, python-format +msgid "Removing iscsi_target: %s" +msgstr "Перезагрузка копии %s" + +#: cinder/brick/iscsi/iscsi.py:455 +#, python-format +msgid "Failed to add initiator iqn %s to target" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:35 +#, fuzzy, python-format +msgid "Unable to find Volume Group: %s" +msgstr "Невозможно отсоединить том %s" + +#: cinder/brick/local_dev/lvm.py:41 +#, fuzzy, python-format +msgid "Failed to create Volume Group: %s" +msgstr "Невозможно найти том %s" + +#: cinder/brick/local_dev/lvm.py:75 +msgid "Error creating Volume Group" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:76 +#, python-format +msgid "Cmd :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:77 +#, python-format +msgid "StdOut :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:78 +#, python-format +msgid "StdErr :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:82 +#, fuzzy, python-format +msgid "Unable to locate Volume Group %s" +msgstr "Невозможно найти том %s" + +#: cinder/brick/local_dev/lvm.py:265 +#, fuzzy, python-format +msgid "Unable to find VG: %s" +msgstr "Ошибка поиска vbd для vdi %s" + +#: cinder/brick/local_dev/lvm.py:284 +msgid "" +"Requested to setup thin provisioning, however current LVM version does " +"not support it." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:337 +#, fuzzy, python-format +msgid "Unable to find LV: %s" +msgstr "Ошибка поиска vbd для vdi %s" + +#: cinder/common/sqlalchemyutils.py:68 +msgid "Id not in sort_keys; is sort_keys unique?" +msgstr "" + +#: cinder/common/sqlalchemyutils.py:116 +msgid "Unknown sort direction, must be 'desc' or 'asc'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:51 +msgid "Use of empty request context is deprecated" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:178 +#, python-format +msgid "Unrecognized read_deleted value '%s'" +msgstr "Нераспознанное значение read_deleted '%s'" + +#: cinder/db/sqlalchemy/api.py:786 +#, python-format +msgid "Change will make usage less than 0 for the following resources: %(unders)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1797 cinder/db/sqlalchemy/api.py:1823 +#, python-format +msgid "No backend config with id %(sm_backend_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1880 +#, python-format +msgid "No sm_flavor called %(sm_flavor)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1924 +#, python-format +msgid "No sm_volume with id %(volume_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1987 +#, python-format +msgid "No backup with id %(backup_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:74 +msgid "version should be an integer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:101 +msgid "Upgrade DB using Essex release first." +msgstr "" + +#: cinder/db/sqlalchemy/session.py:71 +#, python-format +msgid "Got mysql server has gone away: %s" +msgstr "" + +#: cinder/db/sqlalchemy/session.py:132 +#, fuzzy, python-format +msgid "SQL connection failed. %s attempts left." +msgstr "Ошибка соединения с SQL (%(connstring)s). %(attempts)d попыток осталось." 
+ +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:243 +msgid "Exception while creating table." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:272 +msgid "Downgrade from initial Cinder install is unsupported." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:85 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:123 +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:82 +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:47 +#, python-format +msgid "Table |%s| not created!" +msgstr "Таблица |%s| не создана!" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:135 +msgid "quota_classes table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:142 +msgid "quota_usages table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:149 +msgid "reservations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:62 +msgid "Exception while creating table 'volume_glance_metedata'" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:77 +msgid "volume_glance_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:94 +msgid "backups table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:60 +msgid "snapshot_metadata table not dropped" +msgstr "" + +#: cinder/image/glance.py:151 +#, python-format +msgid "" +"Error contacting glance server '%(host)s:%(port)s' for '%(method)s', " +"%(extra)s." +msgstr "" + +#: cinder/image/image_utils.py:116 +msgid "Snapshot list encountered but no header found!" +msgstr "" + +#: cinder/image/image_utils.py:224 +msgid "'qemu-img info' parsing failed." +msgstr "Ошибка анализа 'qemu-img info'." + +#: cinder/image/image_utils.py:231 +#, python-format +msgid "fmt=%(fmt)s backed by:%(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:248 +#, python-format +msgid "Converted to raw, but format is now %s" +msgstr "Преобразование в необработанный, но текущий формат %s" + +#: cinder/image/image_utils.py:278 +#, fuzzy, python-format +msgid "Converted to %(f1)s, but format is now %(f2)s" +msgstr "Преобразование в необработанный, но текущий формат %s" + +#: cinder/openstack/common/exception.py:104 +#, fuzzy +msgid "Uncaught exception" +msgstr "Исключение: %s" + +#: cinder/openstack/common/excutils.py:48 +#, fuzzy, python-format +msgid "Original exception being dropped: %s" +msgstr "Исходное исключение было сброшено" + +#: cinder/openstack/common/lockutils.py:102 +#, python-format +msgid "Could not release the acquired lock `%s`" +msgstr "" + +#: cinder/openstack/common/lockutils.py:188 +#, python-format +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:199 +#, python-format +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:226 +#, python-format +msgid "Got file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:234 +#, python-format +msgid "Released file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." 
+msgstr "" + +#: cinder/openstack/common/log.py:226 +#, fuzzy, python-format +msgid "Deprecated: %s" +msgstr "_удалить: %s" + +#: cinder/openstack/common/log.py:329 +#, python-format +msgid "Error loading logging config %(log_config)s: %(err_msg)s" +msgstr "" + +#: cinder/openstack/common/log.py:379 +#, python-format +msgid "syslog facility must be one of: %s" +msgstr "" + +#: cinder/openstack/common/log.py:537 +#, fuzzy, python-format +msgid "Fatal call to deprecated config: %(msg)s" +msgstr "Класс %(fullname)s устарел: %(msg)s" + +#: cinder/openstack/common/loopingcall.py:84 +#, python-format +msgid "task run outlasted interval by %s sec" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:91 +msgid "in fixed duration looping call" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:131 +#, python-format +msgid "Dynamic looping call sleeping for %.02f seconds" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:138 +msgid "in dynamic looping call" +msgstr "" + +#: cinder/openstack/common/policy.py:149 +#, python-format +msgid "" +"Inheritance-based rules are deprecated; use the default brain instead of " +"%s." +msgstr "" + +#: cinder/openstack/common/policy.py:163 +#, python-format +msgid "Failed to understand rule %(match)r" +msgstr "" + +#: cinder/openstack/common/policy.py:173 +#, python-format +msgid "Inheritance-based rules are deprecated; update _check_%s" +msgstr "" + +#: cinder/openstack/common/policy.py:180 +#, python-format +msgid "No handler for matches of kind %s" +msgstr "" + +#: cinder/openstack/common/service.py:112 +#: cinder/openstack/common/service.py:275 +#, fuzzy +msgid "Full set of CONF:" +msgstr "Полный набор ФЛАГОВ:" + +#: cinder/openstack/common/service.py:261 +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "" + +#: cinder/openstack/common/strutils.py:72 +#, python-format +msgid "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:125 +#, python-format +msgid "%s not in valid priorities" +msgstr "%s не в допустимых приоритетах" + +#: cinder/openstack/common/notifier/api.py:141 +#, python-format +msgid "" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" +msgstr "" +"Неполадка '%(e)s', попытка отправить в систему уведомлений. " +"Нагрузка=%(payload)s" + +#: cinder/openstack/common/notifier/api.py:171 +#, python-format +msgid "Failed to load notifier %s. These notifications will not be sent." +msgstr "" + +#: cinder/openstack/common/notifier/rabbit_notifier.py:27 +msgid "The rabbit_notifier is now deprecated. Please use rpc_notifier instead." +msgstr "" + +#: cinder/openstack/common/notifier/rpc_notifier.py:45 +#: cinder/openstack/common/notifier/rpc_notifier2.py:51 +#, python-format +msgid "Could not send notification to %(topic)s. Payload=%(message)s" +msgstr "" + +#: cinder/openstack/common/rpc/__init__.py:106 +#, python-format +msgid "" +"A RPC is being made while holding a lock. The locks currently held are " +"%(locks)s. This is probably a bug. Please report it. Include the " +"following: [%(stack)s]." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:75 +msgid "Pool creating new connection" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:200 +#, python-format +msgid "no calling threads waiting for msg_id : %s, message : %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:208 +#, python-format +msgid "" +"Number of call waiters is greater than warning threshhold: %d. 
There " +"could be a MulticallProxyWaiter leak." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:291 +#, python-format +msgid "unpacked context: %s" +msgstr "неизвлечённый контекст: %s" + +#: cinder/openstack/common/rpc/amqp.py:337 +#, python-format +msgid "UNIQUE_ID is %s." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:406 +#, python-format +msgid "received %s" +msgstr "получено %s" + +#: cinder/openstack/common/rpc/amqp.py:414 +#, python-format +msgid "no method for message: %s" +msgstr "не определен метод для сообщения: %s" + +#: cinder/openstack/common/rpc/amqp.py:415 +#, python-format +msgid "No method for message: %s" +msgstr "Не определен метод для сообщения: %s" + +#: cinder/openstack/common/rpc/amqp.py:443 +#: cinder/openstack/common/rpc/impl_zmq.py:286 +#, python-format +msgid "Expected exception during message handling (%s)" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:451 +#: cinder/openstack/common/rpc/impl_zmq.py:292 +msgid "Exception during message handling" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:586 +#, fuzzy, python-format +msgid "Making synchronous call on %s ..." +msgstr "Выполнение асинхронного вызова %s ..." + +#: cinder/openstack/common/rpc/amqp.py:589 +#, python-format +msgid "MSG_ID is %s" +msgstr "MSG_ID is %s" + +#: cinder/openstack/common/rpc/amqp.py:623 +#, python-format +msgid "Making asynchronous cast on %s..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:632 +msgid "Making asynchronous fanout cast..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:660 +#, python-format +msgid "Sending %(event_type)s on %(topic)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:74 +#, fuzzy +msgid "An unknown RPC related exception occurred." +msgstr "Обнаружено неизвестное исключение." + +#: cinder/openstack/common/rpc/common.py:104 +#, python-format +msgid "" +"Remote error: %(exc_type)s %(value)s\n" +"%(traceback)s." +msgstr "" +"Удалённая ошибка: %(exc_type)s %(value)s\n" +"%(traceback)s." + +#: cinder/openstack/common/rpc/common.py:121 +#, python-format +msgid "" +"Timeout while waiting on RPC response - topic: \"%(topic)s\", RPC method:" +" \"%(method)s\" info: \"%(info)s\"" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:137 +#: cinder/openstack/common/rpc/common.py:138 +#: cinder/openstack/common/rpc/common.py:139 +msgid "" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:143 +#, python-format +msgid "Found duplicate message(%(msg_id)s). Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:147 +msgid "Invalid reuse of an RPC connection." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:151 +#, python-format +msgid "Specified RPC version, %(version)s, not supported by this endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:156 +#, python-format +msgid "" +"Specified RPC envelope version, %(version)s, not supported by this " +"endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:280 +#, python-format +msgid "Failed to sanitize %(item)s. Key error %(err)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:302 +#, python-format +msgid "Returning exception %s to caller" +msgstr "Возврат исключения %s вызывающему" + +#: cinder/openstack/common/rpc/impl_kombu.py:169 +#: cinder/openstack/common/rpc/impl_qpid.py:133 +msgid "Failed to process message... skipping it." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:480 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "Повторное подключение к серверу AMQP на %(hostname)s:%(port)d" + +#: cinder/openstack/common/rpc/impl_kombu.py:502 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "Подключение к серверу AMQP на %(hostname)s:%(port)d" + +#: cinder/openstack/common/rpc/impl_kombu.py:539 +#, python-format +msgid "" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" +msgstr "" +"Невозможно подключиться к серверу AMQP на %(hostname)s:%(port)d после " +"%(max_retries)d попыток: %(err_str)s" + +#: cinder/openstack/common/rpc/impl_kombu.py:555 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" +"AMQP сервер на %(hostname)s:%(port)d недоступен: %(err_str)s. Повторная " +"попытка через %(sleep_time)d секунд." + +#: cinder/openstack/common/rpc/impl_kombu.py:609 +#: cinder/openstack/common/rpc/impl_qpid.py:403 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "Ошибка объявления потребителю темы '%(topic)s': %(err_str)s" + +#: cinder/openstack/common/rpc/impl_kombu.py:627 +#: cinder/openstack/common/rpc/impl_qpid.py:418 +#, python-format +msgid "Timed out waiting for RPC response: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:631 +#: cinder/openstack/common/rpc/impl_qpid.py:422 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "Ошибка принятия сообщения из очереди: %s" + +#: cinder/openstack/common/rpc/impl_kombu.py:670 +#: cinder/openstack/common/rpc/impl_qpid.py:457 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "Ошибка публикации сообщения в тему '%(topic)s': %(err_str)s" + +#: cinder/openstack/common/rpc/impl_qpid.py:351 +#, fuzzy, python-format +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" +msgstr "Невозможно подключиться к серверу AMQP: %s " + +#: cinder/openstack/common/rpc/impl_qpid.py:357 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:370 +msgid "Re-established AMQP queues" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:430 +msgid "Error processing message. Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:97 +msgid "JSON serialization failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:104 +#, python-format +msgid "Deserializing: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:140 +#, python-format +msgid "Connecting to %(addr)s with %(type)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:141 +#, python-format +msgid "-> Subscribed to %(subscribe)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:142 +#, python-format +msgid "-> bind: %(bind)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:150 +msgid "Could not open socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:162 +#, python-format +msgid "Subscribing to %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:204 +msgid "You cannot recv on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:209 +msgid "You cannot send on this socket." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:273 +#, fuzzy, python-format +msgid "Running func with context: %s" +msgstr "неизвлечённый контекст: %s" + +#: cinder/openstack/common/rpc/impl_zmq.py:311 +msgid "Sending reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:345 +msgid "RPC message did not include method." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:380 +#, fuzzy +msgid "Registering reactor" +msgstr "Исключение регистрации ВМ %s" + +#: cinder/openstack/common/rpc/impl_zmq.py:392 +#, fuzzy +msgid "In reactor registered" +msgstr "Отсутствуют зарегистрированные ВМ" + +#: cinder/openstack/common/rpc/impl_zmq.py:407 +msgid "Out reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:411 +msgid "Consuming socket" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:453 +#, python-format +msgid "CONSUMER GOT %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:465 +#, python-format +msgid "Creating proxy for topic: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:471 +msgid "Topic contained dangerous characters." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:496 +#, python-format +msgid "ROUTER RELAY-OUT SUCCEEDED %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:505 +msgid "Topic socket file creation failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:510 +#, python-format +msgid "ROUTER RELAY-OUT QUEUED %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:513 +#, python-format +msgid "Local per-topic backlog buffer full for topic %(topic)s. Dropping message." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:532 +#, fuzzy, python-format +msgid "Could not create IPC directory %s" +msgstr "Ошибка удаления контейнера: %s" + +#: cinder/openstack/common/rpc/impl_zmq.py:542 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:576 +#, fuzzy, python-format +msgid "CONSUMER RECEIVED DATA: %s" +msgstr "Заданные данные: %s" + +#: cinder/openstack/common/rpc/impl_zmq.py:578 +#, python-format +msgid "ROUTER RELAY-OUT %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:600 +msgid "ZMQ Envelope version unsupported or unknown." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:628 +msgid "Skipping topic registration. Already registered." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:635 +#, python-format +msgid "Consumer is a zmq.%s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:687 +#, fuzzy +msgid "Creating payload" +msgstr "Создание изображения" + +#: cinder/openstack/common/rpc/impl_zmq.py:700 +msgid "Creating queue socket for reply waiter" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:713 +msgid "Sending cast" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:716 +msgid "Cast sent; Waiting reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:719 +#, fuzzy, python-format +msgid "Received message: %s" +msgstr "получено %s" + +#: cinder/openstack/common/rpc/impl_zmq.py:720 +msgid "Unpacking response" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:729 +msgid "Unsupported or unknown ZMQ envelope returned." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:736 +#, fuzzy +msgid "RPC Message Invalid." +msgstr "Недопустимый запрос." 
+
+#: cinder/openstack/common/rpc/impl_zmq.py:760
+#, python-format
+msgid "%(msg)s"
+msgstr ""
+
+#: cinder/openstack/common/rpc/impl_zmq.py:763
+#, python-format
+msgid "Sending message(s) to: %s"
+msgstr ""
+
+#: cinder/openstack/common/rpc/impl_zmq.py:767
+msgid "No matchmaker results. Not casting."
+msgstr ""
+
+#: cinder/openstack/common/rpc/impl_zmq.py:770
+msgid "No match from matchmaker."
+msgstr ""
+
+#: cinder/openstack/common/rpc/matchmaker.py:53
+msgid "Match not found by MatchMaker."
+msgstr ""
+
+#: cinder/openstack/common/rpc/matchmaker.py:89
+msgid "Matchmaker does not implement registration or heartbeat."
+msgstr "Matchmaker не реализует регистрацию или heartbeat."
+
+#: cinder/openstack/common/rpc/matchmaker.py:239
+#, python-format
+msgid "Matchmaker unregistered: %s, %s"
+msgstr ""
+
+#: cinder/openstack/common/rpc/matchmaker.py:250
+msgid "Register before starting heartbeat."
+msgstr ""
+
+#: cinder/openstack/common/rpc/matchmaker.py:343
+#: cinder/openstack/common/rpc/matchmaker.py:361
+#, python-format
+msgid "No key defining hosts for topic '%s', see ringfile"
+msgstr ""
+
+#: cinder/scheduler/chance.py:51 cinder/scheduler/simple.py:90
+msgid "Is the appropriate service running?"
+msgstr "Выполняется ли соответствующая служба?"
+
+#: cinder/scheduler/chance.py:56
+msgid "Could not find another host"
+msgstr "Невозможно найти другой узел"
+
+#: cinder/scheduler/driver.py:89
+msgid "Must implement a fallback schedule"
+msgstr ""
+
+#: cinder/scheduler/driver.py:93
+msgid "Must implement schedule_create_volume"
+msgstr ""
+
+#: cinder/scheduler/filter_scheduler.py:113
+msgid "Invalid value for 'scheduler_max_attempts', must be >=1"
+msgstr ""
+
+#: cinder/scheduler/filter_scheduler.py:131
+#, python-format
+msgid ""
+"Error scheduling %(volume_id)s from last vol-service: %(last_host)s : "
+"%(exc)s"
+msgstr ""
+
+#: cinder/scheduler/filter_scheduler.py:160
+#, python-format
+msgid "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s"
+msgstr ""
+
+#: cinder/scheduler/filter_scheduler.py:208
+#, python-format
+msgid "Filtered %(hosts)s"
+msgstr "Отфильтрованы %(hosts)s"
+
+#: cinder/scheduler/filter_scheduler.py:214
+#, python-format
+msgid "Choosing %(best_host)s"
+msgstr ""
+
+#: cinder/scheduler/host_manager.py:240
+#, python-format
+msgid "Ignoring %(service_name)s service update from %(host)s"
+msgstr "Игнорируется обновление службы %(service_name)s от %(host)s"
+
+#: cinder/scheduler/host_manager.py:244
+#, python-format
+msgid "Received %(service_name)s service update from %(host)s."
+msgstr "Принято служебное обновление для %(service_name)s от %(host)s."
+
+#: cinder/scheduler/host_manager.py:266
+msgid "service is down or disabled."
+msgstr ""
+
+#: cinder/scheduler/manager.py:119
+#, python-format
+msgid "Failed to schedule_%(method)s: %(ex)s"
+msgstr "Ошибка schedule_%(method)s: %(ex)s"
+
+#: cinder/scheduler/scheduler_options.py:69
+#, python-format
+msgid "Could not stat scheduler options file %(filename)s: '%(e)s'"
+msgstr ""
+
+#: cinder/scheduler/scheduler_options.py:78
+#, python-format
+msgid "Could not decode scheduler options: '%(e)s'"
+msgstr "Невозможно декодировать параметры планировщика: '%(e)s'"
+
+#: cinder/scheduler/simple.py:79
+msgid "Not enough allocatable volume gigabytes remaining"
+msgstr ""
+
+#: cinder/scheduler/filters/capacity_filter.py:37
+msgid "Free capacity not set: volume node info collection broken."
+msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:51 +#, python-format +msgid "" +"Insufficient free space for volume creation (requested / avail): " +"%(requested)s/%(available)s" +msgstr "" + +#: cinder/scheduler/filters/retry_filter.py:41 +#, python-format +msgid "Host %(host)s %(pass_msg)s. Previously tried hosts: %(hosts)s" +msgstr "" + +#: cinder/tests/fake_driver.py:45 cinder/volume/driver.py:506 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "" + +#: cinder/tests/fake_driver.py:93 cinder/tests/fake_driver.py:98 +#, python-format +msgid "LoggingVolumeDriver: %s" +msgstr "LoggingVolumeDriver: %s" + +#: cinder/tests/fake_utils.py:72 +#, python-format +msgid "Faking execution of cmd (subprocess): %s" +msgstr "Имитация выполнения команды (субпроцесс): %s" + +#: cinder/tests/fake_utils.py:80 +#, python-format +msgid "Faked command matched %s" +msgstr "" + +#: cinder/tests/fake_utils.py:96 +#, python-format +msgid "Faked command raised an exception %s" +msgstr "Имитация команды привела к исключению %s" + +#: cinder/tests/fake_utils.py:101 +#, python-format +msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" +msgstr "Ответ на имитацию команды в stdout='%(stdout)s' stderr='%(stderr)s'" + +#: cinder/tests/test_misc.py:59 +#, python-format +msgid "" +"The following migrations are missing a downgrade:\n" +"\t%s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:243 +#, fuzzy, python-format +msgid "unrecognized argument %s" +msgstr "Нераспознанное значение read_deleted '%s'" + +#: cinder/tests/test_storwize_svc.py:1217 +#, python-format +msgid "Run CLI command: %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1220 +#, fuzzy, python-format +msgid "" +"CLI output:\n" +" stdout: %(stdout)s\n" +" stderr: %(stderr)s" +msgstr "Ответ на имитацию команды в stdout='%(stdout)s' stderr='%(stderr)s'" + +#: cinder/tests/test_storwize_svc.py:1225 +#, python-format +msgid "" +"CLI Exception output:\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/tests/test_volume_types.py:58 +#, python-format +msgid "Given data: %s" +msgstr "Заданные данные: %s" + +#: cinder/tests/test_volume_types.py:59 +#, python-format +msgid "Result data: %s" +msgstr "Итоговые данные: %s" + +#: cinder/tests/api/contrib/test_backups.py:638 +#, fuzzy +msgid "Invalid input" +msgstr "Недопустимый снимок" + +#: cinder/tests/integrated/test_login.py:31 +#, fuzzy, python-format +msgid "volume: %s" +msgstr "Отсоединить том %s" + +#: cinder/tests/integrated/api/client.py:37 +#, python-format +msgid "" +"%(message)s\n" +"Status Code: %(_status)s\n" +"Body: %(_body)s" +msgstr "" +"%(message)s\n" +"Код состояния: %(_status)s\n" +"Тело: %(_body)s" + +#: cinder/tests/integrated/api/client.py:46 +msgid "Authentication error" +msgstr "Ошибка аутентификации" + +#: cinder/tests/integrated/api/client.py:54 +msgid "Authorization error" +msgstr "Ошибка авторизации" + +#: cinder/tests/integrated/api/client.py:62 +msgid "Item not found" +msgstr "объект не найден" + +#: cinder/tests/integrated/api/client.py:104 +#, python-format +msgid "Doing %(method)s on %(relative_url)s" +msgstr "Выполнение %(method)s на %(relative_url)s" + +#: cinder/tests/integrated/api/client.py:106 +#, python-format +msgid "Body: %s" +msgstr "Тело: %s" + +#: cinder/tests/integrated/api/client.py:124 +#, python-format +msgid "%(auth_uri)s => code %(http_status)s" +msgstr "%(auth_uri)s => код %(http_status)s" + +#: cinder/tests/integrated/api/client.py:150 +#, python-format +msgid "%(relative_uri)s => code %(http_status)s" 
+msgstr "%(relative_uri)s => код %(http_status)s" + +#: cinder/tests/integrated/api/client.py:160 +msgid "Unexpected status code" +msgstr "Непредвиденный код состояния" + +#: cinder/tests/integrated/api/client.py:167 +#, python-format +msgid "Decoding JSON: %s" +msgstr "Декодирование JSON: %s" + +#: cinder/volume/api.py:96 +msgid "May specify only one of snapshot, imageRef or source volume" +msgstr "" + +#: cinder/volume/api.py:103 cinder/volume/api.py:449 +#: cinder/volume/manager.py:554 +msgid "status must be available" +msgstr "" + +#: cinder/volume/api.py:108 +msgid "Volume size cannot be lesser than the Snapshot size" +msgstr "" + +#: cinder/volume/api.py:117 +msgid "Unable to clone volumes that are in an error state" +msgstr "" + +#: cinder/volume/api.py:123 +msgid "Clones currently must be >= original volume size." +msgstr "" + +#: cinder/volume/api.py:140 +#, python-format +msgid "Volume size '%s' must be an integer and greater than 0" +msgstr "" + +#: cinder/volume/api.py:150 +msgid "Size of specified image is larger than volume size." +msgstr "" + +#: cinder/volume/api.py:154 +msgid "Image minDisk size is larger than the volume size." +msgstr "" + +#: cinder/volume/api.py:168 +#, fuzzy, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "Превышена квота для %(pid)s, попытка создания тома %(size)sG" + +#: cinder/volume/api.py:177 +#, fuzzy, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create volume (%(d_consumed)d " +"volumes already consumed)" +msgstr "Превышена квота для %(pid)s, попытка создания тома %(size)sG" + +#: cinder/volume/api.py:308 +msgid "Failed to update quota for deleting volume" +msgstr "" + +#: cinder/volume/api.py:316 +#, fuzzy +msgid "Volume status must be available or error" +msgstr "Состояние тома должно быть доступно" + +#: cinder/volume/api.py:321 +#, python-format +msgid "Volume still has %d dependent snapshots" +msgstr "" + +#: cinder/volume/api.py:374 cinder/volume/api.py:432 +#: cinder/volume/volume_types.py:65 +#, python-format +msgid "Searching by: %s" +msgstr "Поиск по: %s" + +#: cinder/volume/api.py:452 +msgid "already attached" +msgstr "" + +#: cinder/volume/api.py:459 +msgid "already detached" +msgstr "" + +#: cinder/volume/api.py:470 +#, fuzzy +msgid "Volume status must be available to reserve" +msgstr "Состояние тома должно быть доступно" + +#: cinder/volume/api.py:519 +msgid "must be available" +msgstr "" + +#: cinder/volume/api.py:537 +#, fuzzy, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG snapshot " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "Превышена квота для %(pid)s, попытка создания тома %(size)sG" + +#: cinder/volume/api.py:546 +#, fuzzy, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create snapshot (%(d_consumed)d " +"snapshots already consumed)" +msgstr "Превышена квота для %(pid)s, попытка выполнить %(min_count)s копий" + +#: cinder/volume/api.py:595 +#, fuzzy +msgid "Volume Snapshot status must be available or error" +msgstr "Состояние тома должно быть доступно" + +#: cinder/volume/api.py:623 +msgid "Metadata property key blank" +msgstr "" + +#: cinder/volume/api.py:627 +msgid "Metadata property key greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:631 +msgid "Metadata property value greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:716 +#, fuzzy +msgid "Volume status must be available/in-use." 
+msgstr "Состояние тома должно быть доступно" + +#: cinder/volume/api.py:719 +#, fuzzy +msgid "Volume status is in-use." +msgstr "том %s: том занят" + +#: cinder/volume/driver.py:94 cinder/volume/drivers/netapp/nfs.py:253 +#, python-format +msgid "Recovering from a failed execute. Try number %s" +msgstr "Восстановление после недопустимого выполнения. Попытка номер %s" + +#: cinder/volume/driver.py:221 cinder/volume/drivers/emc/emc_smis_iscsi.py:119 +msgid "ISCSI provider_location not stored, using discovery" +msgstr "" + +#: cinder/volume/driver.py:269 +#, fuzzy, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "Невозможно найти экспортирование iSCSI для тома %s" + +#: cinder/volume/driver.py:273 cinder/volume/drivers/emc/emc_smis_iscsi.py:162 +#, python-format +msgid "ISCSI Discovery: Found %s" +msgstr "" + +#: cinder/volume/driver.py:361 +#, python-format +msgid "copy_image_to_volume %s." +msgstr "" + +#: cinder/volume/driver.py:378 +#, python-format +msgid "copy_volume_to_image %s." +msgstr "" + +#: cinder/volume/driver.py:437 +#, python-format +msgid "iSCSI device not found at %s" +msgstr "iSCSI-устройство не найдено в %s" + +#: cinder/volume/driver.py:439 +#, fuzzy, python-format +msgid "" +"ISCSI volume not yet found at: %(host_device)s. Will rescan & retry. Try" +" number: %(tries)s" +msgstr "" +"Том ISCSI не найден в: %(mount_device)s. Будет выполнена повторная " +"проверка и попытка. Повторение: %(tries)s" + +#: cinder/volume/driver.py:451 +#, fuzzy, python-format +msgid "Found iSCSI node %(host_device)s (after %(tries)s rescans)" +msgstr "Найден узел iSCSI %(mount_device)s (после %(tries)s повторных проверок)" + +#: cinder/volume/driver.py:469 cinder/volume/manager.py:689 +#: cinder/volume/drivers/lvm.py:560 cinder/volume/drivers/lvm.py:676 +#: cinder/volume/drivers/storwize_svc.py:1344 +#: cinder/volume/drivers/emc/emc_smis_common.py:855 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:241 +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1503 +#: cinder/volume/drivers/netapp/iscsi.py:1114 +#: cinder/volume/drivers/netapp/iscsi.py:1484 +#: cinder/volume/drivers/netapp/iscsi.py:2236 +#: cinder/volume/drivers/netapp/iscsi.py:2515 +#: cinder/volume/drivers/nexenta/volume.py:317 +#, fuzzy +msgid "Updating volume status" +msgstr "Обновление состояния узла" + +#: cinder/volume/driver.py:544 +msgid "Driver must implement initialize_connection" +msgstr "" + +#: cinder/volume/manager.py:122 +#, python-format +msgid "Driver path %s is deprecated, update your configuration to the new path." 
+msgstr "" + +#: cinder/volume/manager.py:144 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "Повторное экспортирование %s томов" + +#: cinder/volume/manager.py:149 +#, python-format +msgid "volume %s stuck in a downloading state" +msgstr "" + +#: cinder/volume/manager.py:154 +#, python-format +msgid "volume %s: skipping export" +msgstr "том %s: пропуск экспортирования" + +#: cinder/volume/manager.py:156 +msgid "Resuming any in progress delete operations" +msgstr "" + +#: cinder/volume/manager.py:159 +#, python-format +msgid "Resuming delete on volume: %s" +msgstr "" + +#: cinder/volume/manager.py:219 +#, python-format +msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +msgstr "том %(vol_name)s: создание lv объёмом %(vol_size)sG" + +#: cinder/volume/manager.py:228 +#, fuzzy, python-format +msgid "volume %s: creating from snapshot" +msgstr "том %s: создание экспортирования" + +#: cinder/volume/manager.py:232 +#, fuzzy, python-format +msgid "volume %s: creating from existing volume" +msgstr "том %s: создание экспортирования" + +#: cinder/volume/manager.py:236 +#, fuzzy, python-format +msgid "volume %s: creating from image" +msgstr "том %s: создание" + +#: cinder/volume/manager.py:245 +#, python-format +msgid "volume %s: creating" +msgstr "том %s: создание" + +#: cinder/volume/manager.py:277 +#, python-format +msgid "volume %s: creating export" +msgstr "том %s: создание экспортирования" + +#: cinder/volume/manager.py:286 +#, fuzzy, python-format +msgid "volume %s: create failed" +msgstr "том %s: создание" + +#: cinder/volume/manager.py:316 +#, python-format +msgid "volume %s: created successfully" +msgstr "том %s: создание завершено" + +#: cinder/volume/manager.py:324 +#, fuzzy, python-format +msgid "Error: %s" +msgstr "Ошибка БД: %s" + +#: cinder/volume/manager.py:350 +#, python-format +msgid "volume %s: Error trying to reschedule create" +msgstr "" + +#: cinder/volume/manager.py:368 +msgid "Retry info not present, will not reschedule" +msgstr "" + +#: cinder/volume/manager.py:372 +msgid "No request spec, will not reschedule" +msgstr "" + +#: cinder/volume/manager.py:377 +#, python-format +msgid "volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d" +msgstr "" + +#: cinder/volume/manager.py:406 cinder/volume/manager.py:419 +#, python-format +msgid "volume %s: deleting" +msgstr "том %s: удаление" + +#: cinder/volume/manager.py:412 +#, fuzzy +msgid "volume is not local to this node" +msgstr "Том до сих пор присоединён" + +#: cinder/volume/manager.py:417 +#, python-format +msgid "volume %s: removing export" +msgstr "том %s: удаление экспортирования" + +#: cinder/volume/manager.py:422 +#, python-format +msgid "volume %s: volume is busy" +msgstr "том %s: том занят" + +#: cinder/volume/manager.py:441 +#, fuzzy +msgid "Failed to update usages deleting volume" +msgstr "Ошибка обновления агента: %(resp)r" + +#: cinder/volume/manager.py:445 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "том %s: удаление завершено" + +#: cinder/volume/manager.py:460 +#, python-format +msgid "snapshot %s: creating" +msgstr "снимок %s: создание" + +#: cinder/volume/manager.py:466 +#, python-format +msgid "snapshot %(snap_name)s: creating" +msgstr "снимок %(snap_name)s: создание" + +#: cinder/volume/manager.py:484 +#, python-format +msgid "snapshot %s: created successfully" +msgstr "снимок %s: создание завершено" + +#: cinder/volume/manager.py:492 cinder/volume/manager.py:502 +#, python-format +msgid "snapshot %s: deleting" +msgstr "снимок %s: удаление" + +#: 
+#: cinder/volume/manager.py:505
+#, python-format
+msgid "snapshot %s: snapshot is busy"
+msgstr "снимок %s: снимок занят"
+
+#: cinder/volume/manager.py:530
+msgid "Failed to update usages deleting snapshot"
+msgstr ""
+
+#: cinder/volume/manager.py:533
+#, python-format
+msgid "snapshot %s: deleted successfully"
+msgstr "снимок %s: удаление выполнено"
+
+#: cinder/volume/manager.py:551
+msgid "being attached by another instance"
+msgstr ""
+
+#: cinder/volume/manager.py:608
+#, python-format
+msgid "Downloaded image %(image_id)s to %(volume_id)s successfully"
+msgstr ""
+
+#: cinder/volume/manager.py:626
+#, python-format
+msgid "Uploaded volume %(volume_id)s to image (%(image_id)s) successfully"
+msgstr "Том %(volume_id)s успешно выгружен в образ (%(image_id)s)"
+
+#: cinder/volume/manager.py:702
+msgid "Clear capabilities"
+msgstr "Очистить возможности"
+
+#: cinder/volume/manager.py:706
+#, python-format
+msgid "Notification {%s} received"
+msgstr "Принято уведомление {%s}"
+
+#: cinder/volume/volume_types.py:41
+#, python-format
+msgid "DB error: %s"
+msgstr "Ошибка БД: %s"
+
+#: cinder/volume/volume_types.py:50 cinder/volume/volume_types.py:97
+msgid "id cannot be None"
+msgstr ""
+
+#: cinder/volume/volume_types.py:109
+msgid "name cannot be None"
+msgstr ""
+
+#: cinder/volume/volume_types.py:128
+#, python-format
+msgid ""
+"Default volume type is not found, please check default_volume_type "
+"config: %s"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:91
+msgid "Running with CoraidDriver for ESM EtherCLoud"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:102
+#, python-format
+msgid "Update session cookie %(session)s"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:108 cinder/volume/drivers/coraid.py:183
+#, python-format
+msgid "Message : %(message)s"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:125
+#, python-format
+msgid "Error while trying to set group: %(message)s"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:128
+#, python-format
+msgid "Unable to find group: %(group)s"
+msgstr "Невозможно найти группу: %(group)s"
+
+#: cinder/volume/drivers/coraid.py:161
+msgid "ESM urlOpen error"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:166
+msgid "JSON Error"
+msgstr "Ошибка JSON"
+
+#: cinder/volume/drivers/coraid.py:170
+msgid "Request without URL"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:175
+#, python-format
+msgid "Configure data : %s"
+msgstr "Данные конфигурации: %s"
+
+#: cinder/volume/drivers/coraid.py:177
+#, python-format
+msgid "Configure response : %s"
+msgstr "Ответ конфигурации: %s"
+
+#: cinder/volume/drivers/coraid.py:199
+#, python-format
+msgid "Unable to retrive volume infos for volume %(volname)s"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:289
+msgid "Cannot login on Coraid ESM"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:308
+#, python-format
+msgid "Fail to create volume %(volname)s"
+msgstr "Ошибка создания тома %(volname)s"
+
+#: cinder/volume/drivers/coraid.py:321
+#, python-format
+msgid "Failed to delete volume %(volname)s"
+msgstr "Ошибка удаления тома %(volname)s"
+
+#: cinder/volume/drivers/coraid.py:335
+#, python-format
+msgid "Failed to Create Snapshot %(snapname)s"
+msgstr "Ошибка создания снимка %(snapname)s"
+
+#: cinder/volume/drivers/coraid.py:347
+#, python-format
+msgid "Failed to Delete Snapshot %(snapname)s"
+msgstr "Ошибка удаления снимка %(snapname)s"
+
+#: cinder/volume/drivers/coraid.py:362
+#, python-format
+msgid "Failed to Create Volume from Snapshot %(snapname)s"
%(snapname)s" +msgstr "Создать том из снимка %s" + +#: cinder/volume/drivers/coraid.py:383 +#, python-format +msgid "" +"Failed to Initialize Connection. Volume Name: %(volname)s Shelf: " +"%(shelf)s, Lun: %(lun)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:65 +#, python-format +msgid "There's no Gluster config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:70 +#, python-format +msgid "Gluster config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:80 +msgid "mount.glusterfs is not installed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:98 cinder/volume/drivers/nfs.py:168 +#, fuzzy, python-format +msgid "casted to %s" +msgstr "_создать: %s" + +#: cinder/volume/drivers/glusterfs.py:108 cinder/volume/drivers/nfs.py:178 +#, python-format +msgid "Volume %s does not have provider_location specified, skipping" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:168 cinder/volume/drivers/nfs.py:237 +#, fuzzy, python-format +msgid "Exception during mounting %s" +msgstr "Расширенный ресурс: %s" + +#: cinder/volume/drivers/glusterfs.py:247 cinder/volume/drivers/nfs.py:321 +#, fuzzy, python-format +msgid "%s is already mounted" +msgstr "образ уже присоединён" + +#: cinder/volume/drivers/lvm.py:82 +#, python-format +msgid "volume group %s doesn't exist" +msgstr "том группы %s не существует" + +#: cinder/volume/drivers/lvm.py:197 +#, python-format +msgid "Size for volume: %s not found, skipping secure delete." +msgstr "" + +#: cinder/volume/drivers/lvm.py:204 +#, python-format +msgid "Performing secure delete on volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:218 +#, python-format +msgid "Error unrecognized volume_clear option: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:238 +#, python-format +msgid "snapshot: %s not found, skipping delete operations" +msgstr "" + +#: cinder/volume/drivers/lvm.py:268 cinder/volume/drivers/lvm.py:656 +#, fuzzy, python-format +msgid "Creating clone of volume: %s" +msgstr "Создать снимок тома %s" + +#: cinder/volume/drivers/lvm.py:345 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provision for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:364 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:378 +msgid "Detected inconsistency in provider_location id" +msgstr "" + +#: cinder/volume/drivers/lvm.py:430 +#, fuzzy, python-format +msgid "Symbolic link %s not found" +msgstr "маркер [%s] не найден" + +#: cinder/volume/drivers/lvm.py:510 cinder/volume/drivers/lvm.py:524 +#, python-format +msgid "Skipping remove_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:542 +#, python-format +msgid "" +"Skipping remove_export. 
+"Skipping remove_export. No iscsi_target is presently exported for volume:"
+" %s"
+msgstr ""
+
+#: cinder/volume/drivers/lvm.py:583
+msgid "Error retrieving volume status: "
+msgstr ""
+
+#: cinder/volume/drivers/nfs.py:141
+#, python-format
+msgid "There's no NFS config file configured (%s)"
+msgstr ""
+
+#: cinder/volume/drivers/nfs.py:146
+#, python-format
+msgid "NFS config file at %(config)s doesn't exist"
+msgstr ""
+
+#: cinder/volume/drivers/rbd.py:64
+#, python-format
+msgid "rbd has no pool %s"
+msgstr "rbd не имеет пула %s"
+
+#: cinder/volume/drivers/rbd.py:87 cinder/volume/drivers/sheepdog.py:134
+msgid "error refreshing volume stats"
+msgstr ""
+
+#: cinder/volume/drivers/rbd.py:220
+msgid "Not stored in rbd"
+msgstr ""
+
+#: cinder/volume/drivers/rbd.py:224
+msgid "Blank components"
+msgstr ""
+
+#: cinder/volume/drivers/rbd.py:227
+msgid "Not an rbd snapshot"
+msgstr "Не является снимком rbd"
+
+#: cinder/volume/drivers/rbd.py:242
+#, python-format
+msgid "%s is in a different ceph cluster"
+msgstr ""
+
+#: cinder/volume/drivers/rbd.py:253
+#, python-format
+msgid "Unable to read image %s"
+msgstr "Невозможно прочитать образ %s"
+
+#: cinder/volume/drivers/scality.py:63
+msgid "Value required for 'scality_sofs_config'"
+msgstr ""
+
+#: cinder/volume/drivers/scality.py:74
+#, python-format
+msgid "Cannot access 'scality_sofs_config': %s"
+msgstr ""
+
+#: cinder/volume/drivers/scality.py:80
+msgid "Cannot execute /sbin/mount.sofs"
+msgstr ""
+
+#: cinder/volume/drivers/scality.py:101
+msgid "Cannot mount Scality SOFS, check syslog for errors"
+msgstr ""
+
+#: cinder/volume/drivers/scality.py:135
+#, python-format
+msgid "Cannot find volume dir for Scality SOFS at '%s'"
+msgstr ""
+
+#: cinder/volume/drivers/sheepdog.py:47
+#, python-format
+msgid "Sheepdog is not working: %s"
+msgstr "Sheepdog не работает: %s"
+
+#: cinder/volume/drivers/sheepdog.py:52
+msgid "Sheepdog is not working"
+msgstr "Sheepdog не работает"
+
+#: cinder/volume/drivers/solidfire.py:129
+#, python-format
+msgid "Payload for SolidFire API call: %s"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:146
+#, python-format
+msgid "Call to json.loads() raised an exception: %s"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:152
+#, python-format
+msgid "Results of SolidFire API call: %s"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:156
+#, python-format
+msgid "Clone operation encountered: %s"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:158
+#, python-format
+msgid "Waiting for outstanding operation before retrying snapshot: %s"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:164
+#, python-format
+msgid "Detected xDBVersionMismatch, retry %s of 5"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:171
+#, python-format
+msgid "API response: %s"
+msgstr "Ответ API: %s"
+
+#: cinder/volume/drivers/solidfire.py:191
+#, python-format
+msgid "Found solidfire account: %s"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:220
+#, python-format
+msgid "solidfire account: %s does not exist, create it..."
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:281
+#, python-format
+msgid "Failed to retrieve volume SolidFire-ID: %s in get_by_account!"
+msgstr "" + +#: cinder/volume/drivers/solidfire.py:338 +#, fuzzy +msgid "Failed to get model update from clone" +msgstr "Ошибка получения метаданных для ip: %s" + +#: cinder/volume/drivers/solidfire.py:364 +#, python-format +msgid "More than one valid preset was detected, using %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:397 +#, python-format +msgid "Mapped SolidFire volumeID %(sfid)s to cinder ID %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:406 +#, python-format +msgid "Volume %s, not found on SF Cluster." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:409 +#, python-format +msgid "Found %(count)s volumes mapped to id: %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:476 +msgid "Enter SolidFire delete_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:480 +#, python-format +msgid "Account for Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:482 +msgid "This usually means the volume was never succesfully created." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:497 +#, python-format +msgid "Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:500 +msgid "Leaving SolidFire delete_volume" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:504 +msgid "Executing SolidFire ensure_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:509 +msgid "Executing SolidFire create_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:558 +#, fuzzy +msgid "Updating cluster status info" +msgstr "Обновление состояния узла" + +#: cinder/volume/drivers/solidfire.py:566 +#, fuzzy +msgid "Failed to get updated stats" +msgstr "Невозможно получить обновлённое состояние: %s" + +#: cinder/volume/drivers/storwize_svc.py:173 +#, python-format +msgid "WWPN on node %(node)s: %(wwpn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:179 +msgid "enter: do_setup" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:190 +#, fuzzy, python-format +msgid "pool %s doesn't exist" +msgstr "том группы %s не существует" + +#: cinder/volume/drivers/storwize_svc.py:206 +msgid "Failed to get license information." 
+msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:265 +msgid "do_setup: No configured nodes" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:267 +msgid "leave: do_setup" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:289 +msgid "enter: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:295 +#: cinder/volume/drivers/netapp/iscsi.py:169 +#: cinder/volume/drivers/netapp/iscsi.py:1182 +#: cinder/volume/drivers/netapp/iscsi.py:1535 +#: cinder/volume/drivers/netapp/nfs.py:109 +#: cinder/volume/drivers/netapp/nfs.py:328 +#: cinder/volume/drivers/netapp/nfs.py:379 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:118 +#, python-format +msgid "%s is not set" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:301 +msgid "" +"Password or SSH private key is required for authentication: set either " +"san_password or san_private_key option" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:309 +#, python-format +msgid "" +"Illegal value %d specified for storwize_svc_flashcopy_timeout: valid " +"values are between 0 and 600" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:317 +msgid "leave: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:331 +#, python-format +msgid "ensure_export: Volume %s not found on storage" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:356 +#, python-format +msgid "enter: _get_chap_secret_for_host: host name %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:392 +#, python-format +msgid "" +"leave: _get_chap_secret_for_host: host name %(host_name)s with secret " +"%(chap_secret)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:415 +msgid "_create_host: Cannot clean host name. Host name is not unicode or string" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:483 +#, python-format +msgid "enter: _get_host_from_connector: prefix %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:511 +#, python-format +msgid "leave: _get_host_from_connector: host %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:523 +#, python-format +msgid "enter: _create_host: host %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:538 +msgid "_create_host: No connector ports" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:551 +#, fuzzy, python-format +msgid "leave: _create_host: host %(host)s - %(host_name)s" +msgstr "Перевод узла %(host)s в %(state)s." 
+ +#: cinder/volume/drivers/storwize_svc.py:574 +#, fuzzy, python-format +msgid "enter: _map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +msgstr "удаление тома %(volume_name)s, который имеет снимок" + +#: cinder/volume/drivers/storwize_svc.py:609 +msgid "" +"storwize_svc_multihostmap_enabled is set to Flase, Not allow multi host " +"mapping" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:619 +#, fuzzy, python-format +msgid "volume %s mapping to multi host" +msgstr "том %s: пропуск экспортирования" + +#: cinder/volume/drivers/storwize_svc.py:625 +#, python-format +msgid "" +"leave: _map_vol_to_host: LUN %(result_lun)s, volume %(volume_name)s, host" +" %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:635 +#, python-format +msgid "enter: _delete_host: host %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:643 +#, python-format +msgid "leave: _delete_host: host %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:675 +#, python-format +msgid "enter: initialize_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:691 +msgid "_create_host failed to return the host name." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:702 +#, python-format +msgid "initialize_connection: Failed to get attributes for volume %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:709 +#, python-format +msgid "Did not find expected column name in lsvdisk: %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:711 +#, python-format +msgid "initialize_connection: Missing volume attribute for volume %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:728 +#, python-format +msgid "" +"initialize_connection: No node found in I/O group %(gid)s for volume " +"%(vol)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:736 +#, python-format +msgid "initialize_connection: Did not find a preferred node for volume %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:769 +#, python-format +msgid "" +"initialize_connection: Failed to collect return properties for volume " +"%(vol)s and connector %(conn)s.\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:774 +#, python-format +msgid "" +"leave: initialize_connection:\n" +" volume: %(vol)s\n" +" connector %(conn)s\n" +" properties: %(prop)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:792 +#, python-format +msgid "enter: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:802 +msgid "_get_host_from_connector failed to return the host name for connector" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:817 +#, python-format +msgid "" +"terminate_connection: No mapping of volume %(vol_name)s to host " +"%(host_name)s found" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:825 +#, python-format +msgid "leave: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:883 +msgid "protocol must be specified as ' iSCSI' or ' FC'" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:907 +#, python-format +msgid "enter: _create_vdisk: vdisk %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:945 +#, python-format +msgid "" +"_create_vdisk %(name)s - did not find success message in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:950 +#, python-format +msgid "leave: _create_vdisk: volume %s " +msgstr "" + +#: 
cinder/volume/drivers/storwize_svc.py:962 +#: cinder/volume/drivers/storwize_svc.py:976 +#, python-format +msgid "" +"create FC mapping from %(source)s to %(target)s - did not find success " +"message in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:988 +#: cinder/volume/drivers/storwize_svc.py:998 +#, python-format +msgid "" +"create FC mapping from %(source)s to %(target)s - did not find mapping id" +" in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1012 +#, python-format +msgid "" +"_prepare_fc_map: Failed to prepare FlashCopy from %(source)s to " +"%(target)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1039 +#, python-format +msgid "" +"Unexecpted mapping status %(status)s for mapping %(id)s. Attributes: " +"%(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1050 +#, python-format +msgid "" +"Mapping %(id)s prepare failed to complete within the allotted %(to)d " +"seconds timeout. Terminating." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1055 +#, python-format +msgid "" +"_prepare_fc_map: Failed to start FlashCopy from %(source)s to %(target)s " +"with exception %(ex)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1062 +#, fuzzy, python-format +msgid "_prepare_fc_map: %s" +msgstr "_создать: %s" + +#: cinder/volume/drivers/storwize_svc.py:1069 +#, python-format +msgid "" +"_start_fc_map: Failed to start FlashCopy from %(source)s to %(target)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1080 +#, python-format +msgid "" +"enter: _run_flashcopy: execute FlashCopy from source %(source)s to target" +" %(target)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1092 +#, python-format +msgid "leave: _run_flashcopy: FlashCopy started from %(source)s to %(target)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1100 +#, python-format +msgid "enter: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1107 +#, python-format +msgid "_create_copy: Source vdisk %s does not exist" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1119 +#, python-format +msgid "" +"_create_copy: cannot get source vdisk %(src)s capacity from vdisk " +"attributes %(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1129 +#, python-format +msgid "leave: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1134 +#, python-format +msgid "enter: _get_flashcopy_mapping_attributes: mapping %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1156 +#, python-format +msgid "" +"leave: _get_flashcopy_mapping_attributes: mapping %(fc_map_id)s, " +"attributes %(attributes)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1164 +#, python-format +msgid "enter: _is_vdisk_defined: vdisk %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1166 +#, python-format +msgid "leave: _is_vdisk_defined: vdisk %(vol)s with %(str)s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1194 +#, python-format +msgid "enter: _delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1199 +#, python-format +msgid "warning: Tried to delete vdisk %s but it does not exist." 
+msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1225 +#, python-format +msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1258 +#, python-format +msgid "leave: _delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1283 +msgid "create_volume_from_snapshot: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1297 +msgid "create_cloned_volume: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1361 +msgid "_update_volume_status: Could not get system name" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1373 +msgid "Could not get pool data from the storage" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1374 +msgid "_update_volume_status: Could not get storage pool data" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1412 +msgid "" +"Illegal value specified for storwize_svc_vol_rsize: set to either a " +"percentage (0-100) or -1" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1418 +msgid "" +"Illegal value specified for storwize_svc_vol_warning: set to a percentage" +" (0-100)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1425 +msgid "" +"Illegal value specified for storwize_svc_vol_grainsize: set to either 32," +" 64, 128, or 256" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1432 +msgid "System does not support compression" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1437 +msgid "If compression is set to True, rsize must also be set (not equal to -1)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1443 +#, python-format +msgid "" +"Illegal value %(prot)s specified for storwize_svc_connection_protocol: " +"valid values are %(enabled)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1452 +msgid "" +"Multipath is currently only supported for FC connections and not iSCSI. 
" +"(This is a Nova limitation.)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1464 +#, python-format +msgid "enter: _execute_command_and_parse_attributes: command %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1471 +#, fuzzy, python-format +msgid "" +"CLI Exception output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" +"%(description)s\n" +"Команда: %(cmd)s\n" +"Код выхода: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" + +#: cinder/volume/drivers/storwize_svc.py:1488 +#, python-format +msgid "" +"leave: _execute_command_and_parse_attributes:\n" +"command: %(cmd)s\n" +"attributes: %(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1507 +#, python-format +msgid "" +"_get_hdr_dic: attribute headers and values do not match.\n" +" Headers: %(header)s\n" +" Values: %(row)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1517 +#, python-format +msgid "" +"%(fun)s: Failed with unexpected CLI output.\n" +" Command: %(cmd)s\n" +"stdout: %(out)s\n" +"stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1531 +#, python-format +msgid "" +"%(fun)s: Failed with unexpected CLI output.\n" +" Command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1539 +#, python-format +msgid "Did not find expected column in %(fun)s: %(hdr)s" +msgstr "" + +#: cinder/volume/drivers/windows.py:142 +#, fuzzy, python-format +msgid "Creating folder %s " +msgstr "Создание SR %s" + +#: cinder/volume/drivers/windows.py:207 +#: cinder/volume/drivers/nexenta/volume.py:203 +#, python-format +msgid "Ignored target creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/windows.py:213 +#, fuzzy, python-format +msgid "Disk not found: %s" +msgstr "Узел не найден" + +#: cinder/volume/drivers/zadara.py:218 +#, python-format +msgid "Sending %(method)s to %(url)s. Body \"%(body)s\"" +msgstr "" + +#: cinder/volume/drivers/zadara.py:240 +#, python-format +msgid "Operation completed. %(data)s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:350 +#, fuzzy, python-format +msgid "Volume %(name)s could not be found. It might be already deleted" +msgstr "Том %(volume_id)s не найден." + +#: cinder/volume/drivers/zadara.py:447 +#, python-format +msgid "Attach properties: %(properties)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:42 +msgid "" +"Module PyWBEM not installed. Install PyWBEM using the python-pywbem " +"package." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:78 +msgid "Entering create_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:82 +#, fuzzy, python-format +msgid "Create Volume: %(volume)s Size: %(size)lu" +msgstr "том %(vol_name)s: создание lv объёмом %(vol_size)sG" + +#: cinder/volume/drivers/emc/emc_smis_common.py:90 +#, python-format +msgid "Create Volume: %(volume)s Storage type: %(storage_type)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:97 +#, python-format +msgid "" +"Create Volume: %(volume)s Pool: %(pool)s Storage System: " +"%(storage_system)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:106 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Storage Configuration Service not " +"found for pool %(storage_type)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:114 +#, python-format +msgid "" +"Create Volume: %(name)s Method: CreateOrModifyElementFromStoragePool " +"ConfigServicie: %(service)s ElementName: %(name)s InPool: %(pool)s " +"ElementType: 5 Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:129 +#, python-format +msgid "Create Volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:136 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:143 +#, python-format +msgid "Leaving create_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:151 +#, fuzzy +msgid "Entering create_volume_from_snapshot." +msgstr "Создать том из снимка %s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:156 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:166 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Snapshot Instance: %(snapshotinstance)s Storage " +"System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:176 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Create Volume from Snapshot is NOT supported on VMAX." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:187 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Cannot find Replication Service to create volume from " +"snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:196 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Method: CreateElementReplica ReplicationService: " +"%(service)s ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:217 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s " +"Snapshot:%(snapshotname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:229 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Successfully clone volume from snapshot. Finding the " +"clone relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:240 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Remove the clone relationship. Method: " +"ModifyReplicaSynchronization ReplicationService: %(service)s Operation: " +"8 Synchronization: %(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:256 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:265 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. 
Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:277 +#, python-format +msgid "" +"Leaving create_volume_from_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:286 +msgid "Entering create_cloned_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:291 +#, python-format +msgid "" +"Create a Clone from Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:301 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Source Instance: %(src_instance)s Storage System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:311 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Cannot find Replication Service to create cloned volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:320 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Method: CreateElementReplica ReplicationService: %(service)s " +"ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:341 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source " +"Volume:%(srcname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:353 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Successfully cloned volume from source volume. Finding the clone " +"relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:364 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Remove the clone relationship. Method: ModifyReplicaSynchronization " +"ReplicationService: %(service)s Operation: 8 Synchronization: " +"%(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:380 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:389 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:401 +#, python-format +msgid "" +"Leaving create_cloned_volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:410 +msgid "Entering delete_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:412 +#, fuzzy, python-format +msgid "Delete Volume: %(volume)s" +msgstr "Удалить том с идентификатором: %s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:419 +#, python-format +msgid "Volume %(name)s not found on the array. No volume to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:429 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Storage Configuration Service not " +"found." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:437 +#, python-format +msgid "Delete Volume: %(name)s DeviceID: %(deviceid)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:441 +#, python-format +msgid "" +"Delete Volume: %(name)s Method: EMCReturnToStoragePool ConfigServic: " +"%(service)s TheElement: %(vol_instance)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:454 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:463 +#, python-format +msgid "Leaving delete_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:470 +msgid "Entering create_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:474 +#, fuzzy, python-format +msgid "Create snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "Создать снимок тома %s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:486 +#, python-format +msgid "Device ID: %(deviceid)s: Storage System: %(storagesystem)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:493 +#: cinder/volume/drivers/emc/emc_smis_common.py:495 +#: cinder/volume/drivers/emc/emc_smis_common.py:565 +#, python-format +msgid "Cannot find Replication Service to create snapshot for volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:500 +#, python-format +msgid "" +"Create Snapshot: Method: CreateElementReplica: Target: %(snapshot)s " +"Source: %(volume)s Replication Service: %(service)s ElementName: " +"%(elementname)s Sync Type: 7 SourceElement: %(sourceelement)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:516 +#, python-format +msgid "" +"Create Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:525 +#, python-format +msgid "Error Create Snapshot: (snapshot)s Volume: %(volume)s Error: %(errordesc)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:533 +#, python-format +msgid "" +"Leaving create_snapshot: Snapshot: %(snapshot)s Volume: %(volume)s " +"Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:539 +msgid "Entering delete_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:543 +#, fuzzy, python-format +msgid "Delete Snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "Создать снимок тома %s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:549 +#, python-format +msgid "" +"Delete Snapshot: %(snapshot)s: volume: %(volume)s. Finding " +"StorageSychronization_SV_SV." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:557 +#, python-format +msgid "" +"Snapshot: %(snapshot)s: volume: %(volume)s not found on the array. No " +"snapshot to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:572 +#, python-format +msgid "" +"Delete Snapshot: Target: %(snapshot)s Source: %(volume)s. Method: " +"ModifyReplicaSynchronization: Replication Service: %(service)s " +"Operation: 19 Synchronization: %(sync_name)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:588 +#, python-format +msgid "" +"Delete Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:597 +#, python-format +msgid "" +"Error Delete Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. 
Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:609 +#, python-format +msgid "" +"Leaving delete_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:619 +#, fuzzy, python-format +msgid "Create export: %(volume)s" +msgstr "Повторное экспортирование %s томов" + +#: cinder/volume/drivers/emc/emc_smis_common.py:624 +#, fuzzy, python-format +msgid "create_export: Volume: %(volume)s Device ID: %(device_id)s" +msgstr "Присоединить том %(volume_id)s к копии %(server_id)s на %(device)s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:646 +#, python-format +msgid "" +"ExposePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(lun_name)s" +" InitiatorPortIDs: %(initiator)s DeviceAccesses: 2" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:661 +#, python-format +msgid "ExposePaths parameter LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:672 +#, fuzzy, python-format +msgid "Error mapping volume %s." +msgstr "Ошибка запуска xvp: %s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:676 +#, fuzzy, python-format +msgid "ExposePaths for volume %s completed successfully." +msgstr "том %s: удаление завершено" + +#: cinder/volume/drivers/emc/emc_smis_common.py:692 +#, python-format +msgid "" +"HidePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(device_id)s " +"LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:705 +#, python-format +msgid "Error unmapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:709 +#, fuzzy, python-format +msgid "HidePaths for volume %s completed successfully." +msgstr "том %s: удаление завершено" + +#: cinder/volume/drivers/emc/emc_smis_common.py:722 +#, python-format +msgid "" +"AddMembers: ConfigServicie: %(service)s MaskingGroup: %(masking_group)s" +" Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:735 +#, python-format +msgid "Error mapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:740 +#, fuzzy, python-format +msgid "AddMembers for volume %s completed successfully." +msgstr "том %s: удаление завершено" + +#: cinder/volume/drivers/emc/emc_smis_common.py:753 +#, python-format +msgid "" +"RemoveMembers: ConfigServicie: %(service)s MaskingGroup: " +"%(masking_group)s Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:766 +#, python-format +msgid "Error unmapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:771 +#, fuzzy, python-format +msgid "RemoveMembers for volume %s completed successfully." +msgstr "том %s: удаление завершено" + +#: cinder/volume/drivers/emc/emc_smis_common.py:777 +#, python-format +msgid "Map volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:786 +#: cinder/volume/drivers/emc/emc_smis_common.py:816 +#, python-format +msgid "Cannot find Controller Configuration Service for storage system %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:800 +#, python-format +msgid "Unmap volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:806 +#, python-format +msgid "Volume %s is not mapped. No volume to unmap." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:830 +#, python-format +msgid "Initialize connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:836 +#, python-format +msgid "Volume %s is already mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:848 +#, python-format +msgid "Terminate connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:880 +#, python-format +msgid "Found Storage Type: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:883 +#, fuzzy +msgid "Storage type not found." +msgstr "образ не найден." + +#: cinder/volume/drivers/emc/emc_smis_common.py:899 +#, python-format +msgid "Found Masking View: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:902 +#, fuzzy +msgid "Masking View not found." +msgstr "образ не найден." + +#: cinder/volume/drivers/emc/emc_smis_common.py:924 +#, fuzzy +msgid "Ecom user not found." +msgstr "Сервер не найден." + +#: cinder/volume/drivers/emc/emc_smis_common.py:944 +#, python-format +msgid "Ecom IP: %(ecomIp)s Port: %(ecomPort)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:947 +#, fuzzy +msgid "Ecom server not found." +msgstr "Сервер не найден." + +#: cinder/volume/drivers/emc/emc_smis_common.py:954 +#, fuzzy +msgid "Cannot connect to ECOM server" +msgstr "Переподлючено к очереди" + +#: cinder/volume/drivers/emc/emc_smis_common.py:966 +#, python-format +msgid "Found Replication Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:979 +#, python-format +msgid "Found Storage Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:992 +#, python-format +msgid "Found Controller Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1005 +#, python-format +msgid "Found Storage Hardware ID Management Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1049 +#, fuzzy, python-format +msgid "Pool %(storage_type)s is not found." +msgstr "Полномочия %(role_id)s не могут быть найдены." + +#: cinder/volume/drivers/emc/emc_smis_common.py:1055 +#, python-format +msgid "Storage system not found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1061 +#, python-format +msgid "Pool: %(pool)s SystemName: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1077 +#, python-format +msgid "Pool name: %(poolname)s System name: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1109 +#, fuzzy, python-format +msgid "Volume %(volumename)s not found on the array." +msgstr "Том %(volume_id)s не найден." + +#: cinder/volume/drivers/emc/emc_smis_common.py:1112 +#, fuzzy, python-format +msgid "Volume name: %(volumename)s Volume instance: %(vol_instance)s." +msgstr "Не найден том для копии %(instance_id)s." + +#: cinder/volume/drivers/emc/emc_smis_common.py:1125 +#, python-format +msgid "Source: %(volumename)s Target: %(snapshotname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1148 +#, python-format +msgid "" +"Source: %(volumename)s Target: %(snapshotname)s. Storage Synchronized " +"not found. " +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1153 +#, python-format +msgid "" +"Storage system: %(storage_system)s Storage Synchronized instance: " +"%(sync)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1179 +#, fuzzy, python-format +msgid "Error finding %s." 
+msgstr "Ошибка поиска vdis в SR %s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1183 +#, python-format +msgid "Found %(name)s: %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1243 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage_system)s " +"and initiator %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1282 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage volume %(vol)s and initiator" +" %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1295 +#, python-format +msgid "" +"Volume %(name)s not found on the array. Cannot determine if there are " +"volumes mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1307 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage)s and " +"%(connector)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1319 +#, python-format +msgid "" +"Found %(numVolumesMapped)d volumes on storage system %(storage)s mapped " +"to %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1354 +#, python-format +msgid "Available device number on %(storage)s: %(device)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1397 +#, python-format +msgid "Device number not found for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1402 +#, python-format +msgid "Found device number %(device)d for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1412 +#, python-format +msgid "Device info: %(data)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1434 +#, python-format +msgid "Masking view: %(view)s DeviceMaskingGroup: %(masking)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1456 +#, python-format +msgid "Found Storage Processor System: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1484 +#, python-format +msgid "" +"iSCSIProtocolEndpoint for storage system %(storage_system)s and SP %(sp)s" +" is %(endpoint)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1513 +msgid "Error finding Storage Hardware ID Service." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1519 +#, python-format +msgid "" +"EMCGetTargetEndpoints: Service: %(service)s Storage HardwareIDs: " +"%(hardwareids)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1531 +msgid "Error finding Target WWNs." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1541 +#, python-format +msgid "Add target WWN: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1543 +#, python-format +msgid "Target WWNs: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1559 +#, python-format +msgid "Storage Hardware IDs for %(wwpns)s is %(foundInstances)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:158 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "Невозможно найти экспортирование iSCSI для тома %s" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:167 +#, fuzzy, python-format +msgid "Cannot find device number for volume %s" +msgstr "Невозможно найти экспортирование iSCSI для тома %s" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:197 +#, python-format +msgid "Found iSCSI endpoint: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:204 +#, python-format +msgid "ISCSI endpoint not found for SP %(sp)s on storage system %(storage)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:221 +#, python-format +msgid "ISCSI properties: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:113 +msgid "read timed out" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:138 +msgid "do_setup." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:144 +msgid "check_for_setup_error." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:149 +msgid "check_for_setup_error: Can not get device type." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:153 +#, python-format +msgid "check_for_setup_error: Device type is:%(type)s, version is:%(version)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:160 +msgid "" +"check_for_setup_error: Product version not right. Please make sure the " +"product version is V1." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:171 +msgid "_get_device_type: Storage Pool must be configured." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:180 +#, python-format +msgid "create_volume:volume name: %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:194 +#, fuzzy, python-format +msgid "delete_volume: volume name: %s." +msgstr "Удалить том с идентификатором: %s" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:201 +#, python-format +msgid "delete_volume:No need to delete volume. Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:210 +#, fuzzy, python-format +msgid "create_export: volume name:%s" +msgstr "Создать снимок тома %s" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:214 +#, python-format +msgid "create_export:Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:234 +#, python-format +msgid "initialize_connection: volume name: %(volume)s. initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:249 +#, python-format +msgid "" +"initialize_connection:Failed to find target ip for " +"initiator:%(initiatorname)s, please check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:259 +#, python-format +msgid "initialize_connection:Failed to find target iSCSI iqn. Target IP:%(ip)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:297 +#, python-format +msgid "" +"initialize_connection:host name: %(host)s, initiator name: %(ini)s, " +"hostport name: %(port)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:307 +#, python-format +msgid "" +"initialize_connection:Failed to find the given volume. volume " +"name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:359 +#, python-format +msgid "terminate_connection:volume name: %(volume)s, initiator name: %(ini)s." 
+msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:368 +#, python-format +msgid "terminate_connection:Host does not exist. Host name:%(host)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:377 +#, python-format +msgid "terminate_connection:volume does not exist. volume name:%(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:396 +#, python-format +msgid "" +"terminate_connection:No map between host and volume. Host " +"name:%(hostname)s, volume name:%(volumename)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:414 +#, python-format +msgid "" +"terminate_connection:No initiator is added to the host. Host " +"name:%(hostname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:427 +#, python-format +msgid "create_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:434 +msgid "create_snapshot:Device does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:440 +msgid "create_snapshot:Resource pool needs 1GB valid size at least." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:447 +#, python-format +msgid "create_snapshot:Volume does not exist. Volume name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:456 +#, python-format +msgid "create_snapshot:Snapshot does not exist. Snapshot name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:468 +#, python-format +msgid "delete_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:475 +msgid "delete_snapshot:Device does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:484 +#, python-format +msgid "delete_snapshot:Snapshot does not exist. snapshot name:%(snap)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:497 +#, python-format +msgid "" +"create_volume_from_snapshot:snapshot name:%(snapshot)s, volume " +"name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:505 +#, python-format +msgid "" +"create_volume_from_snapshot:Device does not support create volume from " +"snapshot. Volume name:%(volume)s, snapshot name:%(snapshot)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:516 +#, python-format +msgid "" +"create_volume_from_snapshot:Snapshot does not exist. Snapshot " +"name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:567 +msgid "Config file is wrong. Controler IP, UserName and UserPassword must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:573 +#, python-format +msgid "_check_conf_file: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:584 +#, fuzzy, python-format +msgid "_read_xml:%s" +msgstr "_создать: %s" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:612 +#, python-format +msgid "Write login information to xml error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:616 +#, python-format +msgid "_get_login_info error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:638 +#, python-format +msgid "" +"Config file is wrong. LUNType must be \"Thin\" or \"Thick\". " +"LUNType:%(type)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:665 +msgid "_get_lun_set_info:Use default prefetch type. Prefetch type:Intelligent." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:676 +msgid "" +"_get_lun_set_info:No available pools! 
Please check whether storage pool " +"is created." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:686 +#, python-format +msgid "_get_lun_set_info:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:736 +msgid "" +"_get_maximum_pool:maxpoolid is None. Please check config file and make " +"sure the \"Name\" in \"StoragePool\" is right." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:754 +#, python-format +msgid "_get_iscsi_info:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:764 +#, python-format +msgid "CLI command:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:831 +#, fuzzy, python-format +msgid "_execute_cli:%s" +msgstr "_удалить: %s" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:838 +#, python-format +msgid "" +"_name_translate:Name in cinder: %(old)s, new name in storage system: " +"%(new)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:872 +#, python-format +msgid "" +"_create_hostgroup:Failed to Create hostgroup. Hostgroup name: %(name)s. " +"out:%(out)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:901 +#, python-format +msgid "" +"_add_host:Failed to add host to hostgroup. host name:%(host)s hostgroup " +"id:%(hostgroup)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:927 +#, python-format +msgid "" +"_add_initiator:Failed to add initiator. initiator name:%(name)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:941 +#, python-format +msgid "" +"_delete_initiator:ERROE:Failed to delete initiator. initiator " +"name:%(name)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:991 +#, python-format +msgid "" +"_add_hostport:Failed to add hostport. port name:%(port)s port " +"information:%(info)s host id:%(host)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1009 +#, python-format +msgid "_delete_hostport:Failed to delete host port. port id:%(portid)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1016 +#, python-format +msgid "_get_tgt_iqn:iSCSI IP is %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1049 +#, python-format +msgid "_get_tgt_iqn:iSCSI target iqn is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1086 +#, python-format +msgid "" +"_map_lun:Failed to add hostmap. hostid:%(host)s lunid:%(lun)s " +"hostlunid:%(hostlunid)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1122 +#, python-format +msgid "" +"_delete_map:There are IOs accessing the system. Retry to delete host map." +" map id:%(mapid)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1128 +#, python-format +msgid "_delete_map:Failed to delete host map. mapid:%(mapid)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1142 +#, python-format +msgid "_delete_host: Failed delete host. host id:%(hostid)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1223 +#, python-format +msgid "" +"_active_snapshot:Failed to active snapshot. snapshot id:%(name)s. " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1237 +#, python-format +msgid "" +"_disable_snapshot:Failed to disable snapshot. snapshot id:%(id)s. " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1251 +#, python-format +msgid "" +"_delete_snapshot:Failed to delete snapshot. snapshot id:%(id)s. 
" +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1308 +#, python-format +msgid "_create_volume:Failed to Create volume. volume name:%(name)s. out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1321 +#, python-format +msgid "_delete_volume:Failed to delete volume. Volume name:%(name)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1338 +#, python-format +msgid "" +"_create_luncopy:Failed to Create LUNcopy. LUNcopy name:%(name)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1352 +#, python-format +msgid "" +"_start_luncopy:Failed to start LUNcopy. LUNcopy id:%(luncopyid)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1382 +#, python-format +msgid "" +"_wait_for_luncopy:LUNcopy status is not normal. LUNcopy " +"name:%(luncopyname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1415 +#, python-format +msgid "" +"_delete_luncopy:Failed to delete LUNcopy. LUNcopy id:%(luncopyid)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1430 +#, python-format +msgid "" +"_create_snapshot:Failed to Create snapshot. Snapshot name:%(name)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1468 +#, python-format +msgid "" +"_change_lun_controller:Failed to change lun owning controller. lun " +"id:%(lunid)s. new controller:%(controller)s. out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1484 +msgid "_is_resource_pool_enough:Resource pool for snapshot not be added." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:123 +#, fuzzy, python-format +msgid "API %(name)s failed: %(reason)s" +msgstr "Образ %(image_id)s недопустим: %(reason)s" + +#: cinder/volume/drivers/netapp/iscsi.py:134 +#: cinder/volume/drivers/netapp/iscsi.py:1167 +#, python-format +msgid "Using WSDL: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:144 +#, fuzzy, python-format +msgid "Using DFM server: %s" +msgstr "LoggingVolumeDriver: %s" + +#: cinder/volume/drivers/netapp/iscsi.py:149 +#, fuzzy, python-format +msgid "Using storage service: %s" +msgstr "Выполняемые копии: %s" + +#: cinder/volume/drivers/netapp/iscsi.py:154 +#, python-format +msgid "Using storage service prefix: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:160 +#, fuzzy, python-format +msgid "Using vfiler: %s" +msgstr "Добавление правила поставщика: %s" + +#: cinder/volume/drivers/netapp/iscsi.py:173 +msgid "Either netapp_storage_service or netapp_storage_service_prefix must be set" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:204 +#, fuzzy +msgid "Connected to DFM server" +msgstr "Переподлючено к очереди" + +#: cinder/volume/drivers/netapp/iscsi.py:282 +#, python-format +msgid "Discovered %(dataset_count)s datasets and %(lun_count)s LUNs" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:317 +#, python-format +msgid "Job failed: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:346 +msgid "" +"Attempt to use volume_type without specifying " +"netapp_storage_service_prefix flag." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:350 +msgid "" +"You must set the netapp_storage_service flag in order to create volumes " +"with no volume_type." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:420 +#, fuzzy +msgid "Failed to provision dataset member" +msgstr "Ошибка обновления базы данных" + +#: cinder/volume/drivers/netapp/iscsi.py:435 +msgid "No LUN was created by the provision job" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:463 +#: cinder/volume/drivers/netapp/iscsi.py:1250 +#, python-format +msgid "No entry in LUN table for volume %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:481 +#, fuzzy +msgid "Failed to remove and delete dataset LUN member" +msgstr "Ошибка обновления базы данных" + +#: cinder/volume/drivers/netapp/iscsi.py:497 +#, fuzzy +msgid "Failed to remove and delete dataset Qtree member" +msgstr "Ошибка обновления базы данных" + +#: cinder/volume/drivers/netapp/iscsi.py:538 +#, python-format +msgid "No entry in LUN table for volume %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:558 +#, fuzzy, python-format +msgid "Failed to get LUN details for LUN ID %s" +msgstr "Ошибка получения метаданных для ip: %s" + +#: cinder/volume/drivers/netapp/iscsi.py:575 +#, fuzzy, python-format +msgid "Failed to get host details for host ID %s" +msgstr "Ошибка получения метаданных для ip: %s" + +#: cinder/volume/drivers/netapp/iscsi.py:812 +#: cinder/volume/drivers/netapp/iscsi.py:859 +#, fuzzy, python-format +msgid "No LUN ID for volume %s" +msgstr "Невозможно найти экспортирование iSCSI для тома %s" + +#: cinder/volume/drivers/netapp/iscsi.py:821 +#, fuzzy, python-format +msgid "Failed to get target portal for filer: %s" +msgstr "Ошибка получения метаданных для ip: %s" + +#: cinder/volume/drivers/netapp/iscsi.py:826 +#, fuzzy, python-format +msgid "Failed to get target IQN for filer: %s" +msgstr "Ошибка получения метаданных для ip: %s" + +#: cinder/volume/drivers/netapp/iscsi.py:930 +msgid "" +"Failed to get LUN list. Is the DFM host time-synchronized with Cinder " +"host?" 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1042 +#: cinder/volume/drivers/netapp/iscsi.py:1381 +#: cinder/volume/drivers/netapp/iscsi.py:1699 +#: cinder/volume/drivers/netapp/nfs.py:74 +#, python-format +msgid "" +"Cannot create volume of size %(vol_size)s from snapshot of size " +"%(snap_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1054 +#, python-format +msgid "" +"Cannot create volume of type %(new_type)s from snapshot of type " +"%(old_type)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1075 +#: cinder/volume/drivers/netapp/nfs.py:273 +#, python-format +msgid "" +"Cannot create clone of size %(vol_size)s from volume of size " +"%(src_vol_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1086 +#, python-format +msgid "Cannot create clone of type %(new_type)s from volume of type %(old_type)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1143 +#, python-format +msgid "No metadata property %(prop)s defined for the LUN %(name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1216 +#: cinder/volume/drivers/netapp/iscsi.py:1561 +msgid "Success getting LUN list from server" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1238 +#: cinder/volume/drivers/netapp/iscsi.py:1576 +#, fuzzy, python-format +msgid "Created LUN with name %s" +msgstr "Создана папка с адресом %s" + +#: cinder/volume/drivers/netapp/iscsi.py:1254 +#: cinder/volume/drivers/netapp/iscsi.py:1369 +#: cinder/volume/drivers/netapp/iscsi.py:1593 +#, python-format +msgid "Destroyed LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1291 +#, python-format +msgid "Mapped LUN %(handle)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1298 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(handle)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1303 +#: cinder/volume/drivers/netapp/iscsi.py:1637 +#, fuzzy, python-format +msgid "Failed to get LUN target details for the LUN %s" +msgstr "Ошибка получения метаданных для ip: %s" + +#: cinder/volume/drivers/netapp/iscsi.py:1307 +#: cinder/volume/drivers/netapp/iscsi.py:1648 +#, fuzzy, python-format +msgid "Failed to get target portal for the LUN %s" +msgstr "Ошибка получения метаданных для ip: %s" + +#: cinder/volume/drivers/netapp/iscsi.py:1311 +#: cinder/volume/drivers/netapp/iscsi.py:1651 +#, fuzzy, python-format +msgid "Failed to get target IQN for the LUN %s" +msgstr "Ошибка получения метаданных для ip: %s" + +#: cinder/volume/drivers/netapp/iscsi.py:1344 +#, python-format +msgid "Unmapped LUN %(handle)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1365 +#, python-format +msgid "No entry in LUN table for snapshot %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1409 +#: cinder/volume/drivers/netapp/iscsi.py:1907 +msgid "Object is not a NetApp LUN." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1419 +#: cinder/volume/drivers/netapp/iscsi.py:2188 +#, python-format +msgid "Cloned LUN with new name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1439 +#, fuzzy, python-format +msgid "Could not find handle for LUN named %s" +msgstr "Невозможно найти экспортирование iSCSI для тома %s" + +#: cinder/volume/drivers/netapp/iscsi.py:1458 +#: cinder/volume/drivers/netapp/iscsi.py:1936 +#, python-format +msgid "" +"Cannot clone volume of size %(vol_size)s from src volume of size " +"%(src_vol_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1518 +#, fuzzy, python-format +msgid "Using NetApp filer: %s" +msgstr "Выполняемые копии: %s" + +#: cinder/volume/drivers/netapp/iscsi.py:1585 +#, python-format +msgid "No entry in LUN table for volume/snapshot %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1628 +#, python-format +msgid "Mapped LUN %(name)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1632 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(name)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1688 +#, fuzzy, python-format +msgid "Snapshot %s deletion successful" +msgstr "снимок %s: удаление выполнено" + +#: cinder/volume/drivers/netapp/iscsi.py:1717 +#, python-format +msgid "Unmapped LUN %(name)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1735 +#, fuzzy, python-format +msgid "Failed to get vol with required size for volume: %s" +msgstr "Ошибка получения метаданных для ip: %s" + +#: cinder/volume/drivers/netapp/iscsi.py:1811 +#, python-format +msgid "Error mapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1829 +#, python-format +msgid "Error unmapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1923 +#, fuzzy, python-format +msgid "Could not find attribute for LUN named %s" +msgstr "Невозможно найти экспортирование iSCSI для тома %s" + +#: cinder/volume/drivers/netapp/iscsi.py:2057 +#, python-format +msgid "No iscsi service found for vserver %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2192 +#, python-format +msgid "No clonned lun named %s found on the filer" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2377 +#, python-format +msgid "Error finding luns for volume %(vol)s. Verify volume exists." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2481 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s completed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2484 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s failed" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:312 +#, python-format +msgid "" +"Cloning with params ip %(host_ip)s, exp_path\n" +" %(export_path)s, vol %(volume_name)s,\n" +" clone_name %(clone_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:461 +#, python-format +msgid "No interface found on cluster for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:487 +#, python-format +msgid "" +"No volume on cluster with vserver\n" +" %(vserver)s and junction path " +"%(junction)s\n" +" " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:493 +#, python-format +msgid "" +"Cloning with params volume %(volume)s,src %(src_path)s,\n" +" dest %(dest_path)s, vserver %(vserver)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:547 +#, python-format +msgid "No storage path found for export path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:555 +#, python-format +msgid "Cloning with src %(src_path)s, dest %(dest_path)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/__init__.py:27 +#, fuzzy +msgid "Nexenta SA returned the error" +msgstr "Сервер возвратил ошибку: %s" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:64 +#, fuzzy, python-format +msgid "Sending JSON data: %s" +msgstr "Заданные данные: %s" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:69 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:75 +msgid "No headers in server response" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:76 +msgid "Bad response from server" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:79 +#, fuzzy, python-format +msgid "Got response: %s" +msgstr "ответ %s" + +#: cinder/volume/drivers/nexenta/volume.py:99 +#, fuzzy, python-format +msgid "Volume %s does not exist in Nexenta SA" +msgstr "том группы %s не существует" + +#: cinder/volume/drivers/nexenta/volume.py:211 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:220 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:228 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:238 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:274 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:282 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:74 +#, python-format +msgid "CLIQ command returned %s" +msgstr "Возврат команды CLIQ %s" + +#: cinder/volume/drivers/san/hp_lefthand.py:80 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" +"Неправильный ответ на команду CLIQ %(verb)s %(cliq_args)s. 
" +"Результат=%(out)s" + +#: cinder/volume/drivers/san/hp_lefthand.py:88 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "Ошибка выполнения команды CLIQ %(verb)s %(cliq_args)s. Результат=%(out)s" + +#: cinder/volume/drivers/san/hp_lefthand.py:118 +#, python-format +msgid "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. " +"Result=%(_xml)s" +msgstr "" +"Непредвиденное количество виртуальных ip для кластера %(cluster_name)s. " +"Результат=%(_xml)s" + +#: cinder/volume/drivers/san/hp_lefthand.py:171 +#, python-format +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" +msgstr "Сведения о томе: %(volume_name)s => %(volume_attributes)s" + +#: cinder/volume/drivers/san/hp_lefthand.py:235 +msgid "local_path not supported" +msgstr "local_path не поддерживается" + +#: cinder/volume/drivers/san/san.py:148 +#, fuzzy, python-format +msgid "Error running SSH command: %s" +msgstr "Ошибка в соглашении: %s" + +#: cinder/volume/drivers/san/san.py:169 +msgid "Specify san_password or san_private_key" +msgstr "Задайте san_password или san_private_key" + +#: cinder/volume/drivers/san/san.py:173 +msgid "san_ip must be set" +msgstr "san_ip должен быть назначен" + +#: cinder/volume/drivers/san/solaris.py:80 +#, python-format +msgid "Cannot parse list-view output: %s" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:174 +#, python-format +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgstr "LUID не найден для %(zfs_poolname)s. Вывод=%(out)s" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:257 +#, python-format +msgid "SSH Command failed after '%(total_attempts)r' attempts: '%(command)s'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:261 +#, fuzzy, python-format +msgid "Error running ssh command: %s" +msgstr "Ошибка в соглашении: %s" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:446 +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:104 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:103 +#, python-format +msgid "CPG (%s) doesn't exist on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:493 +#, python-format +msgid "Must specify a valid persona %(valid)s, value '%(persona)s' is invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:532 +#, python-format +msgid "" +"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " +"invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:569 +#, fuzzy, python-format +msgid "Volume (%s) already exists on array" +msgstr "группа %s уже существует" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:622 +#, fuzzy, python-format +msgid "Unexpected state while cloning %s" +msgstr "Неожиданная ошибка при выполнении команды." + +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:97 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:96 +msgid "Login to 3PAR array invalid" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:218 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:237 +#, python-format +msgid "The hostname must be called '%s'" +msgstr "" + diff --git a/cinder/locale/tl/LC_MESSAGES/cinder.po b/cinder/locale/tl/LC_MESSAGES/cinder.po new file mode 100644 index 0000000000..7e7da70c70 --- /dev/null +++ b/cinder/locale/tl/LC_MESSAGES/cinder.po @@ -0,0 +1,5575 @@ +# Tagalog translation for cinder +# Copyright (c) 2011 Rosetta Contributors and Canonical Ltd 2011 +# This file is distributed under the same license as the cinder package. +# FIRST AUTHOR , 2011. 
+# +msgid "" +msgstr "" +"Project-Id-Version: cinder\n" +"Report-Msgid-Bugs-To: FULL NAME <EMAIL@ADDRESS>\n" +"POT-Creation-Date: 2013-05-21 18:10+0000\n" +"PO-Revision-Date: 2011-08-23 11:21+0000\n" +"Last-Translator: Thierry Carrez \n" +"Language-Team: Tagalog <tl@li.org>\n" +"Plural-Forms: nplurals=2; plural=(n != 1)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 0.9.6\n" + +#: cinder/context.py:61 +#, python-format +msgid "Arguments dropped when creating context: %s" +msgstr "" + +#: cinder/context.py:92 +#, python-format +msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" +msgstr "" + +#: cinder/exception.py:63 +msgid "Unexpected error while running command." +msgstr "Merong hindi-inaasahang pagkakamali habang tumatakbo ang command." + +#: cinder/exception.py:66 +#, python-format +msgid "" +"%(description)s\n" +"Command: %(cmd)s\n" +"Exit code: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" +msgstr "" + +#: cinder/exception.py:90 +msgid "DB exception wrapped." +msgstr "" + +#: cinder/exception.py:104 +msgid "An unknown exception occurred." +msgstr "" + +#: cinder/exception.py:125 cinder/openstack/common/rpc/common.py:86 +msgid "Exception in string format operation" +msgstr "" + +#: cinder/exception.py:138 +msgid "Connection to glance failed" +msgstr "" + +#: cinder/exception.py:142 +msgid "Not authorized." +msgstr "" + +#: cinder/exception.py:147 +msgid "User does not have admin privileges" +msgstr "" + +#: cinder/exception.py:151 +#, python-format +msgid "Policy doesn't allow %(action)s to be performed." +msgstr "" + +#: cinder/exception.py:155 +#, fuzzy, python-format +msgid "Not authorized for image %(image_id)s." +msgstr "walang paraan para sa mensahe: %s" + +#: cinder/exception.py:159 +msgid "Unacceptable parameters." +msgstr "" + +#: cinder/exception.py:164 +msgid "Invalid snapshot" +msgstr "" + +#: cinder/exception.py:168 +#, python-format +msgid "Volume %(volume_id)s is still attached, detach volume first." +msgstr "" + +#: cinder/exception.py:172 +msgid "Failed to load data into json format" +msgstr "" + +#: cinder/exception.py:176 +msgid "The request is invalid." +msgstr "" + +#: cinder/exception.py:180 +msgid "The results are invalid." +msgstr "" + +#: cinder/exception.py:184 +msgid "Invalid input received" +msgstr "" + +#: cinder/exception.py:188 +msgid "Invalid volume type" +msgstr "" + +#: cinder/exception.py:192 +msgid "Invalid volume" +msgstr "" + +#: cinder/exception.py:196 +#, python-format +msgid "Invalid content type %(content_type)s." +msgstr "" + +#: cinder/exception.py:200 +msgid "Invalid Parameter: Unicode is not supported by the current database." +msgstr "" + +#: cinder/exception.py:207 +#, python-format +msgid "%(err)s" +msgstr "" + +#: cinder/exception.py:211 +msgid "Service is unavailable at this time." +msgstr "" + +#: cinder/exception.py:215 +#, python-format +msgid "Image %(image_id)s is unacceptable: %(reason)s" +msgstr "" + +#: cinder/exception.py:219 +msgid "Expected a uuid but received %(uuid)." +msgstr "" + +#: cinder/exception.py:223 +msgid "Resource could not be found." +msgstr "" + +#: cinder/exception.py:229 +#, python-format +msgid "Volume %(volume_id)s persistence file could not be found." +msgstr "" + +#: cinder/exception.py:233 +#, python-format +msgid "Volume %(volume_id)s could not be found."
+msgstr "" + +#: cinder/exception.py:237 +#, python-format +msgid "Unable to locate account %(account_name)s on Solidfire device" +msgstr "" + +#: cinder/exception.py:242 +#, python-format +msgid "Volume not found for instance %(instance_id)s." +msgstr "" + +#: cinder/exception.py:246 +#, python-format +msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:251 cinder/exception.py:264 +msgid "Invalid metadata" +msgstr "" + +#: cinder/exception.py:255 cinder/exception.py:268 +msgid "Invalid metadata size" +msgstr "" + +#: cinder/exception.py:259 +#, python-format +msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:272 +#, python-format +msgid "Volume type %(volume_type_id)s could not be found." +msgstr "" + +#: cinder/exception.py:276 +#, python-format +msgid "Volume type with name %(volume_type_name)s could not be found." +msgstr "" + +#: cinder/exception.py:281 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s has no extra specs with key " +"%(extra_specs_key)s." +msgstr "" + +#: cinder/exception.py:286 +#, python-format +msgid "Snapshot %(snapshot_id)s could not be found." +msgstr "" + +#: cinder/exception.py:290 +#, python-format +msgid "deleting volume %(volume_name)s that has snapshot" +msgstr "" + +#: cinder/exception.py:294 +#, python-format +msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" +msgstr "" + +#: cinder/exception.py:299 +#, python-format +msgid "No target id found for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:303 +#, python-format +msgid "Failed to create iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:307 +#, python-format +msgid "Failed to attach iSCSI target for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:311 +#, python-format +msgid "Failed to remove iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:315 +#, python-format +msgid "No disk at %(location)s" +msgstr "" + +#: cinder/exception.py:319 +#, python-format +msgid "Invalid image href %(image_href)s." +msgstr "" + +#: cinder/exception.py:323 +#, python-format +msgid "Image %(image_id)s could not be found." +msgstr "" + +#: cinder/exception.py:327 +#, python-format +msgid "Service %(service_id)s could not be found." +msgstr "" + +#: cinder/exception.py:331 +#, python-format +msgid "Host %(host)s could not be found." +msgstr "" + +#: cinder/exception.py:335 +#, python-format +msgid "Scheduler Host Filter %(filter_name)s could not be found." +msgstr "" + +#: cinder/exception.py:339 +#, python-format +msgid "Scheduler Host Weigher %(weigher_name)s could not be found." +msgstr "" + +#: cinder/exception.py:343 +#, python-format +msgid "Could not find binary %(binary)s on host %(host)s." +msgstr "" + +#: cinder/exception.py:347 +#, python-format +msgid "Invalid reservation expiration %(expire)s." +msgstr "" + +#: cinder/exception.py:351 +#, python-format +msgid "" +"Change would make usage less than 0 for the following resources: " +"%(unders)s" +msgstr "" + +#: cinder/exception.py:356 +msgid "Quota could not be found" +msgstr "" + +#: cinder/exception.py:360 +#, python-format +msgid "Unknown quota resources %(unknown)s." +msgstr "" + +#: cinder/exception.py:364 +#, python-format +msgid "Quota for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:368 +#, python-format +msgid "Quota class %(class_name)s could not be found." 
+msgstr "" + +#: cinder/exception.py:372 +#, python-format +msgid "Quota usage for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:376 +#, python-format +msgid "Quota reservation %(uuid)s could not be found." +msgstr "" + +#: cinder/exception.py:380 +#, python-format +msgid "Quota exceeded for resources: %(overs)s" +msgstr "" + +#: cinder/exception.py:384 +#, python-format +msgid "Migration %(migration_id)s could not be found." +msgstr "" + +#: cinder/exception.py:388 +#, python-format +msgid "Migration not found for instance %(instance_id)s with status %(status)s." +msgstr "" + +#: cinder/exception.py:393 +#, python-format +msgid "File %(file_path)s could not be found." +msgstr "" + +#: cinder/exception.py:397 +#, python-format +msgid "Class %(class_name)s could not be found: %(exception)s" +msgstr "" + +#: cinder/exception.py:401 +msgid "Action not allowed." +msgstr "" + +#: cinder/exception.py:410 +#, python-format +msgid "Key pair %(key_name)s already exists." +msgstr "" + +#: cinder/exception.py:414 +#, python-format +msgid "Volume Type %(id)s already exists." +msgstr "" + +#: cinder/exception.py:418 +msgid "Migration error" +msgstr "" + +#: cinder/exception.py:422 +#, python-format +msgid "Malformed message body: %(reason)s" +msgstr "" + +#: cinder/exception.py:426 +#, python-format +msgid "Could not find config at %(path)s" +msgstr "" + +#: cinder/exception.py:430 +#, python-format +msgid "Could not load paste app '%(name)s' from %(path)s" +msgstr "" + +#: cinder/exception.py:434 +#, python-format +msgid "No valid host was found. %(reason)s" +msgstr "" + +#: cinder/exception.py:438 +#, python-format +msgid "Host %(host)s is not up or doesn't exist." +msgstr "" + +#: cinder/exception.py:442 +msgid "Quota exceeded" +msgstr "" + +#: cinder/exception.py:449 +msgid "Requested volume or snapshot exceeds allowed Gigabytes quota" +msgstr "" + +#: cinder/exception.py:454 +msgid "Maximum volume/snapshot size exceeded" +msgstr "" + +#: cinder/exception.py:458 +#, python-format +msgid "Maximum number of volumes allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:462 +#, python-format +msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:466 +#, python-format +msgid "Detected more than one volume with name %(vol_name)s" +msgstr "" + +#: cinder/exception.py:470 +#, python-format +msgid "3PAR Host already exists: %(err)s. %(info)s" +msgstr "" + +#: cinder/exception.py:474 +#, python-format +msgid "Invalid 3PAR Domain: %(err)s" +msgstr "" + +#: cinder/exception.py:478 +#, python-format +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +msgstr "" + +#: cinder/exception.py:483 +msgid "Bad response from SolidFire API" +msgstr "" + +#: cinder/exception.py:487 +#, python-format +msgid "Error in SolidFire API response: data=%(data)s" +msgstr "" + +#: cinder/exception.py:491 +#, python-format +msgid "Unknown or unsupported command %(cmd)s" +msgstr "" + +#: cinder/exception.py:495 +#, python-format +msgid "Malformed response to command %(cmd)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:499 +#, python-format +msgid "Bad HTTP response status %(status)s" +msgstr "" + +#: cinder/exception.py:503 +#, python-format +msgid "Operation failed with status=%(status)s. 
Full dump: %(data)s" +msgstr "" + +#: cinder/exception.py:507 +#, python-format +msgid "Unable to create server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:511 +#, python-format +msgid "Unable to find server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:515 +msgid "Unable to find any active VPSA controller" +msgstr "" + +#: cinder/exception.py:519 +#, python-format +msgid "Failed to retrieve attachments for volume %(name)s" +msgstr "" + +#: cinder/exception.py:523 +#, python-format +msgid "Invalid attachment info for volume %(name)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:527 +#, python-format +msgid "Instance %(instance_id)s could not be found." +msgstr "" + +#: cinder/exception.py:531 +#, python-format +msgid "Bad or unexpected response from the storage volume backend API: %(data)s" +msgstr "" + +#: cinder/exception.py:536 +msgid "Unknown NFS exception" +msgstr "" + +#: cinder/exception.py:540 +msgid "No mounted NFS shares found" +msgstr "" + +#: cinder/exception.py:544 cinder/exception.py:556 +#, python-format +msgid "There is no share which can host %(volume_size)sG" +msgstr "" + +#: cinder/exception.py:548 +msgid "Unknown Gluster exception" +msgstr "" + +#: cinder/exception.py:552 +msgid "No mounted Gluster shares found" +msgstr "" + +#: cinder/exception.py:560 +#, python-format +msgid "" +"Glance metadata cannot be updated, key %(key)s exists for volume id " +"%(volume_id)s" +msgstr "" + +#: cinder/exception.py:565 +msgid "Failed to copy image to volume" +msgstr "" + +#: cinder/exception.py:569 +#, python-format +msgid "Backup %(backup_id)s could not be found." +msgstr "" + +#: cinder/exception.py:573 +#, python-format +msgid "Invalid backup: %(reason)s" +msgstr "" + +#: cinder/exception.py:577 +msgid "Connection to swift failed" +msgstr "" + +#: cinder/flags.py:145 +msgid "Deploy v1 of the Cinder API. " +msgstr "" + +#: cinder/flags.py:148 +msgid "Deploy v2 of the Cinder API. " +msgstr "" + +#: cinder/manager.py:158 +#, python-format +msgid "Skipping %(full_task_name)s, %(ticks_to_skip)s ticks left until next run" +msgstr "" + +#: cinder/manager.py:164 +#, python-format +msgid "Running periodic task %(full_task_name)s" +msgstr "" + +#: cinder/manager.py:171 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "" + +#: cinder/manager.py:216 +msgid "Notifying Schedulers of capabilities ..." 
+msgstr "" + +#: cinder/policy.py:30 +msgid "JSON file representing policy" +msgstr "" + +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" +msgstr "" + +#: cinder/quota.py:704 +#, python-format +msgid "Created reservations %(reservations)s" +msgstr "" + +#: cinder/quota.py:726 +#, python-format +msgid "Failed to commit reservations %(reservations)s" +msgstr "" + +#: cinder/quota.py:747 +#, python-format +msgid "Failed to roll back reservations %(reservations)s" +msgstr "" + +#: cinder/service.py:124 +msgid "SIGTERM received" +msgstr "" + +#: cinder/service.py:174 cinder/openstack/common/service.py:164 +msgid "Parent process has died unexpectedly, exiting" +msgstr "" + +#: cinder/service.py:213 cinder/openstack/common/service.py:200 +msgid "Forking too fast, sleeping" +msgstr "" + +#: cinder/service.py:231 cinder/openstack/common/service.py:121 +#: cinder/openstack/common/service.py:218 +#, python-format +msgid "Caught %s, exiting" +msgstr "" + +#: cinder/service.py:236 cinder/openstack/common/service.py:223 +msgid "Unhandled exception" +msgstr "" + +#: cinder/service.py:243 cinder/openstack/common/service.py:230 +#, python-format +msgid "Started child %d" +msgstr "" + +#: cinder/service.py:253 cinder/openstack/common/service.py:240 +#, python-format +msgid "Starting %d workers" +msgstr "" + +#: cinder/service.py:272 cinder/openstack/common/service.py:257 +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "" + +#: cinder/service.py:275 +#, python-format +msgid "Child %(pid)d exited with status %(code)d" +msgstr "" + +#: cinder/service.py:278 cinder/openstack/common/service.py:265 +#, python-format +msgid "pid %d not in child list" +msgstr "" + +#: cinder/service.py:286 +#, python-format +msgid "_wait_child %d" +msgstr "" + +#: cinder/service.py:302 +#, python-format +msgid "wait wrap.failed %s" +msgstr "" + +#: cinder/service.py:310 cinder/openstack/common/service.py:293 +#, python-format +msgid "Caught %s, stopping children" +msgstr "" + +#: cinder/service.py:321 cinder/openstack/common/service.py:304 +#, python-format +msgid "Waiting on %d children to exit" +msgstr "" + +#: cinder/service.py:353 +#, python-format +msgid "Starting %(topic)s node (version %(version_string)s)" +msgstr "" + +#: cinder/service.py:367 cinder/openstack/common/rpc/service.py:47 +#, python-format +msgid "Creating Consumer connection for Service %s" +msgstr "" + +#: cinder/service.py:458 +msgid "Service killed that has no database entry" +msgstr "" + +#: cinder/service.py:495 +msgid "The service database object disappeared, Recreating it." +msgstr "" + +#: cinder/service.py:510 +msgid "Recovered model server connection!" +msgstr "" + +#: cinder/service.py:516 +msgid "model server went away" +msgstr "" + +#: cinder/service.py:608 +msgid "Full set of FLAGS:" +msgstr "" + +#: cinder/service.py:615 +#, python-format +msgid "%(flag)s : FLAG SET " +msgstr "" + +#: cinder/utils.py:96 +#, python-format +msgid "Fetching %s" +msgstr "" + +#: cinder/utils.py:147 cinder/openstack/common/processutils.py:122 +#, python-format +msgid "Got unknown keyword args to utils.execute: %r" +msgstr "" + +#: cinder/utils.py:153 +msgid "" +"The root_helper option (which lets you specify a root wrapper different " +"from cinder-rootwrap, and defaults to using sudo) is now deprecated. You " +"should use the rootwrap_config option instead." 
+msgstr "" + +#: cinder/utils.py:169 cinder/openstack/common/processutils.py:137 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "" + +#: cinder/utils.py:186 cinder/utils.py:267 +#: cinder/openstack/common/processutils.py:162 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:224 +#, python-format +msgid "Result was %s" +msgstr "" + +#: cinder/utils.py:199 cinder/openstack/common/processutils.py:174 +#, python-format +msgid "%r failed. Retrying." +msgstr "" + +#: cinder/utils.py:241 cinder/volume/drivers/san/hp/hp_3par_common.py:197 +#, python-format +msgid "Running cmd (SSH): %s" +msgstr "" + +#: cinder/utils.py:243 +msgid "Environment not supported over SSH" +msgstr "" + +#: cinder/utils.py:247 +msgid "process_input not supported over SSH" +msgstr "" + +#: cinder/utils.py:316 cinder/volume/drivers/huawei/huawei_iscsi.py:91 +msgid "Specify a password or private_key" +msgstr "" + +#: cinder/utils.py:332 cinder/volume/drivers/huawei/huawei_iscsi.py:99 +#, python-format +msgid "Error connecting via ssh: %s" +msgstr "" + +#: cinder/utils.py:373 +#, python-format +msgid "debug in callback: %s" +msgstr "" + +#: cinder/utils.py:532 +#, python-format +msgid "Link Local address is not found.:%s" +msgstr "" + +#: cinder/utils.py:535 +#, python-format +msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +msgstr "" + +#: cinder/utils.py:569 +#, python-format +msgid "Invalid backend: %s" +msgstr "" + +#: cinder/utils.py:580 +#, python-format +msgid "backend %s" +msgstr "" + +#: cinder/utils.py:630 +msgid "in looping call" +msgstr "" + +#: cinder/utils.py:813 +#, python-format +msgid "Expected object of type: %s" +msgstr "" + +#: cinder/utils.py:903 +#, python-format +msgid "timefunc: '%(name)s' took %(total_time).2f secs" +msgstr "" + +#: cinder/utils.py:1105 +#, python-format +msgid "Could not remove tmpdir: %s" +msgstr "" + +#: cinder/utils.py:1213 +#, python-format +msgid "Unknown byte multiplier: %s" +msgstr "" + +#: cinder/wsgi.py:121 +#, python-format +msgid "Unable to find cert_file : %s" +msgstr "" + +#: cinder/wsgi.py:124 +#, python-format +msgid "Unable to find ca_file : %s" +msgstr "" + +#: cinder/wsgi.py:127 +#, python-format +msgid "Unable to find key_file : %s" +msgstr "" + +#: cinder/wsgi.py:130 +msgid "" +"When running server in SSL mode, you must specify both a cert_file and " +"key_file option value in your configuration file" +msgstr "" + +#: cinder/wsgi.py:163 +#, python-format +msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" +msgstr "" + +#: cinder/wsgi.py:207 +#, python-format +msgid "Started %(name)s on %(_host)s:%(_port)s" +msgstr "" + +#: cinder/wsgi.py:226 +msgid "Stopping WSGI server." +msgstr "" + +#: cinder/wsgi.py:240 +msgid "WSGI server has stopped." +msgstr "" + +#: cinder/wsgi.py:309 +msgid "You must implement __call__" +msgstr "" + +#: cinder/api/auth.py:26 +msgid "" +"cinder.api.auth:CinderKeystoneContext is deprecated. Please use " +"cinder.api.middleware.auth:CinderKeystoneContext instead." +msgstr "" + +#: cinder/api/auth.py:34 +msgid "" +"cinder.api.auth:pipeline_factory is deprecated. Please use " +"cinder.api.middleware.auth:pipeline_factory instead." 
+msgstr "" + +#: cinder/api/common.py:63 cinder/api/common.py:97 cinder/volume/api.py:359 +msgid "limit param must be an integer" +msgstr "" + +#: cinder/api/common.py:66 cinder/api/common.py:101 cinder/volume/api.py:356 +msgid "limit param must be positive" +msgstr "" + +#: cinder/api/common.py:91 +msgid "offset param must be an integer" +msgstr "" + +#: cinder/api/common.py:105 +msgid "offset param must be positive" +msgstr "" + +#: cinder/api/common.py:133 +#, python-format +msgid "marker [%s] not found" +msgstr "" + +#: cinder/api/common.py:160 +#, python-format +msgid "href %s does not contain version" +msgstr "" + +#: cinder/api/extensions.py:184 +msgid "Initializing extension manager." +msgstr "" + +#: cinder/api/extensions.py:199 +#, python-format +msgid "Loaded extension: %s" +msgstr "" + +#: cinder/api/extensions.py:237 +#, python-format +msgid "Ext name: %s" +msgstr "" + +#: cinder/api/extensions.py:238 +#, python-format +msgid "Ext alias: %s" +msgstr "" + +#: cinder/api/extensions.py:239 +#, python-format +msgid "Ext description: %s" +msgstr "" + +#: cinder/api/extensions.py:241 +#, python-format +msgid "Ext namespace: %s" +msgstr "" + +#: cinder/api/extensions.py:242 +#, python-format +msgid "Ext updated: %s" +msgstr "" + +#: cinder/api/extensions.py:244 +#, python-format +msgid "Exception loading extension: %s" +msgstr "" + +#: cinder/api/extensions.py:258 +#, python-format +msgid "Loading extension %s" +msgstr "" + +#: cinder/api/extensions.py:264 +#, python-format +msgid "Calling extension factory %s" +msgstr "" + +#: cinder/api/extensions.py:278 +#, python-format +msgid "osapi_volume_extension is set to deprecated path: %s" +msgstr "" + +#: cinder/api/extensions.py:280 +#, python-format +msgid "" +"Please set your flag or cinder.conf settings for osapi_volume_extension " +"to: %s" +msgstr "" + +#: cinder/api/extensions.py:289 +#, python-format +msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:357 +#, python-format +msgid "Failed to load extension %(classpath)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:381 +#, python-format +msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgstr "" + +#: cinder/api/sizelimit.py:25 +msgid "" +"cinder.api.sizelimit:RequestBodySizeLimiter is deprecated. Please use " +"cinder.api.middleware.sizelimit:RequestBodySizeLimiter instead" +msgstr "" + +#: cinder/api/xmlutil.py:268 +msgid "element is not a child" +msgstr "" + +#: cinder/api/xmlutil.py:417 +msgid "root element selecting a list" +msgstr "" + +#: cinder/api/xmlutil.py:742 +#, python-format +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgstr "" + +#: cinder/api/xmlutil.py:861 +msgid "subclasses must implement construct()!" 
+msgstr "" + +#: cinder/api/contrib/admin_actions.py:80 +#, python-format +msgid "Updating %(resource)s '%(id)s' with '%(update)r'" +msgstr "" + +#: cinder/api/contrib/backups.py:128 +#, python-format +msgid "show called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:140 +#, python-format +msgid "delete called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:143 +#, python-format +msgid "Delete backup with id: %s" +msgstr "" + +#: cinder/api/contrib/backups.py:185 +#, python-format +msgid "Creating new backup %s" +msgstr "" + +#: cinder/api/contrib/backups.py:195 cinder/api/contrib/backups.py:230 +msgid "Incorrect request body format" +msgstr "" + +#: cinder/api/contrib/backups.py:201 +#, python-format +msgid "Creating backup of volume %(volume_id)s in container %(container)s" +msgstr "" + +#: cinder/api/contrib/backups.py:221 +#, python-format +msgid "Restoring backup %(backup_id)s (%(body)s)" +msgstr "" + +#: cinder/api/contrib/backups.py:234 +#, python-format +msgid "Restoring backup %(backup_id)s to volume %(volume_id)s" +msgstr "" + +#: cinder/api/contrib/extended_snapshot_attributes.py:61 +msgid "Snapshot not found." +msgstr "" + +#: cinder/api/contrib/hosts.py:83 cinder/api/openstack/wsgi.py:158 +msgid "cannot understand XML" +msgstr "" + +#: cinder/api/contrib/hosts.py:133 +#, python-format +msgid "Host '%s' could not be found." +msgstr "" + +#: cinder/api/contrib/hosts.py:162 +#, python-format +msgid "Invalid status: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:165 +#, python-format +msgid "Invalid update setting: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:177 +#, python-format +msgid "Setting host %(host)s to %(state)s." +msgstr "" + +#: cinder/api/contrib/hosts.py:202 +msgid "Describe-resource is admin only functionality" +msgstr "" + +#: cinder/api/contrib/hosts.py:210 +msgid "Host not found" +msgstr "" + +#: cinder/api/contrib/quotas.py:63 +msgid "Quota limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:102 +msgid "Request body empty" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:106 +#: cinder/api/v1/snapshot_metadata.py:77 cinder/api/v1/volume_metadata.py:77 +#: cinder/api/v2/snapshot_metadata.py:77 +msgid "Request body and URI mismatch" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:109 +#: cinder/api/v1/snapshot_metadata.py:81 cinder/api/v1/volume_metadata.py:81 +#: cinder/api/v2/snapshot_metadata.py:81 +msgid "Request body contains too many items" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:159 +msgid "Invalid request body" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:163 +msgid "No image_name was specified in request." +msgstr "" + +#: cinder/api/middleware/fault.py:45 +#, python-format +msgid "Caught error: %s" +msgstr "" + +#: cinder/api/middleware/fault.py:54 cinder/api/openstack/wsgi.py:886 +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "" + +#: cinder/api/middleware/sizelimit.py:54 cinder/api/middleware/sizelimit.py:63 +#: cinder/api/middleware/sizelimit.py:77 +msgid "Request is too large." 
+msgstr "" + +#: cinder/api/openstack/__init__.py:74 +msgid "Must specify an ExtensionManager class" +msgstr "" + +#: cinder/api/openstack/__init__.py:85 +#, python-format +msgid "Extended resource: %s" +msgstr "" + +#: cinder/api/openstack/__init__.py:110 +#, python-format +msgid "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" +msgstr "" + +#: cinder/api/openstack/__init__.py:115 +#, python-format +msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgstr "" + +#: cinder/api/openstack/__init__.py:128 +msgid "" +"cinder.api.openstack:FaultWrapper is deprecated. Please use " +"cinder.api.middleware.fault:FaultWrapper instead." +msgstr "" + +#: cinder/api/openstack/urlmap.py:25 +msgid "" +"cinder.api.openstack.urlmap:urlmap_factory is deprecated. Please use " +"cinder.api.urlmap:urlmap_factory instead." +msgstr "" + +#: cinder/api/openstack/wsgi.py:134 cinder/api/openstack/wsgi.py:537 +msgid "cannot understand JSON" +msgstr "" + +#: cinder/api/openstack/wsgi.py:542 +msgid "too many body keys" +msgstr "" + +#: cinder/api/openstack/wsgi.py:581 +#, python-format +msgid "Exception handling resource: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:586 +#, python-format +msgid "Fault thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:589 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:697 +msgid "Unrecognized Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:701 +msgid "No Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:705 +msgid "Empty body provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:816 +#, python-format +msgid "There is no such action: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:819 cinder/api/openstack/wsgi.py:832 +#: cinder/api/v1/snapshot_metadata.py:55 cinder/api/v1/snapshot_metadata.py:73 +#: cinder/api/v1/snapshot_metadata.py:98 cinder/api/v1/snapshot_metadata.py:123 +#: cinder/api/v1/volume_metadata.py:55 cinder/api/v1/volume_metadata.py:73 +#: cinder/api/v1/volume_metadata.py:98 cinder/api/v1/volume_metadata.py:123 +#: cinder/api/v2/snapshot_metadata.py:55 cinder/api/v2/snapshot_metadata.py:73 +#: cinder/api/v2/snapshot_metadata.py:98 cinder/api/v2/snapshot_metadata.py:123 +msgid "Malformed request body" +msgstr "" + +#: cinder/api/openstack/wsgi.py:829 +msgid "Unsupported Content-Type" +msgstr "" + +#: cinder/api/openstack/wsgi.py:841 +msgid "Malformed request url" +msgstr "" + +#: cinder/api/openstack/wsgi.py:889 +#, python-format +msgid "%(url)s returned a fault: %(e)s" +msgstr "" + +#: cinder/api/openstack/volume/__init__.py:25 +msgid "" +"cinder.api.openstack.volume:APIRouter is deprecated. Please use " +"cinder.api.v1.router:APIRouter instead." +msgstr "" + +#: cinder/api/openstack/volume/versions.py:26 +msgid "" +"cinder.api.openstack.volume.versions.Versions is deprecated. Please use " +"cinder.api.versions.Versions instead." +msgstr "" + +#: cinder/api/v1/limits.py:144 cinder/api/v2/limits.py:144 +#, python-format +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." +msgstr "" + +#: cinder/api/v1/limits.py:270 cinder/api/v2/limits.py:270 +msgid "This request was rate-limited." 
+msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:39 cinder/api/v1/snapshot_metadata.py:119 +#: cinder/api/v1/snapshot_metadata.py:158 cinder/api/v2/snapshot_metadata.py:39 +#: cinder/api/v2/snapshot_metadata.py:119 +#: cinder/api/v2/snapshot_metadata.py:158 +msgid "snapshot does not exist" +msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:141 +#: cinder/api/v1/snapshot_metadata.py:151 cinder/api/v1/volume_metadata.py:141 +#: cinder/api/v1/volume_metadata.py:151 cinder/api/v2/snapshot_metadata.py:141 +#: cinder/api/v2/snapshot_metadata.py:151 +msgid "Metadata item was not found" +msgstr "" + +#: cinder/api/v1/snapshots.py:123 cinder/api/v2/snapshots.py:123 +#, python-format +msgid "Delete snapshot with id: %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:173 cinder/api/v2/snapshots.py:179 +#, python-format +msgid "Create snapshot from volume %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:177 cinder/api/v2/snapshots.py:188 +#, python-format +msgid "Invalid value '%s' for force. " +msgstr "" + +#: cinder/api/v1/volume_metadata.py:39 cinder/api/v1/volume_metadata.py:119 +#: cinder/api/v1/volume_metadata.py:158 +msgid "volume does not exist" +msgstr "" + +#: cinder/api/v1/volumes.py:106 +#, python-format +msgid "vol=%s" +msgstr "" + +#: cinder/api/v1/volumes.py:236 cinder/api/v2/volumes.py:153 +#, python-format +msgid "Delete volume with id: %s" +msgstr "" + +#: cinder/api/v1/volumes.py:278 cinder/api/v1/volumes.py:282 +#: cinder/api/v2/volumes.py:209 cinder/api/v2/volumes.py:213 +msgid "Invalid imageRef provided." +msgstr "" + +#: cinder/api/v1/volumes.py:339 cinder/api/v2/volumes.py:272 +#, python-format +msgid "Create volume of %s GB" +msgstr "" + +#: cinder/api/v1/volumes.py:418 +#, python-format +msgid "Removing options '%(bad_options)s' from query" +msgstr "" + +#: cinder/api/v2/volumes.py:359 +#, python-format +msgid "Removing options '%s' from query" +msgstr "" + +#: cinder/backup/api.py:53 +msgid "Backup status must be available or error" +msgstr "" + +#: cinder/backup/api.py:78 +msgid "Volume to be backed up must be available" +msgstr "" + +#: cinder/backup/api.py:112 +msgid "Backup status must be available" +msgstr "" + +#: cinder/backup/api.py:117 +msgid "Backup to be restored has invalid size" +msgstr "" + +#: cinder/backup/api.py:126 +#, python-format +msgid "Creating volume of %(size)s GB for restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/api.py:140 +#, python-format +msgid "" +"volume size %(volume_size)d is too small to restore backup of size " +"%(size)d." 
+msgstr "" + +#: cinder/backup/api.py:145 +msgid "Volume to be restored to must be available" +msgstr "" + +#: cinder/backup/api.py:151 +msgid "Volume to be restored to is smaller than the backup to be restored" +msgstr "" + +#: cinder/backup/api.py:155 +#, python-format +msgid "Overwriting volume %(volume_id)s with restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/manager.py:79 +msgid "Cleaning up incomplete backup operations" +msgstr "" + +#: cinder/backup/manager.py:83 +#, python-format +msgid "Resetting volume %s to available (was backing-up)" +msgstr "" + +#: cinder/backup/manager.py:87 +#, python-format +msgid "Resetting volume %s to error_restoring (was restoring-backup)" +msgstr "" + +#: cinder/backup/manager.py:98 +#, python-format +msgid "Resetting backup %s to error (was creating)" +msgstr "" + +#: cinder/backup/manager.py:104 +#, python-format +msgid "Resetting backup %s to available (was restoring)" +msgstr "" + +#: cinder/backup/manager.py:109 +#, python-format +msgid "Resuming delete on backup: %s" +msgstr "" + +#: cinder/backup/manager.py:119 +#, python-format +msgid "create_backup started, backup: %(backup_id)s for volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:128 +#, python-format +msgid "" +"create_backup aborted, expected volume status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:137 +#, python-format +msgid "" +"create_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:160 +#, python-format +msgid "create_backup finished. backup: %s" +msgstr "" + +#: cinder/backup/manager.py:166 +#, python-format +msgid "" +"restore_backup started, restoring backup: %(backup_id)s to volume: " +"%(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:175 +#, python-format +msgid "" +"restore_backup aborted, expected volume status %(expected_status)s but " +"got %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:183 +#, python-format +msgid "" +"restore_backup aborted, expected backup status %(expected_status)s but " +"got %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:199 +#, python-format +msgid "" +"restore_backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]" +msgstr "" + +#: cinder/backup/manager.py:220 +#, python-format +msgid "" +"restore_backup finished, backup: %(backup_id)s restored to volume: " +"%(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:228 +#, python-format +msgid "delete_backup started, backup: %s" +msgstr "" + +#: cinder/backup/manager.py:234 +#, python-format +msgid "" +"delete_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:244 +#, python-format +msgid "" +"delete_backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]" +msgstr "" + +#: cinder/backup/manager.py:264 +#, python-format +msgid "delete_backup finished, backup %s deleted" +msgstr "" + +#: cinder/backup/services/swift.py:96 +#, python-format +msgid "unsupported compression algorithm: %s" +msgstr "" + +#: cinder/backup/services/swift.py:117 +#, python-format +msgid "_check_container_exists: container: %s" +msgstr "" + +#: cinder/backup/services/swift.py:122 +#, python-format +msgid "container %s does 
not exist" +msgstr "" + +#: cinder/backup/services/swift.py:127 +#, python-format +msgid "container %s exists" +msgstr "" + +#: cinder/backup/services/swift.py:133 +#, python-format +msgid "_create_container started, container: %(container)s,backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:148 +#, python-format +msgid "_generate_swift_object_name_prefix: %s" +msgstr "" + +#: cinder/backup/services/swift.py:159 +#, python-format +msgid "generated object list: %s" +msgstr "" + +#: cinder/backup/services/swift.py:169 +#, python-format +msgid "" +"_write_metadata started, container name: %(container)s, metadata " +"filename: %(filename)s" +msgstr "" + +#: cinder/backup/services/swift.py:184 +#, python-format +msgid "" +"error writing metadata file to swift, MD5 of metadata file in swift " +"[%(etag)s] is not the same as MD5 of metadata file sent to swift " +"[%(md5)s]" +msgstr "" + +#: cinder/backup/services/swift.py:188 +msgid "_write_metadata finished" +msgstr "" + +#: cinder/backup/services/swift.py:193 +#, python-format +msgid "" +"_read_metadata started, container name: %(container)s, metadata filename:" +" %(filename)s" +msgstr "" + +#: cinder/backup/services/swift.py:197 +#, python-format +msgid "_read_metadata finished (%s)" +msgstr "" + +#: cinder/backup/services/swift.py:207 +#, python-format +msgid "volume size %d is invalid." +msgstr "" + +#: cinder/backup/services/swift.py:221 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to swift, volume size: " +"%(volume_size_bytes)d, swift object names prefix %(object_prefix)s, " +"availability zone: %(availability_zone)s" +msgstr "" + +#: cinder/backup/services/swift.py:237 +msgid "reading chunk of data from volume" +msgstr "" + +#: cinder/backup/services/swift.py:244 +#, python-format +msgid "" +"compressed %(data_size_bytes)d bytes of data to %(comp_size_bytes)d bytes" +" using %(algorithm)s" +msgstr "" + +#: cinder/backup/services/swift.py:248 +msgid "not compressing data" +msgstr "" + +#: cinder/backup/services/swift.py:252 +msgid "About to put_object" +msgstr "" + +#: cinder/backup/services/swift.py:257 +#, python-format +msgid "swift MD5 for %(object_name)s: %(etag)s" +msgstr "" + +#: cinder/backup/services/swift.py:260 +#, python-format +msgid "backup MD5 for %(object_name)s: %(md5)s" +msgstr "" + +#: cinder/backup/services/swift.py:262 +#, python-format +msgid "" +"error writing object to swift, MD5 of object in swift %(etag)s is not the" +" same as MD5 of object sent to swift %(md5)s" +msgstr "" + +#: cinder/backup/services/swift.py:268 +msgid "Calling eventlet.sleep(0)" +msgstr "" + +#: cinder/backup/services/swift.py:276 +#, python-format +msgid "backup %s finished." +msgstr "" + +#: cinder/backup/services/swift.py:281 +#, python-format +msgid "v1 swift volume backup restore of %s started" +msgstr "" + +#: cinder/backup/services/swift.py:287 +#, python-format +msgid "metadata_object_names = %s" +msgstr "" + +#: cinder/backup/services/swift.py:293 +msgid "" +"restore_backup aborted, actual swift object list in swift does not match " +"object list stored in metadata" +msgstr "" + +#: cinder/backup/services/swift.py:299 +#, python-format +msgid "" +"restoring object from swift. 
backup: %(backup_id)s, container: " +"%(container)s, swift object name: %(object_name)s, volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:309 +#, python-format +msgid "decompressing data using %s algorithm" +msgstr "" + +#: cinder/backup/services/swift.py:323 +#, python-format +msgid "v1 swift volume backup restore of %s finished" +msgstr "" + +#: cinder/backup/services/swift.py:331 +#, python-format +msgid "" +"starting restore of backup %(object_prefix)s from swift container: " +"%(container)s, to volume %(volume_id)s, backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:339 +#, python-format +msgid "Restoring swift backup version %s" +msgstr "" + +#: cinder/backup/services/swift.py:344 +#, python-format +msgid "No support to restore swift backup version %s" +msgstr "" + +#: cinder/backup/services/swift.py:348 +#, python-format +msgid "restore %(backup_id)s to %(volume_id)s finished." +msgstr "" + +#: cinder/backup/services/swift.py:362 +msgid "swift error while listing objects, continuing with delete" +msgstr "" + +#: cinder/backup/services/swift.py:371 +#, python-format +msgid "swift error while deleting object %s, continuing with delete" +msgstr "" + +#: cinder/backup/services/swift.py:374 +#, python-format +msgid "deleted swift object: %(swift_object_name)s in container: %(container)s" +msgstr "" + +#: cinder/backup/services/swift.py:380 +#, python-format +msgid "delete %s finished" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:152 +#, python-format +msgid "Creating iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:171 cinder/brick/iscsi/iscsi.py:264 +#: cinder/brick/iscsi/iscsi.py:400 cinder/brick/iscsi/iscsi.py:409 +#, python-format +msgid "Failed to create iscsi target for volume id:%(vol_id)s." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:181 +#, python-format +msgid "" +"Failed to create iscsi target for volume id:%(vol_id)s. Please ensure " +"your tgtd config file contains 'include %(volumes_dir)s/*'" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:192 +#, python-format +msgid "Removing iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:209 cinder/brick/iscsi/iscsi.py:426 +#, python-format +msgid "Failed to remove iscsi target for volume id:%(vol_id)s." 
+msgstr "" + +#: cinder/brick/iscsi/iscsi.py:218 cinder/brick/iscsi/iscsi.py:434 +msgid "valid iqn needed for show_target" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:270 +#, python-format +msgid "Removing iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:356 +msgid "rtstool is not installed correctly" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:376 +#, python-format +msgid "Creating iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:416 +#, python-format +msgid "Removing iscsi_target: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:455 +#, python-format +msgid "Failed to add initiator iqn %s to target" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:35 +#, python-format +msgid "Unable to find Volume Group: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:41 +#, python-format +msgid "Failed to create Volume Group: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:75 +msgid "Error creating Volume Group" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:76 +#, python-format +msgid "Cmd :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:77 +#, python-format +msgid "StdOut :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:78 +#, python-format +msgid "StdErr :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:82 +#, python-format +msgid "Unable to locate Volume Group %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:265 +#, python-format +msgid "Unable to find VG: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:284 +msgid "" +"Requested to setup thin provisioning, however current LVM version does " +"not support it." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:337 +#, python-format +msgid "Unable to find LV: %s" +msgstr "" + +#: cinder/common/sqlalchemyutils.py:68 +msgid "Id not in sort_keys; is sort_keys unique?" +msgstr "" + +#: cinder/common/sqlalchemyutils.py:116 +msgid "Unknown sort direction, must be 'desc' or 'asc'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:51 +msgid "Use of empty request context is deprecated" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:178 +#, python-format +msgid "Unrecognized read_deleted value '%s'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:786 +#, python-format +msgid "Change will make usage less than 0 for the following resources: %(unders)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1797 cinder/db/sqlalchemy/api.py:1823 +#, python-format +msgid "No backend config with id %(sm_backend_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1880 +#, python-format +msgid "No sm_flavor called %(sm_flavor)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1924 +#, python-format +msgid "No sm_volume with id %(volume_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1987 +#, python-format +msgid "No backup with id %(backup_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:74 +msgid "version should be an integer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:101 +msgid "Upgrade DB using Essex release first." +msgstr "" + +#: cinder/db/sqlalchemy/session.py:71 +#, python-format +msgid "Got mysql server has gone away: %s" +msgstr "" + +#: cinder/db/sqlalchemy/session.py:132 +#, python-format +msgid "SQL connection failed. %s attempts left." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:243 +msgid "Exception while creating table." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:272 +msgid "Downgrade from initial Cinder install is unsupported." 
+msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:85 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:123 +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:82 +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:47 +#, python-format +msgid "Table |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:135 +msgid "quota_classes table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:142 +msgid "quota_usages table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:149 +msgid "reservations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:62 +msgid "Exception while creating table 'volume_glance_metedata'" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:77 +msgid "volume_glance_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:94 +msgid "backups table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:60 +msgid "snapshot_metadata table not dropped" +msgstr "" + +#: cinder/image/glance.py:151 +#, python-format +msgid "" +"Error contacting glance server '%(host)s:%(port)s' for '%(method)s', " +"%(extra)s." +msgstr "" + +#: cinder/image/image_utils.py:116 +msgid "Snapshot list encountered but no header found!" +msgstr "" + +#: cinder/image/image_utils.py:224 +msgid "'qemu-img info' parsing failed." +msgstr "" + +#: cinder/image/image_utils.py:231 +#, python-format +msgid "fmt=%(fmt)s backed by:%(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:248 +#, python-format +msgid "Converted to raw, but format is now %s" +msgstr "" + +#: cinder/image/image_utils.py:278 +#, python-format +msgid "Converted to %(f1)s, but format is now %(f2)s" +msgstr "" + +#: cinder/openstack/common/exception.py:104 +msgid "Uncaught exception" +msgstr "" + +#: cinder/openstack/common/excutils.py:48 +#, python-format +msgid "Original exception being dropped: %s" +msgstr "" + +#: cinder/openstack/common/lockutils.py:102 +#, python-format +msgid "Could not release the acquired lock `%s`" +msgstr "" + +#: cinder/openstack/common/lockutils.py:188 +#, python-format +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:199 +#, python-format +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:226 +#, python-format +msgid "Got file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:234 +#, python-format +msgid "Released file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." 
+msgstr "" + +#: cinder/openstack/common/log.py:226 +#, fuzzy, python-format +msgid "Deprecated: %s" +msgstr "natanggap %s" + +#: cinder/openstack/common/log.py:329 +#, python-format +msgid "Error loading logging config %(log_config)s: %(err_msg)s" +msgstr "" + +#: cinder/openstack/common/log.py:379 +#, python-format +msgid "syslog facility must be one of: %s" +msgstr "" + +#: cinder/openstack/common/log.py:537 +#, python-format +msgid "Fatal call to deprecated config: %(msg)s" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:84 +#, python-format +msgid "task run outlasted interval by %s sec" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:91 +msgid "in fixed duration looping call" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:131 +#, python-format +msgid "Dynamic looping call sleeping for %.02f seconds" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:138 +msgid "in dynamic looping call" +msgstr "" + +#: cinder/openstack/common/policy.py:149 +#, python-format +msgid "" +"Inheritance-based rules are deprecated; use the default brain instead of " +"%s." +msgstr "" + +#: cinder/openstack/common/policy.py:163 +#, python-format +msgid "Failed to understand rule %(match)r" +msgstr "" + +#: cinder/openstack/common/policy.py:173 +#, python-format +msgid "Inheritance-based rules are deprecated; update _check_%s" +msgstr "" + +#: cinder/openstack/common/policy.py:180 +#, python-format +msgid "No handler for matches of kind %s" +msgstr "" + +#: cinder/openstack/common/service.py:112 +#: cinder/openstack/common/service.py:275 +msgid "Full set of CONF:" +msgstr "" + +#: cinder/openstack/common/service.py:261 +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "" + +#: cinder/openstack/common/strutils.py:72 +#, python-format +msgid "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:125 +#, python-format +msgid "%s not in valid priorities" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:141 +#, python-format +msgid "" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:171 +#, python-format +msgid "Failed to load notifier %s. These notifications will not be sent." +msgstr "" + +#: cinder/openstack/common/notifier/rabbit_notifier.py:27 +msgid "The rabbit_notifier is now deprecated. Please use rpc_notifier instead." +msgstr "" + +#: cinder/openstack/common/notifier/rpc_notifier.py:45 +#: cinder/openstack/common/notifier/rpc_notifier2.py:51 +#, python-format +msgid "Could not send notification to %(topic)s. Payload=%(message)s" +msgstr "" + +#: cinder/openstack/common/rpc/__init__.py:106 +#, python-format +msgid "" +"A RPC is being made while holding a lock. The locks currently held are " +"%(locks)s. This is probably a bug. Please report it. Include the " +"following: [%(stack)s]." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:75 +msgid "Pool creating new connection" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:200 +#, python-format +msgid "no calling threads waiting for msg_id : %s, message : %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:208 +#, python-format +msgid "" +"Number of call waiters is greater than warning threshhold: %d. There " +"could be a MulticallProxyWaiter leak." 
+msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:291 +#, python-format +msgid "unpacked context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:337 +#, python-format +msgid "UNIQUE_ID is %s." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:406 +#, python-format +msgid "received %s" +msgstr "natanggap %s" + +#: cinder/openstack/common/rpc/amqp.py:414 +#, python-format +msgid "no method for message: %s" +msgstr "walang paraan para sa mensahe: %s" + +#: cinder/openstack/common/rpc/amqp.py:415 +#, python-format +msgid "No method for message: %s" +msgstr "Walang paraan para sa mensahe: %s" + +#: cinder/openstack/common/rpc/amqp.py:443 +#: cinder/openstack/common/rpc/impl_zmq.py:286 +#, python-format +msgid "Expected exception during message handling (%s)" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:451 +#: cinder/openstack/common/rpc/impl_zmq.py:292 +msgid "Exception during message handling" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:586 +#, python-format +msgid "Making synchronous call on %s ..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:589 +#, python-format +msgid "MSG_ID is %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:623 +#, python-format +msgid "Making asynchronous cast on %s..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:632 +msgid "Making asynchronous fanout cast..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:660 +#, python-format +msgid "Sending %(event_type)s on %(topic)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:74 +msgid "An unknown RPC related exception occurred." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:104 +#, python-format +msgid "" +"Remote error: %(exc_type)s %(value)s\n" +"%(traceback)s." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:121 +#, python-format +msgid "" +"Timeout while waiting on RPC response - topic: \"%(topic)s\", RPC method:" +" \"%(method)s\" info: \"%(info)s\"" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:137 +#: cinder/openstack/common/rpc/common.py:138 +#: cinder/openstack/common/rpc/common.py:139 +msgid "" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:143 +#, python-format +msgid "Found duplicate message(%(msg_id)s). Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:147 +msgid "Invalid reuse of an RPC connection." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:151 +#, python-format +msgid "Specified RPC version, %(version)s, not supported by this endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:156 +#, python-format +msgid "" +"Specified RPC envelope version, %(version)s, not supported by this " +"endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:280 +#, python-format +msgid "Failed to sanitize %(item)s. Key error %(err)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:302 +#, python-format +msgid "Returning exception %s to caller" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:169 +#: cinder/openstack/common/rpc/impl_qpid.py:133 +msgid "Failed to process message... skipping it." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:480 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:502 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:539 +#, python-format +msgid "" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:555 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:609 +#: cinder/openstack/common/rpc/impl_qpid.py:403 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:627 +#: cinder/openstack/common/rpc/impl_qpid.py:418 +#, python-format +msgid "Timed out waiting for RPC response: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:631 +#: cinder/openstack/common/rpc/impl_qpid.py:422 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:670 +#: cinder/openstack/common/rpc/impl_qpid.py:457 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:351 +#, python-format +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:357 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:370 +msgid "Re-established AMQP queues" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:430 +msgid "Error processing message. Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:97 +msgid "JSON serialization failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:104 +#, python-format +msgid "Deserializing: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:140 +#, python-format +msgid "Connecting to %(addr)s with %(type)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:141 +#, python-format +msgid "-> Subscribed to %(subscribe)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:142 +#, python-format +msgid "-> bind: %(bind)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:150 +msgid "Could not open socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:162 +#, python-format +msgid "Subscribing to %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:204 +msgid "You cannot recv on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:209 +msgid "You cannot send on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:273 +#, python-format +msgid "Running func with context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:311 +msgid "Sending reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:345 +msgid "RPC message did not include method." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:380 +msgid "Registering reactor" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:392 +msgid "In reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:407 +msgid "Out reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:411 +msgid "Consuming socket" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:453 +#, python-format +msgid "CONSUMER GOT %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:465 +#, python-format +msgid "Creating proxy for topic: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:471 +msgid "Topic contained dangerous characters." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:496 +#, python-format +msgid "ROUTER RELAY-OUT SUCCEEDED %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:505 +msgid "Topic socket file creation failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:510 +#, python-format +msgid "ROUTER RELAY-OUT QUEUED %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:513 +#, python-format +msgid "Local per-topic backlog buffer full for topic %(topic)s. Dropping message." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:532 +#, python-format +msgid "Could not create IPC directory %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:542 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:576 +#, python-format +msgid "CONSUMER RECEIVED DATA: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:578 +#, python-format +msgid "ROUTER RELAY-OUT %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:600 +msgid "ZMQ Envelope version unsupported or unknown." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:628 +msgid "Skipping topic registration. Already registered." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:635 +#, python-format +msgid "Consumer is a zmq.%s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:687 +msgid "Creating payload" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:700 +msgid "Creating queue socket for reply waiter" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:713 +msgid "Sending cast" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:716 +msgid "Cast sent; Waiting reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:719 +#, fuzzy, python-format +msgid "Received message: %s" +msgstr "natanggap %s" + +#: cinder/openstack/common/rpc/impl_zmq.py:720 +msgid "Unpacking response" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:729 +msgid "Unsupported or unknown ZMQ envelope returned." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:736 +msgid "RPC Message Invalid." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:760 +#, python-format +msgid "%(msg)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:763 +#, python-format +msgid "Sending message(s) to: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:767 +msgid "No matchmaker results. Not casting." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:770 +msgid "No match from matchmaker." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:53 +msgid "Match not found by MatchMaker." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:89 +msgid "Matchmaker does not implement registration or heartbeat." 
+msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:239 +#, python-format +msgid "Matchmaker unregistered: %s, %s" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:250 +msgid "Register before starting heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:343 +#: cinder/openstack/common/rpc/matchmaker.py:361 +#, python-format +msgid "No key defining hosts for topic '%s', see ringfile" +msgstr "" + +#: cinder/scheduler/chance.py:51 cinder/scheduler/simple.py:90 +msgid "Is the appropriate service running?" +msgstr "" + +#: cinder/scheduler/chance.py:56 +msgid "Could not find another host" +msgstr "" + +#: cinder/scheduler/driver.py:89 +msgid "Must implement a fallback schedule" +msgstr "" + +#: cinder/scheduler/driver.py:93 +msgid "Must implement schedule_create_volume" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:113 +msgid "Invalid value for 'scheduler_max_attempts', must be >=1" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:131 +#, python-format +msgid "" +"Error scheduling %(volume_id)s from last vol-service: %(last_host)s : " +"%(exc)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:160 +#, python-format +msgid "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:208 +#, python-format +msgid "Filtered %(hosts)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:214 +#, python-format +msgid "Choosing %(best_host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:240 +#, python-format +msgid "Ignoring %(service_name)s service update from %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:244 +#, python-format +msgid "Received %(service_name)s service update from %(host)s." +msgstr "" + +#: cinder/scheduler/host_manager.py:266 +msgid "service is down or disabled." +msgstr "" + +#: cinder/scheduler/manager.py:119 +#, python-format +msgid "Failed to schedule_%(method)s: %(ex)s" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:69 +#, python-format +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:78 +#, python-format +msgid "Could not decode scheduler options: '%(e)s'" +msgstr "" + +#: cinder/scheduler/simple.py:79 +msgid "Not enough allocatable volume gigabytes remaining" +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:37 +msgid "Free capacity not set: volume node info collection broken." +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:51 +#, python-format +msgid "" +"Insufficient free space for volume creation (requested / avail): " +"%(requested)s/%(available)s" +msgstr "" + +#: cinder/scheduler/filters/retry_filter.py:41 +#, python-format +msgid "Host %(host)s %(pass_msg)s. 
Previously tried hosts: %(hosts)s" +msgstr "" + +#: cinder/tests/fake_driver.py:45 cinder/volume/driver.py:506 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "" + +#: cinder/tests/fake_driver.py:93 cinder/tests/fake_driver.py:98 +#, python-format +msgid "LoggingVolumeDriver: %s" +msgstr "" + +#: cinder/tests/fake_utils.py:72 +#, python-format +msgid "Faking execution of cmd (subprocess): %s" +msgstr "" + +#: cinder/tests/fake_utils.py:80 +#, python-format +msgid "Faked command matched %s" +msgstr "" + +#: cinder/tests/fake_utils.py:96 +#, python-format +msgid "Faked command raised an exception %s" +msgstr "" + +#: cinder/tests/fake_utils.py:101 +#, python-format +msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" +msgstr "" + +#: cinder/tests/test_misc.py:59 +#, python-format +msgid "" +"The following migrations are missing a downgrade:\n" +"\t%s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:243 +#, python-format +msgid "unrecognized argument %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1217 +#, python-format +msgid "Run CLI command: %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1220 +#, python-format +msgid "" +"CLI output:\n" +" stdout: %(stdout)s\n" +" stderr: %(stderr)s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1225 +#, python-format +msgid "" +"CLI Exception output:\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/tests/test_volume_types.py:58 +#, python-format +msgid "Given data: %s" +msgstr "" + +#: cinder/tests/test_volume_types.py:59 +#, python-format +msgid "Result data: %s" +msgstr "" + +#: cinder/tests/api/contrib/test_backups.py:638 +msgid "Invalid input" +msgstr "" + +#: cinder/tests/integrated/test_login.py:31 +#, python-format +msgid "volume: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:37 +#, python-format +msgid "" +"%(message)s\n" +"Status Code: %(_status)s\n" +"Body: %(_body)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:46 +msgid "Authentication error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:54 +msgid "Authorization error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:62 +msgid "Item not found" +msgstr "" + +#: cinder/tests/integrated/api/client.py:104 +#, python-format +msgid "Doing %(method)s on %(relative_url)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:106 +#, python-format +msgid "Body: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:124 +#, python-format +msgid "%(auth_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:150 +#, python-format +msgid "%(relative_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:160 +msgid "Unexpected status code" +msgstr "" + +#: cinder/tests/integrated/api/client.py:167 +#, python-format +msgid "Decoding JSON: %s" +msgstr "" + +#: cinder/volume/api.py:96 +msgid "May specify only one of snapshot, imageRef or source volume" +msgstr "" + +#: cinder/volume/api.py:103 cinder/volume/api.py:449 +#: cinder/volume/manager.py:554 +msgid "status must be available" +msgstr "" + +#: cinder/volume/api.py:108 +msgid "Volume size cannot be less than the snapshot size" +msgstr "" + +#: cinder/volume/api.py:117 +msgid "Unable to clone volumes that are in an error state" +msgstr "" + +#: cinder/volume/api.py:123 +msgid "Clones currently must be >= original volume size."
+msgstr "" + +#: cinder/volume/api.py:140 +#, python-format +msgid "Volume size '%s' must be an integer and greater than 0" +msgstr "" + +#: cinder/volume/api.py:150 +msgid "Size of specified image is larger than volume size." +msgstr "" + +#: cinder/volume/api.py:154 +msgid "Image minDisk size is larger than the volume size." +msgstr "" + +#: cinder/volume/api.py:168 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/api.py:177 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create volume (%(d_consumed)d " +"volumes already consumed)" +msgstr "" + +#: cinder/volume/api.py:308 +msgid "Failed to update quota for deleting volume" +msgstr "" + +#: cinder/volume/api.py:316 +msgid "Volume status must be available or error" +msgstr "" + +#: cinder/volume/api.py:321 +#, python-format +msgid "Volume still has %d dependent snapshots" +msgstr "" + +#: cinder/volume/api.py:374 cinder/volume/api.py:432 +#: cinder/volume/volume_types.py:65 +#, python-format +msgid "Searching by: %s" +msgstr "" + +#: cinder/volume/api.py:452 +msgid "already attached" +msgstr "" + +#: cinder/volume/api.py:459 +msgid "already detached" +msgstr "" + +#: cinder/volume/api.py:470 +msgid "Volume status must be available to reserve" +msgstr "" + +#: cinder/volume/api.py:519 +msgid "must be available" +msgstr "" + +#: cinder/volume/api.py:537 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG snapshot " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/api.py:546 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create snapshot (%(d_consumed)d " +"snapshots already consumed)" +msgstr "" + +#: cinder/volume/api.py:595 +msgid "Volume Snapshot status must be available or error" +msgstr "" + +#: cinder/volume/api.py:623 +msgid "Metadata property key blank" +msgstr "" + +#: cinder/volume/api.py:627 +msgid "Metadata property key greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:631 +msgid "Metadata property value greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:716 +msgid "Volume status must be available/in-use." +msgstr "" + +#: cinder/volume/api.py:719 +msgid "Volume status is in-use." +msgstr "" + +#: cinder/volume/driver.py:94 cinder/volume/drivers/netapp/nfs.py:253 +#, python-format +msgid "Recovering from a failed execute. Try number %s" +msgstr "" + +#: cinder/volume/driver.py:221 cinder/volume/drivers/emc/emc_smis_iscsi.py:119 +msgid "ISCSI provider_location not stored, using discovery" +msgstr "" + +#: cinder/volume/driver.py:269 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/driver.py:273 cinder/volume/drivers/emc/emc_smis_iscsi.py:162 +#, python-format +msgid "ISCSI Discovery: Found %s" +msgstr "" + +#: cinder/volume/driver.py:361 +#, python-format +msgid "copy_image_to_volume %s." +msgstr "" + +#: cinder/volume/driver.py:378 +#, python-format +msgid "copy_volume_to_image %s." +msgstr "" + +#: cinder/volume/driver.py:437 +#, python-format +msgid "iSCSI device not found at %s" +msgstr "" + +#: cinder/volume/driver.py:439 +#, python-format +msgid "" +"ISCSI volume not yet found at: %(host_device)s. Will rescan & retry. 
Try" +" number: %(tries)s" +msgstr "" + +#: cinder/volume/driver.py:451 +#, python-format +msgid "Found iSCSI node %(host_device)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/volume/driver.py:469 cinder/volume/manager.py:689 +#: cinder/volume/drivers/lvm.py:560 cinder/volume/drivers/lvm.py:676 +#: cinder/volume/drivers/storwize_svc.py:1344 +#: cinder/volume/drivers/emc/emc_smis_common.py:855 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:241 +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1503 +#: cinder/volume/drivers/netapp/iscsi.py:1114 +#: cinder/volume/drivers/netapp/iscsi.py:1484 +#: cinder/volume/drivers/netapp/iscsi.py:2236 +#: cinder/volume/drivers/netapp/iscsi.py:2515 +#: cinder/volume/drivers/nexenta/volume.py:317 +msgid "Updating volume status" +msgstr "" + +#: cinder/volume/driver.py:544 +msgid "Driver must implement initialize_connection" +msgstr "" + +#: cinder/volume/manager.py:122 +#, python-format +msgid "Driver path %s is deprecated, update your configuration to the new path." +msgstr "" + +#: cinder/volume/manager.py:144 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "" + +#: cinder/volume/manager.py:149 +#, python-format +msgid "volume %s stuck in a downloading state" +msgstr "" + +#: cinder/volume/manager.py:154 +#, python-format +msgid "volume %s: skipping export" +msgstr "" + +#: cinder/volume/manager.py:156 +msgid "Resuming any in progress delete operations" +msgstr "" + +#: cinder/volume/manager.py:159 +#, python-format +msgid "Resuming delete on volume: %s" +msgstr "" + +#: cinder/volume/manager.py:219 +#, python-format +msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +msgstr "" + +#: cinder/volume/manager.py:228 +#, python-format +msgid "volume %s: creating from snapshot" +msgstr "" + +#: cinder/volume/manager.py:232 +#, python-format +msgid "volume %s: creating from existing volume" +msgstr "" + +#: cinder/volume/manager.py:236 +#, python-format +msgid "volume %s: creating from image" +msgstr "" + +#: cinder/volume/manager.py:245 +#, python-format +msgid "volume %s: creating" +msgstr "" + +#: cinder/volume/manager.py:277 +#, python-format +msgid "volume %s: creating export" +msgstr "" + +#: cinder/volume/manager.py:286 +#, python-format +msgid "volume %s: create failed" +msgstr "" + +#: cinder/volume/manager.py:316 +#, python-format +msgid "volume %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:324 +#, python-format +msgid "Error: %s" +msgstr "" + +#: cinder/volume/manager.py:350 +#, python-format +msgid "volume %s: Error trying to reschedule create" +msgstr "" + +#: cinder/volume/manager.py:368 +msgid "Retry info not present, will not reschedule" +msgstr "" + +#: cinder/volume/manager.py:372 +msgid "No request spec, will not reschedule" +msgstr "" + +#: cinder/volume/manager.py:377 +#, python-format +msgid "volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d" +msgstr "" + +#: cinder/volume/manager.py:406 cinder/volume/manager.py:419 +#, python-format +msgid "volume %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:412 +msgid "volume is not local to this node" +msgstr "" + +#: cinder/volume/manager.py:417 +#, python-format +msgid "volume %s: removing export" +msgstr "" + +#: cinder/volume/manager.py:422 +#, python-format +msgid "volume %s: volume is busy" +msgstr "" + +#: cinder/volume/manager.py:441 +msgid "Failed to update usages deleting volume" +msgstr "" + +#: cinder/volume/manager.py:445 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "" + +#: 
cinder/volume/manager.py:460 +#, python-format +msgid "snapshot %s: creating" +msgstr "" + +#: cinder/volume/manager.py:466 +#, python-format +msgid "snapshot %(snap_name)s: creating" +msgstr "" + +#: cinder/volume/manager.py:484 +#, python-format +msgid "snapshot %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:492 cinder/volume/manager.py:502 +#, python-format +msgid "snapshot %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:505 +#, python-format +msgid "snapshot %s: snapshot is busy" +msgstr "" + +#: cinder/volume/manager.py:530 +msgid "Failed to update usages deleting snapshot" +msgstr "" + +#: cinder/volume/manager.py:533 +#, python-format +msgid "snapshot %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:551 +msgid "being attached by another instance" +msgstr "" + +#: cinder/volume/manager.py:608 +#, python-format +msgid "Downloaded image %(image_id)s to %(volume_id)s successfully" +msgstr "" + +#: cinder/volume/manager.py:626 +#, python-format +msgid "Uploaded volume %(volume_id)s to image (%(image_id)s) successfully" +msgstr "" + +#: cinder/volume/manager.py:702 +msgid "Clear capabilities" +msgstr "" + +#: cinder/volume/manager.py:706 +#, python-format +msgid "Notification {%s} received" +msgstr "" + +#: cinder/volume/volume_types.py:41 +#, python-format +msgid "DB error: %s" +msgstr "" + +#: cinder/volume/volume_types.py:50 cinder/volume/volume_types.py:97 +msgid "id cannot be None" +msgstr "" + +#: cinder/volume/volume_types.py:109 +msgid "name cannot be None" +msgstr "" + +#: cinder/volume/volume_types.py:128 +#, python-format +msgid "" +"Default volume type is not found, please check default_volume_type " +"config: %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:91 +msgid "Running with CoraidDriver for ESM EtherCloud" +msgstr "" + +#: cinder/volume/drivers/coraid.py:102 +#, python-format +msgid "Update session cookie %(session)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:108 cinder/volume/drivers/coraid.py:183 +#, python-format +msgid "Message : %(message)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:125 +#, python-format +msgid "Error while trying to set group: %(message)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:128 +#, python-format +msgid "Unable to find group: %(group)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:161 +msgid "ESM urlOpen error" +msgstr "" + +#: cinder/volume/drivers/coraid.py:166 +msgid "JSON Error" +msgstr "" + +#: cinder/volume/drivers/coraid.py:170 +msgid "Request without URL" +msgstr "" + +#: cinder/volume/drivers/coraid.py:175 +#, python-format +msgid "Configure data : %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:177 +#, python-format +msgid "Configure response : %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:199 +#, python-format +msgid "Unable to retrieve volume info for volume %(volname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:289 +msgid "Cannot log in to Coraid ESM" +msgstr "" + +#: cinder/volume/drivers/coraid.py:308 +#, python-format +msgid "Failed to create volume %(volname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:321 +#, python-format +msgid "Failed to delete volume %(volname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:335 +#, python-format +msgid "Failed to Create Snapshot %(snapname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:347 +#, python-format +msgid "Failed to Delete Snapshot %(snapname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:362 +#, python-format +msgid "Failed to Create Volume from Snapshot 
%(snapname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:383 +#, python-format +msgid "" +"Failed to Initialize Connection. Volume Name: %(volname)s Shelf: " +"%(shelf)s, Lun: %(lun)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:65 +#, python-format +msgid "There's no Gluster config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:70 +#, python-format +msgid "Gluster config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:80 +msgid "mount.glusterfs is not installed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:98 cinder/volume/drivers/nfs.py:168 +#, python-format +msgid "casted to %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:108 cinder/volume/drivers/nfs.py:178 +#, python-format +msgid "Volume %s does not have provider_location specified, skipping" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:168 cinder/volume/drivers/nfs.py:237 +#, python-format +msgid "Exception during mounting %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:247 cinder/volume/drivers/nfs.py:321 +#, python-format +msgid "%s is already mounted" +msgstr "" + +#: cinder/volume/drivers/lvm.py:82 +#, python-format +msgid "volume group %s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/lvm.py:197 +#, python-format +msgid "Size for volume: %s not found, skipping secure delete." +msgstr "" + +#: cinder/volume/drivers/lvm.py:204 +#, python-format +msgid "Performing secure delete on volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:218 +#, python-format +msgid "Error unrecognized volume_clear option: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:238 +#, python-format +msgid "snapshot: %s not found, skipping delete operations" +msgstr "" + +#: cinder/volume/drivers/lvm.py:268 cinder/volume/drivers/lvm.py:656 +#, python-format +msgid "Creating clone of volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:345 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provision for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:364 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:378 +msgid "Detected inconsistency in provider_location id" +msgstr "" + +#: cinder/volume/drivers/lvm.py:430 +#, python-format +msgid "Symbolic link %s not found" +msgstr "" + +#: cinder/volume/drivers/lvm.py:510 cinder/volume/drivers/lvm.py:524 +#, python-format +msgid "Skipping remove_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:542 +#, python-format +msgid "" +"Skipping remove_export. 
No iscsi_target is presently exported for volume:" +" %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:583 +msgid "Error retrieving volume status: " +msgstr "" + +#: cinder/volume/drivers/nfs.py:141 +#, python-format +msgid "There's no NFS config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/nfs.py:146 +#, python-format +msgid "NFS config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/rbd.py:64 +#, python-format +msgid "rbd has no pool %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:87 cinder/volume/drivers/sheepdog.py:134 +msgid "error refreshing volume stats" +msgstr "" + +#: cinder/volume/drivers/rbd.py:220 +msgid "Not stored in rbd" +msgstr "" + +#: cinder/volume/drivers/rbd.py:224 +msgid "Blank components" +msgstr "" + +#: cinder/volume/drivers/rbd.py:227 +msgid "Not an rbd snapshot" +msgstr "" + +#: cinder/volume/drivers/rbd.py:242 +#, python-format +msgid "%s is in a different ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:253 +#, python-format +msgid "Unable to read image %s" +msgstr "" + +#: cinder/volume/drivers/scality.py:63 +msgid "Value required for 'scality_sofs_config'" +msgstr "" + +#: cinder/volume/drivers/scality.py:74 +#, python-format +msgid "Cannot access 'scality_sofs_config': %s" +msgstr "" + +#: cinder/volume/drivers/scality.py:80 +msgid "Cannot execute /sbin/mount.sofs" +msgstr "" + +#: cinder/volume/drivers/scality.py:101 +msgid "Cannot mount Scality SOFS, check syslog for errors" +msgstr "" + +#: cinder/volume/drivers/scality.py:135 +#, python-format +msgid "Cannot find volume dir for Scality SOFS at '%s'" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:47 +#, python-format +msgid "Sheepdog is not working: %s" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:52 +msgid "Sheepdog is not working" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:129 +#, python-format +msgid "Payload for SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:146 +#, python-format +msgid "Call to json.loads() raised an exception: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:152 +#, python-format +msgid "Results of SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:156 +#, python-format +msgid "Clone operation encountered: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:158 +#, python-format +msgid "Waiting for outstanding operation before retrying snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:164 +#, python-format +msgid "Detected xDBVersionMismatch, retry %s of 5" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:171 +#, python-format +msgid "API response: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:191 +#, python-format +msgid "Found solidfire account: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:220 +#, python-format +msgid "solidfire account: %s does not exist, create it..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:281 +#, python-format +msgid "Failed to retrieve volume SolidFire-ID: %s in get_by_account!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:338 +msgid "Failed to get model update from clone" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:364 +#, python-format +msgid "More than one valid preset was detected, using %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:397 +#, python-format +msgid "Mapped SolidFire volumeID %(sfid)s to cinder ID %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:406 +#, python-format +msgid "Volume %s, not found on SF Cluster." 
+msgstr "" + +#: cinder/volume/drivers/solidfire.py:409 +#, python-format +msgid "Found %(count)s volumes mapped to id: %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:476 +msgid "Enter SolidFire delete_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:480 +#, python-format +msgid "Account for Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:482 +msgid "This usually means the volume was never succesfully created." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:497 +#, python-format +msgid "Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:500 +msgid "Leaving SolidFire delete_volume" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:504 +msgid "Executing SolidFire ensure_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:509 +msgid "Executing SolidFire create_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:558 +msgid "Updating cluster status info" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:566 +msgid "Failed to get updated stats" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:173 +#, python-format +msgid "WWPN on node %(node)s: %(wwpn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:179 +msgid "enter: do_setup" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:190 +#, python-format +msgid "pool %s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:206 +msgid "Failed to get license information." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:265 +msgid "do_setup: No configured nodes" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:267 +msgid "leave: do_setup" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:289 +msgid "enter: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:295 +#: cinder/volume/drivers/netapp/iscsi.py:169 +#: cinder/volume/drivers/netapp/iscsi.py:1182 +#: cinder/volume/drivers/netapp/iscsi.py:1535 +#: cinder/volume/drivers/netapp/nfs.py:109 +#: cinder/volume/drivers/netapp/nfs.py:328 +#: cinder/volume/drivers/netapp/nfs.py:379 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:118 +#, python-format +msgid "%s is not set" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:301 +msgid "" +"Password or SSH private key is required for authentication: set either " +"san_password or san_private_key option" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:309 +#, python-format +msgid "" +"Illegal value %d specified for storwize_svc_flashcopy_timeout: valid " +"values are between 0 and 600" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:317 +msgid "leave: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:331 +#, python-format +msgid "ensure_export: Volume %s not found on storage" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:356 +#, python-format +msgid "enter: _get_chap_secret_for_host: host name %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:392 +#, python-format +msgid "" +"leave: _get_chap_secret_for_host: host name %(host_name)s with secret " +"%(chap_secret)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:415 +msgid "_create_host: Cannot clean host name. 
Host name is not unicode or string" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:483 +#, python-format +msgid "enter: _get_host_from_connector: prefix %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:511 +#, python-format +msgid "leave: _get_host_from_connector: host %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:523 +#, python-format +msgid "enter: _create_host: host %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:538 +msgid "_create_host: No connector ports" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:551 +#, python-format +msgid "leave: _create_host: host %(host)s - %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:574 +#, python-format +msgid "enter: _map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:609 +msgid "" +"storwize_svc_multihostmap_enabled is set to False, multi host mapping is " +"not allowed" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:619 +#, python-format +msgid "volume %s is mapped to multiple hosts" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:625 +#, python-format +msgid "" +"leave: _map_vol_to_host: LUN %(result_lun)s, volume %(volume_name)s, host" +" %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:635 +#, python-format +msgid "enter: _delete_host: host %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:643 +#, python-format +msgid "leave: _delete_host: host %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:675 +#, python-format +msgid "enter: initialize_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:691 +msgid "_create_host failed to return the host name." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:702 +#, python-format +msgid "initialize_connection: Failed to get attributes for volume %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:709 +#, python-format +msgid "Did not find expected column name in lsvdisk: %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:711 +#, python-format +msgid "initialize_connection: Missing volume attribute for volume %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:728 +#, python-format +msgid "" +"initialize_connection: No node found in I/O group %(gid)s for volume " +"%(vol)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:736 +#, python-format +msgid "initialize_connection: Did not find a preferred node for volume %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:769 +#, python-format +msgid "" +"initialize_connection: Failed to collect return properties for volume " +"%(vol)s and connector %(conn)s.\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:774 +#, python-format +msgid "" +"leave: initialize_connection:\n" +" volume: %(vol)s\n" +" connector %(conn)s\n" +" properties: %(prop)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:792 +#, python-format +msgid "enter: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:802 +msgid "_get_host_from_connector failed to return the host name for connector" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:817 +#, python-format +msgid "" +"terminate_connection: No mapping of volume %(vol_name)s to host " +"%(host_name)s found" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:825 +#, python-format +msgid "leave: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#:
cinder/volume/drivers/storwize_svc.py:883 +msgid "protocol must be specified as 'iSCSI' or 'FC'" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:907 +#, python-format +msgid "enter: _create_vdisk: vdisk %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:945 +#, python-format +msgid "" +"_create_vdisk %(name)s - did not find success message in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:950 +#, python-format +msgid "leave: _create_vdisk: volume %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:962 +#: cinder/volume/drivers/storwize_svc.py:976 +#, python-format +msgid "" +"create FC mapping from %(source)s to %(target)s - did not find success " +"message in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:988 +#: cinder/volume/drivers/storwize_svc.py:998 +#, python-format +msgid "" +"create FC mapping from %(source)s to %(target)s - did not find mapping id" +" in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1012 +#, python-format +msgid "" +"_prepare_fc_map: Failed to prepare FlashCopy from %(source)s to " +"%(target)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1039 +#, python-format +msgid "" +"Unexpected mapping status %(status)s for mapping %(id)s. Attributes: " +"%(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1050 +#, python-format +msgid "" +"Mapping %(id)s prepare failed to complete within the allotted %(to)d " +"seconds timeout. Terminating." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1055 +#, python-format +msgid "" +"_prepare_fc_map: Failed to start FlashCopy from %(source)s to %(target)s " +"with exception %(ex)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1062 +#, python-format +msgid "_prepare_fc_map: %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1069 +#, python-format +msgid "" +"_start_fc_map: Failed to start FlashCopy from %(source)s to %(target)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1080 +#, python-format +msgid "" +"enter: _run_flashcopy: execute FlashCopy from source %(source)s to target" +" %(target)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1092 +#, python-format +msgid "leave: _run_flashcopy: FlashCopy started from %(source)s to %(target)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1100 +#, python-format +msgid "enter: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1107 +#, python-format +msgid "_create_copy: Source vdisk %s does not exist" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1119 +#, python-format +msgid "" +"_create_copy: cannot get source vdisk %(src)s capacity from vdisk " +"attributes %(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1129 +#, python-format +msgid "leave: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1134 +#, python-format +msgid "enter: _get_flashcopy_mapping_attributes: mapping %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1156 +#, python-format +msgid "" +"leave: _get_flashcopy_mapping_attributes: mapping %(fc_map_id)s, " +"attributes %(attributes)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1164 +#, python-format +msgid
"enter: _is_vdisk_defined: vdisk %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1166 +#, python-format +msgid "leave: _is_vdisk_defined: vdisk %(vol)s with %(str)s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1194 +#, python-format +msgid "enter: _delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1199 +#, python-format +msgid "warning: Tried to delete vdisk %s but it does not exist." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1225 +#, python-format +msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1258 +#, python-format +msgid "leave: _delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1283 +msgid "create_volume_from_snapshot: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1297 +msgid "create_cloned_volume: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1361 +msgid "_update_volume_status: Could not get system name" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1373 +msgid "Could not get pool data from the storage" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1374 +msgid "_update_volume_status: Could not get storage pool data" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1412 +msgid "" +"Illegal value specified for storwize_svc_vol_rsize: set to either a " +"percentage (0-100) or -1" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1418 +msgid "" +"Illegal value specified for storwize_svc_vol_warning: set to a percentage" +" (0-100)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1425 +msgid "" +"Illegal value specified for storwize_svc_vol_grainsize: set to either 32," +" 64, 128, or 256" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1432 +msgid "System does not support compression" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1437 +msgid "If compression is set to True, rsize must also be set (not equal to -1)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1443 +#, python-format +msgid "" +"Illegal value %(prot)s specified for storwize_svc_connection_protocol: " +"valid values are %(enabled)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1452 +msgid "" +"Multipath is currently only supported for FC connections and not iSCSI. 
" +"(This is a Nova limitation.)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1464 +#, python-format +msgid "enter: _execute_command_and_parse_attributes: command %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1471 +#, python-format +msgid "" +"CLI Exception output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1488 +#, python-format +msgid "" +"leave: _execute_command_and_parse_attributes:\n" +"command: %(cmd)s\n" +"attributes: %(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1507 +#, python-format +msgid "" +"_get_hdr_dic: attribute headers and values do not match.\n" +" Headers: %(header)s\n" +" Values: %(row)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1517 +#, python-format +msgid "" +"%(fun)s: Failed with unexpected CLI output.\n" +" Command: %(cmd)s\n" +"stdout: %(out)s\n" +"stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1531 +#, python-format +msgid "" +"%(fun)s: Failed with unexpected CLI output.\n" +" Command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1539 +#, python-format +msgid "Did not find expected column in %(fun)s: %(hdr)s" +msgstr "" + +#: cinder/volume/drivers/windows.py:142 +#, python-format +msgid "Creating folder %s " +msgstr "" + +#: cinder/volume/drivers/windows.py:207 +#: cinder/volume/drivers/nexenta/volume.py:203 +#, python-format +msgid "Ignored target creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/windows.py:213 +#, python-format +msgid "Disk not found: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:218 +#, python-format +msgid "Sending %(method)s to %(url)s. Body \"%(body)s\"" +msgstr "" + +#: cinder/volume/drivers/zadara.py:240 +#, python-format +msgid "Operation completed. %(data)s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:350 +#, python-format +msgid "Volume %(name)s could not be found. It might be already deleted" +msgstr "" + +#: cinder/volume/drivers/zadara.py:447 +#, python-format +msgid "Attach properties: %(properties)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:42 +msgid "" +"Module PyWBEM not installed. Install PyWBEM using the python-pywbem " +"package." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:78 +msgid "Entering create_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:82 +#, python-format +msgid "Create Volume: %(volume)s Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:90 +#, python-format +msgid "Create Volume: %(volume)s Storage type: %(storage_type)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:97 +#, python-format +msgid "" +"Create Volume: %(volume)s Pool: %(pool)s Storage System: " +"%(storage_system)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:106 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Storage Configuration Service not " +"found for pool %(storage_type)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:114 +#, python-format +msgid "" +"Create Volume: %(name)s Method: CreateOrModifyElementFromStoragePool " +"ConfigServicie: %(service)s ElementName: %(name)s InPool: %(pool)s " +"ElementType: 5 Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:129 +#, python-format +msgid "Create Volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:136 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:143 +#, python-format +msgid "Leaving create_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:151 +msgid "Entering create_volume_from_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:156 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:166 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Snapshot Instance: %(snapshotinstance)s Storage " +"System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:176 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Create Volume from Snapshot is NOT supported on VMAX." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:187 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Cannot find Replication Service to create volume from " +"snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:196 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Method: CreateElementReplica ReplicationService: " +"%(service)s ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:217 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s " +"Snapshot:%(snapshotname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:229 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Successfully clone volume from snapshot. Finding the " +"clone relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:240 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Remove the clone relationship. Method: " +"ModifyReplicaSynchronization ReplicationService: %(service)s Operation: " +"8 Synchronization: %(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:256 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:265 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. 
Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:277 +#, python-format +msgid "" +"Leaving create_volume_from_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:286 +msgid "Entering create_cloned_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:291 +#, python-format +msgid "" +"Create a Clone from Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:301 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Source Instance: %(src_instance)s Storage System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:311 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Cannot find Replication Service to create cloned volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:320 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Method: CreateElementReplica ReplicationService: %(service)s " +"ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:341 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source " +"Volume:%(srcname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:353 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Successfully cloned volume from source volume. Finding the clone " +"relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:364 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Remove the clone relationship. Method: ModifyReplicaSynchronization " +"ReplicationService: %(service)s Operation: 8 Synchronization: " +"%(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:380 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:389 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:401 +#, python-format +msgid "" +"Leaving create_cloned_volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:410 +msgid "Entering delete_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:412 +#, python-format +msgid "Delete Volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:419 +#, python-format +msgid "Volume %(name)s not found on the array. No volume to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:429 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Storage Configuration Service not " +"found." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:437 +#, python-format +msgid "Delete Volume: %(name)s DeviceID: %(deviceid)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:441 +#, python-format +msgid "" +"Delete Volume: %(name)s Method: EMCReturnToStoragePool ConfigServic: " +"%(service)s TheElement: %(vol_instance)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:454 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:463 +#, python-format +msgid "Leaving delete_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:470 +msgid "Entering create_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:474 +#, python-format +msgid "Create snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:486 +#, python-format +msgid "Device ID: %(deviceid)s: Storage System: %(storagesystem)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:493 +#: cinder/volume/drivers/emc/emc_smis_common.py:495 +#: cinder/volume/drivers/emc/emc_smis_common.py:565 +#, python-format +msgid "Cannot find Replication Service to create snapshot for volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:500 +#, python-format +msgid "" +"Create Snapshot: Method: CreateElementReplica: Target: %(snapshot)s " +"Source: %(volume)s Replication Service: %(service)s ElementName: " +"%(elementname)s Sync Type: 7 SourceElement: %(sourceelement)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:516 +#, python-format +msgid "" +"Create Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:525 +#, python-format +msgid "Error Create Snapshot: (snapshot)s Volume: %(volume)s Error: %(errordesc)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:533 +#, python-format +msgid "" +"Leaving create_snapshot: Snapshot: %(snapshot)s Volume: %(volume)s " +"Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:539 +msgid "Entering delete_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:543 +#, python-format +msgid "Delete Snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:549 +#, python-format +msgid "" +"Delete Snapshot: %(snapshot)s: volume: %(volume)s. Finding " +"StorageSychronization_SV_SV." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:557 +#, python-format +msgid "" +"Snapshot: %(snapshot)s: volume: %(volume)s not found on the array. No " +"snapshot to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:572 +#, python-format +msgid "" +"Delete Snapshot: Target: %(snapshot)s Source: %(volume)s. Method: " +"ModifyReplicaSynchronization: Replication Service: %(service)s " +"Operation: 19 Synchronization: %(sync_name)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:588 +#, python-format +msgid "" +"Delete Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:597 +#, python-format +msgid "" +"Error Delete Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. 
Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:609 +#, python-format +msgid "" +"Leaving delete_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:619 +#, python-format +msgid "Create export: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:624 +#, python-format +msgid "create_export: Volume: %(volume)s Device ID: %(device_id)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:646 +#, python-format +msgid "" +"ExposePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(lun_name)s" +" InitiatorPortIDs: %(initiator)s DeviceAccesses: 2" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:661 +#, python-format +msgid "ExposePaths parameter LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:672 +#, python-format +msgid "Error mapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:676 +#, python-format +msgid "ExposePaths for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:692 +#, python-format +msgid "" +"HidePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(device_id)s " +"LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:705 +#, python-format +msgid "Error unmapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:709 +#, python-format +msgid "HidePaths for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:722 +#, python-format +msgid "" +"AddMembers: ConfigServicie: %(service)s MaskingGroup: %(masking_group)s" +" Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:735 +#, python-format +msgid "Error mapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:740 +#, python-format +msgid "AddMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:753 +#, python-format +msgid "" +"RemoveMembers: ConfigServicie: %(service)s MaskingGroup: " +"%(masking_group)s Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:766 +#, python-format +msgid "Error unmapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:771 +#, python-format +msgid "RemoveMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:777 +#, python-format +msgid "Map volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:786 +#: cinder/volume/drivers/emc/emc_smis_common.py:816 +#, python-format +msgid "Cannot find Controller Configuration Service for storage system %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:800 +#, python-format +msgid "Unmap volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:806 +#, python-format +msgid "Volume %s is not mapped. No volume to unmap." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:830 +#, python-format +msgid "Initialize connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:836 +#, python-format +msgid "Volume %s is already mapped." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:848 +#, python-format +msgid "Terminate connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:880 +#, python-format +msgid "Found Storage Type: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:883 +msgid "Storage type not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:899 +#, python-format +msgid "Found Masking View: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:902 +msgid "Masking View not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:924 +msgid "Ecom user not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:944 +#, python-format +msgid "Ecom IP: %(ecomIp)s Port: %(ecomPort)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:947 +msgid "Ecom server not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:954 +#, fuzzy +msgid "Cannot connect to ECOM server" +msgstr "Muling kumonekta sa queue" + +#: cinder/volume/drivers/emc/emc_smis_common.py:966 +#, python-format +msgid "Found Replication Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:979 +#, python-format +msgid "Found Storage Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:992 +#, python-format +msgid "Found Controller Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1005 +#, python-format +msgid "Found Storage Hardware ID Management Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1049 +#, python-format +msgid "Pool %(storage_type)s is not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1055 +#, python-format +msgid "Storage system not found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1061 +#, python-format +msgid "Pool: %(pool)s SystemName: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1077 +#, python-format +msgid "Pool name: %(poolname)s System name: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1109 +#, python-format +msgid "Volume %(volumename)s not found on the array." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1112 +#, python-format +msgid "Volume name: %(volumename)s Volume instance: %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1125 +#, python-format +msgid "Source: %(volumename)s Target: %(snapshotname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1148 +#, python-format +msgid "" +"Source: %(volumename)s Target: %(snapshotname)s. Storage Synchronized " +"not found. " +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1153 +#, python-format +msgid "" +"Storage system: %(storage_system)s Storage Synchronized instance: " +"%(sync)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1179 +#, python-format +msgid "Error finding %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1183 +#, python-format +msgid "Found %(name)s: %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1243 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage_system)s " +"and initiator %(initiator)s is %(ctrl)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1282 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage volume %(vol)s and initiator" +" %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1295 +#, python-format +msgid "" +"Volume %(name)s not found on the array. Cannot determine if there are " +"volumes mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1307 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage)s and " +"%(connector)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1319 +#, python-format +msgid "" +"Found %(numVolumesMapped)d volumes on storage system %(storage)s mapped " +"to %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1354 +#, python-format +msgid "Available device number on %(storage)s: %(device)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1397 +#, python-format +msgid "Device number not found for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1402 +#, python-format +msgid "Found device number %(device)d for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1412 +#, python-format +msgid "Device info: %(data)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1434 +#, python-format +msgid "Masking view: %(view)s DeviceMaskingGroup: %(masking)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1456 +#, python-format +msgid "Found Storage Processor System: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1484 +#, python-format +msgid "" +"iSCSIProtocolEndpoint for storage system %(storage_system)s and SP %(sp)s" +" is %(endpoint)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1513 +msgid "Error finding Storage Hardware ID Service." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1519 +#, python-format +msgid "" +"EMCGetTargetEndpoints: Service: %(service)s Storage HardwareIDs: " +"%(hardwareids)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1531 +msgid "Error finding Target WWNs." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1541 +#, python-format +msgid "Add target WWN: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1543 +#, python-format +msgid "Target WWNs: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1559 +#, python-format +msgid "Storage Hardware IDs for %(wwpns)s is %(foundInstances)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:158 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:167 +#, python-format +msgid "Cannot find device number for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:197 +#, python-format +msgid "Found iSCSI endpoint: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:204 +#, python-format +msgid "ISCSI endpoint not found for SP %(sp)s on storage system %(storage)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:221 +#, python-format +msgid "ISCSI properties: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:113 +msgid "read timed out" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:138 +msgid "do_setup." 
+msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:144 +msgid "check_for_setup_error." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:149 +msgid "check_for_setup_error: Can not get device type." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:153 +#, python-format +msgid "check_for_setup_error: Device type is:%(type)s, version is:%(version)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:160 +msgid "" +"check_for_setup_error: Product version not right. Please make sure the " +"product version is V1." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:171 +msgid "_get_device_type: Storage Pool must be configured." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:180 +#, python-format +msgid "create_volume:volume name: %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:194 +#, python-format +msgid "delete_volume: volume name: %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:201 +#, python-format +msgid "delete_volume:No need to delete volume. Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:210 +#, python-format +msgid "create_export: volume name:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:214 +#, python-format +msgid "create_export:Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:234 +#, python-format +msgid "initialize_connection: volume name: %(volume)s. initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:249 +#, python-format +msgid "" +"initialize_connection:Failed to find target ip for " +"initiator:%(initiatorname)s, please check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:259 +#, python-format +msgid "initialize_connection:Failed to find target iSCSI iqn. Target IP:%(ip)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:297 +#, python-format +msgid "" +"initialize_connection:host name: %(host)s, initiator name: %(ini)s, " +"hostport name: %(port)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:307 +#, python-format +msgid "" +"initialize_connection:Failed to find the given volume. volume " +"name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:359 +#, python-format +msgid "terminate_connection:volume name: %(volume)s, initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:368 +#, python-format +msgid "terminate_connection:Host does not exist. Host name:%(host)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:377 +#, python-format +msgid "terminate_connection:volume does not exist. volume name:%(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:396 +#, python-format +msgid "" +"terminate_connection:No map between host and volume. Host " +"name:%(hostname)s, volume name:%(volumename)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:414 +#, python-format +msgid "" +"terminate_connection:No initiator is added to the host. Host " +"name:%(hostname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:427 +#, python-format +msgid "create_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:434 +msgid "create_snapshot:Device does not support snapshot." 
+msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:440 +msgid "create_snapshot:Resource pool needs 1GB valid size at least." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:447 +#, python-format +msgid "create_snapshot:Volume does not exist. Volume name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:456 +#, python-format +msgid "create_snapshot:Snapshot does not exist. Snapshot name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:468 +#, python-format +msgid "delete_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:475 +msgid "delete_snapshot:Device does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:484 +#, python-format +msgid "delete_snapshot:Snapshot does not exist. snapshot name:%(snap)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:497 +#, python-format +msgid "" +"create_volume_from_snapshot:snapshot name:%(snapshot)s, volume " +"name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:505 +#, python-format +msgid "" +"create_volume_from_snapshot:Device does not support create volume from " +"snapshot. Volume name:%(volume)s, snapshot name:%(snapshot)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:516 +#, python-format +msgid "" +"create_volume_from_snapshot:Snapshot does not exist. Snapshot " +"name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:567 +msgid "Config file is wrong. Controler IP, UserName and UserPassword must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:573 +#, python-format +msgid "_check_conf_file: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:584 +#, python-format +msgid "_read_xml:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:612 +#, python-format +msgid "Write login information to xml error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:616 +#, python-format +msgid "_get_login_info error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:638 +#, python-format +msgid "" +"Config file is wrong. LUNType must be \"Thin\" or \"Thick\". " +"LUNType:%(type)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:665 +msgid "_get_lun_set_info:Use default prefetch type. Prefetch type:Intelligent." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:676 +msgid "" +"_get_lun_set_info:No available pools! Please check whether storage pool " +"is created." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:686 +#, python-format +msgid "_get_lun_set_info:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:736 +msgid "" +"_get_maximum_pool:maxpoolid is None. Please check config file and make " +"sure the \"Name\" in \"StoragePool\" is right." 
+msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:754 +#, python-format +msgid "_get_iscsi_info:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:764 +#, python-format +msgid "CLI command:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:831 +#, python-format +msgid "_execute_cli:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:838 +#, python-format +msgid "" +"_name_translate:Name in cinder: %(old)s, new name in storage system: " +"%(new)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:872 +#, python-format +msgid "" +"_create_hostgroup:Failed to Create hostgroup. Hostgroup name: %(name)s. " +"out:%(out)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:901 +#, python-format +msgid "" +"_add_host:Failed to add host to hostgroup. host name:%(host)s hostgroup " +"id:%(hostgroup)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:927 +#, python-format +msgid "" +"_add_initiator:Failed to add initiator. initiator name:%(name)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:941 +#, python-format +msgid "" +"_delete_initiator:ERROE:Failed to delete initiator. initiator " +"name:%(name)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:991 +#, python-format +msgid "" +"_add_hostport:Failed to add hostport. port name:%(port)s port " +"information:%(info)s host id:%(host)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1009 +#, python-format +msgid "_delete_hostport:Failed to delete host port. port id:%(portid)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1016 +#, python-format +msgid "_get_tgt_iqn:iSCSI IP is %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1049 +#, python-format +msgid "_get_tgt_iqn:iSCSI target iqn is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1086 +#, python-format +msgid "" +"_map_lun:Failed to add hostmap. hostid:%(host)s lunid:%(lun)s " +"hostlunid:%(hostlunid)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1122 +#, python-format +msgid "" +"_delete_map:There are IOs accessing the system. Retry to delete host map." +" map id:%(mapid)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1128 +#, python-format +msgid "_delete_map:Failed to delete host map. mapid:%(mapid)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1142 +#, python-format +msgid "_delete_host: Failed delete host. host id:%(hostid)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1223 +#, python-format +msgid "" +"_active_snapshot:Failed to active snapshot. snapshot id:%(name)s. " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1237 +#, python-format +msgid "" +"_disable_snapshot:Failed to disable snapshot. snapshot id:%(id)s. " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1251 +#, python-format +msgid "" +"_delete_snapshot:Failed to delete snapshot. snapshot id:%(id)s. " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1308 +#, python-format +msgid "_create_volume:Failed to Create volume. volume name:%(name)s. out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1321 +#, python-format +msgid "_delete_volume:Failed to delete volume. 
Volume name:%(name)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1338 +#, python-format +msgid "" +"_create_luncopy:Failed to Create LUNcopy. LUNcopy name:%(name)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1352 +#, python-format +msgid "" +"_start_luncopy:Failed to start LUNcopy. LUNcopy id:%(luncopyid)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1382 +#, python-format +msgid "" +"_wait_for_luncopy:LUNcopy status is not normal. LUNcopy " +"name:%(luncopyname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1415 +#, python-format +msgid "" +"_delete_luncopy:Failed to delete LUNcopy. LUNcopy id:%(luncopyid)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1430 +#, python-format +msgid "" +"_create_snapshot:Failed to Create snapshot. Snapshot name:%(name)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1468 +#, python-format +msgid "" +"_change_lun_controller:Failed to change lun owning controller. lun " +"id:%(lunid)s. new controller:%(controller)s. out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1484 +msgid "_is_resource_pool_enough:Resource pool for snapshot has not been added." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:123 +#, python-format +msgid "API %(name)s failed: %(reason)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:134 +#: cinder/volume/drivers/netapp/iscsi.py:1167 +#, python-format +msgid "Using WSDL: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:144 +#, python-format +msgid "Using DFM server: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:149 +#, python-format +msgid "Using storage service: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:154 +#, python-format +msgid "Using storage service prefix: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:160 +#, python-format +msgid "Using vfiler: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:173 +msgid "Either netapp_storage_service or netapp_storage_service_prefix must be set" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:204 +#, fuzzy +msgid "Connected to DFM server" +msgstr "Muling kumonekta sa queue" + +#: cinder/volume/drivers/netapp/iscsi.py:282 +#, python-format +msgid "Discovered %(dataset_count)s datasets and %(lun_count)s LUNs" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:317 +#, python-format +msgid "Job failed: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:346 +msgid "" +"Attempt to use volume_type without specifying " +"netapp_storage_service_prefix flag." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:350 +msgid "" +"You must set the netapp_storage_service flag in order to create volumes " +"with no volume_type." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:420 +msgid "Failed to provision dataset member" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:435 +msgid "No LUN was created by the provision job" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:463 +#: cinder/volume/drivers/netapp/iscsi.py:1250 +#, python-format +msgid "No entry in LUN table for volume %(name)s."
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:481 +msgid "Failed to remove and delete dataset LUN member" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:497 +msgid "Failed to remove and delete dataset Qtree member" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:538 +#, python-format +msgid "No entry in LUN table for volume %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:558 +#, python-format +msgid "Failed to get LUN details for LUN ID %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:575 +#, python-format +msgid "Failed to get host details for host ID %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:812 +#: cinder/volume/drivers/netapp/iscsi.py:859 +#, python-format +msgid "No LUN ID for volume %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:821 +#, python-format +msgid "Failed to get target portal for filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:826 +#, python-format +msgid "Failed to get target IQN for filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:930 +msgid "" +"Failed to get LUN list. Is the DFM host time-synchronized with Cinder " +"host?" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1042 +#: cinder/volume/drivers/netapp/iscsi.py:1381 +#: cinder/volume/drivers/netapp/iscsi.py:1699 +#: cinder/volume/drivers/netapp/nfs.py:74 +#, python-format +msgid "" +"Cannot create volume of size %(vol_size)s from snapshot of size " +"%(snap_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1054 +#, python-format +msgid "" +"Cannot create volume of type %(new_type)s from snapshot of type " +"%(old_type)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1075 +#: cinder/volume/drivers/netapp/nfs.py:273 +#, python-format +msgid "" +"Cannot create clone of size %(vol_size)s from volume of size " +"%(src_vol_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1086 +#, python-format +msgid "Cannot create clone of type %(new_type)s from volume of type %(old_type)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1143 +#, python-format +msgid "No metadata property %(prop)s defined for the LUN %(name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1216 +#: cinder/volume/drivers/netapp/iscsi.py:1561 +msgid "Success getting LUN list from server" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1238 +#: cinder/volume/drivers/netapp/iscsi.py:1576 +#, python-format +msgid "Created LUN with name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1254 +#: cinder/volume/drivers/netapp/iscsi.py:1369 +#: cinder/volume/drivers/netapp/iscsi.py:1593 +#, python-format +msgid "Destroyed LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1291 +#, python-format +msgid "Mapped LUN %(handle)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1298 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(handle)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1303 +#: cinder/volume/drivers/netapp/iscsi.py:1637 +#, python-format +msgid "Failed to get LUN target details for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1307 +#: cinder/volume/drivers/netapp/iscsi.py:1648 +#, python-format +msgid "Failed to get target portal for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1311 +#: cinder/volume/drivers/netapp/iscsi.py:1651 +#, python-format +msgid "Failed to get target IQN for the LUN %s" +msgstr 
"" + +#: cinder/volume/drivers/netapp/iscsi.py:1344 +#, python-format +msgid "Unmapped LUN %(handle)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1365 +#, python-format +msgid "No entry in LUN table for snapshot %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1409 +#: cinder/volume/drivers/netapp/iscsi.py:1907 +msgid "Object is not a NetApp LUN." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1419 +#: cinder/volume/drivers/netapp/iscsi.py:2188 +#, python-format +msgid "Cloned LUN with new name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1439 +#, python-format +msgid "Could not find handle for LUN named %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1458 +#: cinder/volume/drivers/netapp/iscsi.py:1936 +#, python-format +msgid "" +"Cannot clone volume of size %(vol_size)s from src volume of size " +"%(src_vol_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1518 +#, python-format +msgid "Using NetApp filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1585 +#, python-format +msgid "No entry in LUN table for volume/snapshot %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1628 +#, python-format +msgid "Mapped LUN %(name)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1632 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(name)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1688 +#, python-format +msgid "Snapshot %s deletion successful" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1717 +#, python-format +msgid "Unmapped LUN %(name)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1735 +#, python-format +msgid "Failed to get vol with required size for volume: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1811 +#, python-format +msgid "Error mapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1829 +#, python-format +msgid "Error unmapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1923 +#, python-format +msgid "Could not find attribute for LUN named %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2057 +#, python-format +msgid "No iscsi service found for vserver %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2192 +#, python-format +msgid "No clonned lun named %s found on the filer" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2377 +#, python-format +msgid "Error finding luns for volume %(vol)s. Verify volume exists." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2481 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s completed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2484 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s failed" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:312 +#, python-format +msgid "" +"Cloning with params ip %(host_ip)s, exp_path\n" +" %(export_path)s, vol %(volume_name)s,\n" +" clone_name %(clone_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:461 +#, python-format +msgid "No interface found on cluster for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:487 +#, python-format +msgid "" +"No volume on cluster with vserver\n" +" %(vserver)s and junction path " +"%(junction)s\n" +" " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:493 +#, python-format +msgid "" +"Cloning with params volume %(volume)s,src %(src_path)s,\n" +" dest %(dest_path)s, vserver %(vserver)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:547 +#, python-format +msgid "No storage path found for export path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:555 +#, python-format +msgid "Cloning with src %(src_path)s, dest %(dest_path)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/__init__.py:27 +msgid "Nexenta SA returned the error" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:64 +#, python-format +msgid "Sending JSON data: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:69 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:75 +msgid "No headers in server response" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:76 +msgid "Bad response from server" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:79 +#, python-format +msgid "Got response: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:99 +#, python-format +msgid "Volume %s does not exist in Nexenta SA" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:211 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:220 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:228 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:238 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:274 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:282 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:74 +#, python-format +msgid "CLIQ command returned %s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:80 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:88 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. 
Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:118 +#, python-format +msgid "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. " +"Result=%(_xml)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:171 +#, python-format +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:235 +msgid "local_path not supported" +msgstr "" + +#: cinder/volume/drivers/san/san.py:148 +#, fuzzy, python-format +msgid "Error running SSH command: %s" +msgstr "Merong hindi-inaasahang pagkakamali habang tumatakbo ang command." + +#: cinder/volume/drivers/san/san.py:169 +msgid "Specify san_password or san_private_key" +msgstr "" + +#: cinder/volume/drivers/san/san.py:173 +msgid "san_ip must be set" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:80 +#, python-format +msgid "Cannot parse list-view output: %s" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:174 +#, python-format +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:257 +#, python-format +msgid "SSH Command failed after '%(total_attempts)r' attempts: '%(command)s'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:261 +#, fuzzy, python-format +msgid "Error running ssh command: %s" +msgstr "Merong hindi-inaasahang pagkakamali habang tumatakbo ang command." + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:446 +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:104 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:103 +#, python-format +msgid "CPG (%s) doesn't exist on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:493 +#, python-format +msgid "Must specify a valid persona %(valid)s, value '%(persona)s' is invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:532 +#, python-format +msgid "" +"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " +"invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:569 +#, python-format +msgid "Volume (%s) already exists on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:622 +#, fuzzy, python-format +msgid "Unexpected state while cloning %s" +msgstr "Merong hindi-inaasahang pagkakamali habang tumatakbo ang command." + +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:97 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:96 +msgid "Login to 3PAR array invalid" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:218 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:237 +#, python-format +msgid "The hostname must be called '%s'" +msgstr "" + diff --git a/cinder/locale/tr/LC_MESSAGES/cinder.po b/cinder/locale/tr/LC_MESSAGES/cinder.po new file mode 100644 index 0000000000..89a33aa59d --- /dev/null +++ b/cinder/locale/tr/LC_MESSAGES/cinder.po @@ -0,0 +1,5573 @@ +# Turkish translation for cinder +# Copyright (c) 2011 Rosetta Contributors and Canonical Ltd 2011 +# This file is distributed under the same license as the cinder package. +# FIRST AUTHOR , 2011. 
+# +msgid "" +msgstr "" +"Project-Id-Version: cinder\n" +"Report-Msgid-Bugs-To: FULL NAME \n" +"POT-Creation-Date: 2013-05-21 18:10+0000\n" +"PO-Revision-Date: 2011-12-14 18:10+0000\n" +"Last-Translator: FULL NAME \n" +"Language-Team: Turkish \n" +"Plural-Forms: nplurals=1; plural=0\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 0.9.6\n" + +#: cinder/context.py:61 +#, python-format +msgid "Arguments dropped when creating context: %s" +msgstr "" + +#: cinder/context.py:92 +#, python-format +msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" +msgstr "" + +#: cinder/exception.py:63 +msgid "Unexpected error while running command." +msgstr "" + +#: cinder/exception.py:66 +#, python-format +msgid "" +"%(description)s\n" +"Command: %(cmd)s\n" +"Exit code: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" +msgstr "" + +#: cinder/exception.py:90 +msgid "DB exception wrapped." +msgstr "" + +#: cinder/exception.py:104 +msgid "An unknown exception occurred." +msgstr "" + +#: cinder/exception.py:125 cinder/openstack/common/rpc/common.py:86 +msgid "Exception in string format operation" +msgstr "" + +#: cinder/exception.py:138 +msgid "Connection to glance failed" +msgstr "" + +#: cinder/exception.py:142 +msgid "Not authorized." +msgstr "" + +#: cinder/exception.py:147 +msgid "User does not have admin privileges" +msgstr "" + +#: cinder/exception.py:151 +#, python-format +msgid "Policy doesn't allow %(action)s to be performed." +msgstr "" + +#: cinder/exception.py:155 +#, python-format +msgid "Not authorized for image %(image_id)s." +msgstr "" + +#: cinder/exception.py:159 +msgid "Unacceptable parameters." +msgstr "" + +#: cinder/exception.py:164 +msgid "Invalid snapshot" +msgstr "" + +#: cinder/exception.py:168 +#, python-format +msgid "Volume %(volume_id)s is still attached, detach volume first." +msgstr "" + +#: cinder/exception.py:172 +msgid "Failed to load data into json format" +msgstr "" + +#: cinder/exception.py:176 +msgid "The request is invalid." +msgstr "" + +#: cinder/exception.py:180 +msgid "The results are invalid." +msgstr "" + +#: cinder/exception.py:184 +msgid "Invalid input received" +msgstr "" + +#: cinder/exception.py:188 +msgid "Invalid volume type" +msgstr "" + +#: cinder/exception.py:192 +msgid "Invalid volume" +msgstr "" + +#: cinder/exception.py:196 +#, python-format +msgid "Invalid content type %(content_type)s." +msgstr "" + +#: cinder/exception.py:200 +msgid "Invalid Parameter: Unicode is not supported by the current database." +msgstr "" + +#: cinder/exception.py:207 +#, python-format +msgid "%(err)s" +msgstr "" + +#: cinder/exception.py:211 +msgid "Service is unavailable at this time." +msgstr "" + +#: cinder/exception.py:215 +#, python-format +msgid "Image %(image_id)s is unacceptable: %(reason)s" +msgstr "" + +#: cinder/exception.py:219 +msgid "Expected a uuid but received %(uuid)." +msgstr "" + +#: cinder/exception.py:223 +msgid "Resource could not be found." +msgstr "" + +#: cinder/exception.py:229 +#, python-format +msgid "Volume %(volume_id)s persistence file could not be found." +msgstr "" + +#: cinder/exception.py:233 +#, python-format +msgid "Volume %(volume_id)s could not be found." +msgstr "" + +#: cinder/exception.py:237 +#, python-format +msgid "Unable to locate account %(account_name)s on Solidfire device" +msgstr "" + +#: cinder/exception.py:242 +#, python-format +msgid "Volume not found for instance %(instance_id)s." 
+msgstr "" + +#: cinder/exception.py:246 +#, python-format +msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:251 cinder/exception.py:264 +msgid "Invalid metadata" +msgstr "" + +#: cinder/exception.py:255 cinder/exception.py:268 +msgid "Invalid metadata size" +msgstr "" + +#: cinder/exception.py:259 +#, python-format +msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:272 +#, python-format +msgid "Volume type %(volume_type_id)s could not be found." +msgstr "" + +#: cinder/exception.py:276 +#, python-format +msgid "Volume type with name %(volume_type_name)s could not be found." +msgstr "" + +#: cinder/exception.py:281 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s has no extra specs with key " +"%(extra_specs_key)s." +msgstr "" + +#: cinder/exception.py:286 +#, python-format +msgid "Snapshot %(snapshot_id)s could not be found." +msgstr "" + +#: cinder/exception.py:290 +#, python-format +msgid "deleting volume %(volume_name)s that has snapshot" +msgstr "" + +#: cinder/exception.py:294 +#, python-format +msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" +msgstr "" + +#: cinder/exception.py:299 +#, python-format +msgid "No target id found for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:303 +#, python-format +msgid "Failed to create iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:307 +#, python-format +msgid "Failed to attach iSCSI target for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:311 +#, python-format +msgid "Failed to remove iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:315 +#, python-format +msgid "No disk at %(location)s" +msgstr "" + +#: cinder/exception.py:319 +#, python-format +msgid "Invalid image href %(image_href)s." +msgstr "" + +#: cinder/exception.py:323 +#, python-format +msgid "Image %(image_id)s could not be found." +msgstr "" + +#: cinder/exception.py:327 +#, python-format +msgid "Service %(service_id)s could not be found." +msgstr "" + +#: cinder/exception.py:331 +#, python-format +msgid "Host %(host)s could not be found." +msgstr "" + +#: cinder/exception.py:335 +#, python-format +msgid "Scheduler Host Filter %(filter_name)s could not be found." +msgstr "" + +#: cinder/exception.py:339 +#, python-format +msgid "Scheduler Host Weigher %(weigher_name)s could not be found." +msgstr "" + +#: cinder/exception.py:343 +#, python-format +msgid "Could not find binary %(binary)s on host %(host)s." +msgstr "" + +#: cinder/exception.py:347 +#, python-format +msgid "Invalid reservation expiration %(expire)s." +msgstr "" + +#: cinder/exception.py:351 +#, python-format +msgid "" +"Change would make usage less than 0 for the following resources: " +"%(unders)s" +msgstr "" + +#: cinder/exception.py:356 +msgid "Quota could not be found" +msgstr "" + +#: cinder/exception.py:360 +#, python-format +msgid "Unknown quota resources %(unknown)s." +msgstr "" + +#: cinder/exception.py:364 +#, python-format +msgid "Quota for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:368 +#, python-format +msgid "Quota class %(class_name)s could not be found." +msgstr "" + +#: cinder/exception.py:372 +#, python-format +msgid "Quota usage for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:376 +#, python-format +msgid "Quota reservation %(uuid)s could not be found." 
+msgstr "" + +#: cinder/exception.py:380 +#, python-format +msgid "Quota exceeded for resources: %(overs)s" +msgstr "" + +#: cinder/exception.py:384 +#, python-format +msgid "Migration %(migration_id)s could not be found." +msgstr "" + +#: cinder/exception.py:388 +#, python-format +msgid "Migration not found for instance %(instance_id)s with status %(status)s." +msgstr "" + +#: cinder/exception.py:393 +#, python-format +msgid "File %(file_path)s could not be found." +msgstr "" + +#: cinder/exception.py:397 +#, python-format +msgid "Class %(class_name)s could not be found: %(exception)s" +msgstr "" + +#: cinder/exception.py:401 +msgid "Action not allowed." +msgstr "" + +#: cinder/exception.py:410 +#, python-format +msgid "Key pair %(key_name)s already exists." +msgstr "" + +#: cinder/exception.py:414 +#, python-format +msgid "Volume Type %(id)s already exists." +msgstr "" + +#: cinder/exception.py:418 +msgid "Migration error" +msgstr "" + +#: cinder/exception.py:422 +#, python-format +msgid "Malformed message body: %(reason)s" +msgstr "" + +#: cinder/exception.py:426 +#, python-format +msgid "Could not find config at %(path)s" +msgstr "" + +#: cinder/exception.py:430 +#, python-format +msgid "Could not load paste app '%(name)s' from %(path)s" +msgstr "" + +#: cinder/exception.py:434 +#, python-format +msgid "No valid host was found. %(reason)s" +msgstr "" + +#: cinder/exception.py:438 +#, python-format +msgid "Host %(host)s is not up or doesn't exist." +msgstr "" + +#: cinder/exception.py:442 +msgid "Quota exceeded" +msgstr "" + +#: cinder/exception.py:449 +msgid "Requested volume or snapshot exceeds allowed Gigabytes quota" +msgstr "" + +#: cinder/exception.py:454 +msgid "Maximum volume/snapshot size exceeded" +msgstr "" + +#: cinder/exception.py:458 +#, python-format +msgid "Maximum number of volumes allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:462 +#, python-format +msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:466 +#, python-format +msgid "Detected more than one volume with name %(vol_name)s" +msgstr "" + +#: cinder/exception.py:470 +#, python-format +msgid "3PAR Host already exists: %(err)s. %(info)s" +msgstr "" + +#: cinder/exception.py:474 +#, python-format +msgid "Invalid 3PAR Domain: %(err)s" +msgstr "" + +#: cinder/exception.py:478 +#, python-format +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +msgstr "" + +#: cinder/exception.py:483 +msgid "Bad response from SolidFire API" +msgstr "" + +#: cinder/exception.py:487 +#, python-format +msgid "Error in SolidFire API response: data=%(data)s" +msgstr "" + +#: cinder/exception.py:491 +#, python-format +msgid "Unknown or unsupported command %(cmd)s" +msgstr "" + +#: cinder/exception.py:495 +#, python-format +msgid "Malformed response to command %(cmd)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:499 +#, python-format +msgid "Bad HTTP response status %(status)s" +msgstr "" + +#: cinder/exception.py:503 +#, python-format +msgid "Operation failed with status=%(status)s. 
Full dump: %(data)s" +msgstr "" + +#: cinder/exception.py:507 +#, python-format +msgid "Unable to create server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:511 +#, python-format +msgid "Unable to find server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:515 +msgid "Unable to find any active VPSA controller" +msgstr "" + +#: cinder/exception.py:519 +#, python-format +msgid "Failed to retrieve attachments for volume %(name)s" +msgstr "" + +#: cinder/exception.py:523 +#, python-format +msgid "Invalid attachment info for volume %(name)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:527 +#, python-format +msgid "Instance %(instance_id)s could not be found." +msgstr "" + +#: cinder/exception.py:531 +#, python-format +msgid "Bad or unexpected response from the storage volume backend API: %(data)s" +msgstr "" + +#: cinder/exception.py:536 +msgid "Unknown NFS exception" +msgstr "" + +#: cinder/exception.py:540 +msgid "No mounted NFS shares found" +msgstr "" + +#: cinder/exception.py:544 cinder/exception.py:556 +#, python-format +msgid "There is no share which can host %(volume_size)sG" +msgstr "" + +#: cinder/exception.py:548 +msgid "Unknown Gluster exception" +msgstr "" + +#: cinder/exception.py:552 +msgid "No mounted Gluster shares found" +msgstr "" + +#: cinder/exception.py:560 +#, python-format +msgid "" +"Glance metadata cannot be updated, key %(key)s exists for volume id " +"%(volume_id)s" +msgstr "" + +#: cinder/exception.py:565 +msgid "Failed to copy image to volume" +msgstr "" + +#: cinder/exception.py:569 +#, python-format +msgid "Backup %(backup_id)s could not be found." +msgstr "" + +#: cinder/exception.py:573 +#, python-format +msgid "Invalid backup: %(reason)s" +msgstr "" + +#: cinder/exception.py:577 +msgid "Connection to swift failed" +msgstr "" + +#: cinder/flags.py:145 +msgid "Deploy v1 of the Cinder API. " +msgstr "" + +#: cinder/flags.py:148 +msgid "Deploy v2 of the Cinder API. " +msgstr "" + +#: cinder/manager.py:158 +#, python-format +msgid "Skipping %(full_task_name)s, %(ticks_to_skip)s ticks left until next run" +msgstr "" + +#: cinder/manager.py:164 +#, python-format +msgid "Running periodic task %(full_task_name)s" +msgstr "" + +#: cinder/manager.py:171 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "" + +#: cinder/manager.py:216 +msgid "Notifying Schedulers of capabilities ..." 
+msgstr "" + +#: cinder/policy.py:30 +msgid "JSON file representing policy" +msgstr "" + +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" +msgstr "" + +#: cinder/quota.py:704 +#, python-format +msgid "Created reservations %(reservations)s" +msgstr "" + +#: cinder/quota.py:726 +#, python-format +msgid "Failed to commit reservations %(reservations)s" +msgstr "" + +#: cinder/quota.py:747 +#, python-format +msgid "Failed to roll back reservations %(reservations)s" +msgstr "" + +#: cinder/service.py:124 +msgid "SIGTERM received" +msgstr "" + +#: cinder/service.py:174 cinder/openstack/common/service.py:164 +msgid "Parent process has died unexpectedly, exiting" +msgstr "" + +#: cinder/service.py:213 cinder/openstack/common/service.py:200 +msgid "Forking too fast, sleeping" +msgstr "" + +#: cinder/service.py:231 cinder/openstack/common/service.py:121 +#: cinder/openstack/common/service.py:218 +#, python-format +msgid "Caught %s, exiting" +msgstr "" + +#: cinder/service.py:236 cinder/openstack/common/service.py:223 +msgid "Unhandled exception" +msgstr "" + +#: cinder/service.py:243 cinder/openstack/common/service.py:230 +#, python-format +msgid "Started child %d" +msgstr "" + +#: cinder/service.py:253 cinder/openstack/common/service.py:240 +#, python-format +msgid "Starting %d workers" +msgstr "" + +#: cinder/service.py:272 cinder/openstack/common/service.py:257 +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "" + +#: cinder/service.py:275 +#, python-format +msgid "Child %(pid)d exited with status %(code)d" +msgstr "" + +#: cinder/service.py:278 cinder/openstack/common/service.py:265 +#, python-format +msgid "pid %d not in child list" +msgstr "" + +#: cinder/service.py:286 +#, python-format +msgid "_wait_child %d" +msgstr "" + +#: cinder/service.py:302 +#, python-format +msgid "wait wrap.failed %s" +msgstr "" + +#: cinder/service.py:310 cinder/openstack/common/service.py:293 +#, python-format +msgid "Caught %s, stopping children" +msgstr "" + +#: cinder/service.py:321 cinder/openstack/common/service.py:304 +#, python-format +msgid "Waiting on %d children to exit" +msgstr "" + +#: cinder/service.py:353 +#, python-format +msgid "Starting %(topic)s node (version %(version_string)s)" +msgstr "" + +#: cinder/service.py:367 cinder/openstack/common/rpc/service.py:47 +#, python-format +msgid "Creating Consumer connection for Service %s" +msgstr "" + +#: cinder/service.py:458 +msgid "Service killed that has no database entry" +msgstr "" + +#: cinder/service.py:495 +msgid "The service database object disappeared, Recreating it." +msgstr "" + +#: cinder/service.py:510 +msgid "Recovered model server connection!" +msgstr "" + +#: cinder/service.py:516 +msgid "model server went away" +msgstr "" + +#: cinder/service.py:608 +msgid "Full set of FLAGS:" +msgstr "" + +#: cinder/service.py:615 +#, python-format +msgid "%(flag)s : FLAG SET " +msgstr "" + +#: cinder/utils.py:96 +#, python-format +msgid "Fetching %s" +msgstr "" + +#: cinder/utils.py:147 cinder/openstack/common/processutils.py:122 +#, python-format +msgid "Got unknown keyword args to utils.execute: %r" +msgstr "" + +#: cinder/utils.py:153 +msgid "" +"The root_helper option (which lets you specify a root wrapper different " +"from cinder-rootwrap, and defaults to using sudo) is now deprecated. You " +"should use the rootwrap_config option instead." 
+msgstr "" + +#: cinder/utils.py:169 cinder/openstack/common/processutils.py:137 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "" + +#: cinder/utils.py:186 cinder/utils.py:267 +#: cinder/openstack/common/processutils.py:162 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:224 +#, python-format +msgid "Result was %s" +msgstr "" + +#: cinder/utils.py:199 cinder/openstack/common/processutils.py:174 +#, python-format +msgid "%r failed. Retrying." +msgstr "" + +#: cinder/utils.py:241 cinder/volume/drivers/san/hp/hp_3par_common.py:197 +#, python-format +msgid "Running cmd (SSH): %s" +msgstr "" + +#: cinder/utils.py:243 +msgid "Environment not supported over SSH" +msgstr "" + +#: cinder/utils.py:247 +msgid "process_input not supported over SSH" +msgstr "" + +#: cinder/utils.py:316 cinder/volume/drivers/huawei/huawei_iscsi.py:91 +msgid "Specify a password or private_key" +msgstr "" + +#: cinder/utils.py:332 cinder/volume/drivers/huawei/huawei_iscsi.py:99 +#, python-format +msgid "Error connecting via ssh: %s" +msgstr "" + +#: cinder/utils.py:373 +#, python-format +msgid "debug in callback: %s" +msgstr "" + +#: cinder/utils.py:532 +#, python-format +msgid "Link Local address is not found.:%s" +msgstr "" + +#: cinder/utils.py:535 +#, python-format +msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +msgstr "" + +#: cinder/utils.py:569 +#, python-format +msgid "Invalid backend: %s" +msgstr "" + +#: cinder/utils.py:580 +#, python-format +msgid "backend %s" +msgstr "" + +#: cinder/utils.py:630 +msgid "in looping call" +msgstr "" + +#: cinder/utils.py:813 +#, python-format +msgid "Expected object of type: %s" +msgstr "" + +#: cinder/utils.py:903 +#, python-format +msgid "timefunc: '%(name)s' took %(total_time).2f secs" +msgstr "" + +#: cinder/utils.py:1105 +#, python-format +msgid "Could not remove tmpdir: %s" +msgstr "" + +#: cinder/utils.py:1213 +#, python-format +msgid "Unknown byte multiplier: %s" +msgstr "" + +#: cinder/wsgi.py:121 +#, python-format +msgid "Unable to find cert_file : %s" +msgstr "" + +#: cinder/wsgi.py:124 +#, python-format +msgid "Unable to find ca_file : %s" +msgstr "" + +#: cinder/wsgi.py:127 +#, python-format +msgid "Unable to find key_file : %s" +msgstr "" + +#: cinder/wsgi.py:130 +msgid "" +"When running server in SSL mode, you must specify both a cert_file and " +"key_file option value in your configuration file" +msgstr "" + +#: cinder/wsgi.py:163 +#, python-format +msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" +msgstr "" + +#: cinder/wsgi.py:207 +#, python-format +msgid "Started %(name)s on %(_host)s:%(_port)s" +msgstr "" + +#: cinder/wsgi.py:226 +msgid "Stopping WSGI server." +msgstr "" + +#: cinder/wsgi.py:240 +msgid "WSGI server has stopped." +msgstr "" + +#: cinder/wsgi.py:309 +msgid "You must implement __call__" +msgstr "" + +#: cinder/api/auth.py:26 +msgid "" +"cinder.api.auth:CinderKeystoneContext is deprecated. Please use " +"cinder.api.middleware.auth:CinderKeystoneContext instead." +msgstr "" + +#: cinder/api/auth.py:34 +msgid "" +"cinder.api.auth:pipeline_factory is deprecated. Please use " +"cinder.api.middleware.auth:pipeline_factory instead." 
+msgstr "" + +#: cinder/api/common.py:63 cinder/api/common.py:97 cinder/volume/api.py:359 +msgid "limit param must be an integer" +msgstr "" + +#: cinder/api/common.py:66 cinder/api/common.py:101 cinder/volume/api.py:356 +msgid "limit param must be positive" +msgstr "" + +#: cinder/api/common.py:91 +msgid "offset param must be an integer" +msgstr "" + +#: cinder/api/common.py:105 +msgid "offset param must be positive" +msgstr "" + +#: cinder/api/common.py:133 +#, python-format +msgid "marker [%s] not found" +msgstr "" + +#: cinder/api/common.py:160 +#, python-format +msgid "href %s does not contain version" +msgstr "" + +#: cinder/api/extensions.py:184 +msgid "Initializing extension manager." +msgstr "" + +#: cinder/api/extensions.py:199 +#, python-format +msgid "Loaded extension: %s" +msgstr "" + +#: cinder/api/extensions.py:237 +#, python-format +msgid "Ext name: %s" +msgstr "" + +#: cinder/api/extensions.py:238 +#, python-format +msgid "Ext alias: %s" +msgstr "" + +#: cinder/api/extensions.py:239 +#, python-format +msgid "Ext description: %s" +msgstr "" + +#: cinder/api/extensions.py:241 +#, python-format +msgid "Ext namespace: %s" +msgstr "" + +#: cinder/api/extensions.py:242 +#, python-format +msgid "Ext updated: %s" +msgstr "" + +#: cinder/api/extensions.py:244 +#, python-format +msgid "Exception loading extension: %s" +msgstr "" + +#: cinder/api/extensions.py:258 +#, python-format +msgid "Loading extension %s" +msgstr "" + +#: cinder/api/extensions.py:264 +#, python-format +msgid "Calling extension factory %s" +msgstr "" + +#: cinder/api/extensions.py:278 +#, python-format +msgid "osapi_volume_extension is set to deprecated path: %s" +msgstr "" + +#: cinder/api/extensions.py:280 +#, python-format +msgid "" +"Please set your flag or cinder.conf settings for osapi_volume_extension " +"to: %s" +msgstr "" + +#: cinder/api/extensions.py:289 +#, python-format +msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:357 +#, python-format +msgid "Failed to load extension %(classpath)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:381 +#, python-format +msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgstr "" + +#: cinder/api/sizelimit.py:25 +msgid "" +"cinder.api.sizelimit:RequestBodySizeLimiter is deprecated. Please use " +"cinder.api.middleware.sizelimit:RequestBodySizeLimiter instead" +msgstr "" + +#: cinder/api/xmlutil.py:268 +msgid "element is not a child" +msgstr "" + +#: cinder/api/xmlutil.py:417 +msgid "root element selecting a list" +msgstr "" + +#: cinder/api/xmlutil.py:742 +#, python-format +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgstr "" + +#: cinder/api/xmlutil.py:861 +msgid "subclasses must implement construct()!" 
+msgstr "" + +#: cinder/api/contrib/admin_actions.py:80 +#, python-format +msgid "Updating %(resource)s '%(id)s' with '%(update)r'" +msgstr "" + +#: cinder/api/contrib/backups.py:128 +#, python-format +msgid "show called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:140 +#, python-format +msgid "delete called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:143 +#, python-format +msgid "Delete backup with id: %s" +msgstr "" + +#: cinder/api/contrib/backups.py:185 +#, python-format +msgid "Creating new backup %s" +msgstr "" + +#: cinder/api/contrib/backups.py:195 cinder/api/contrib/backups.py:230 +msgid "Incorrect request body format" +msgstr "" + +#: cinder/api/contrib/backups.py:201 +#, python-format +msgid "Creating backup of volume %(volume_id)s in container %(container)s" +msgstr "" + +#: cinder/api/contrib/backups.py:221 +#, python-format +msgid "Restoring backup %(backup_id)s (%(body)s)" +msgstr "" + +#: cinder/api/contrib/backups.py:234 +#, python-format +msgid "Restoring backup %(backup_id)s to volume %(volume_id)s" +msgstr "" + +#: cinder/api/contrib/extended_snapshot_attributes.py:61 +msgid "Snapshot not found." +msgstr "" + +#: cinder/api/contrib/hosts.py:83 cinder/api/openstack/wsgi.py:158 +msgid "cannot understand XML" +msgstr "" + +#: cinder/api/contrib/hosts.py:133 +#, python-format +msgid "Host '%s' could not be found." +msgstr "" + +#: cinder/api/contrib/hosts.py:162 +#, python-format +msgid "Invalid status: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:165 +#, python-format +msgid "Invalid update setting: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:177 +#, python-format +msgid "Setting host %(host)s to %(state)s." +msgstr "" + +#: cinder/api/contrib/hosts.py:202 +msgid "Describe-resource is admin only functionality" +msgstr "" + +#: cinder/api/contrib/hosts.py:210 +msgid "Host not found" +msgstr "" + +#: cinder/api/contrib/quotas.py:63 +msgid "Quota limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:102 +msgid "Request body empty" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:106 +#: cinder/api/v1/snapshot_metadata.py:77 cinder/api/v1/volume_metadata.py:77 +#: cinder/api/v2/snapshot_metadata.py:77 +msgid "Request body and URI mismatch" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:109 +#: cinder/api/v1/snapshot_metadata.py:81 cinder/api/v1/volume_metadata.py:81 +#: cinder/api/v2/snapshot_metadata.py:81 +msgid "Request body contains too many items" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:159 +msgid "Invalid request body" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:163 +msgid "No image_name was specified in request." +msgstr "" + +#: cinder/api/middleware/fault.py:45 +#, python-format +msgid "Caught error: %s" +msgstr "" + +#: cinder/api/middleware/fault.py:54 cinder/api/openstack/wsgi.py:886 +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "" + +#: cinder/api/middleware/sizelimit.py:54 cinder/api/middleware/sizelimit.py:63 +#: cinder/api/middleware/sizelimit.py:77 +msgid "Request is too large." 
+msgstr "" + +#: cinder/api/openstack/__init__.py:74 +msgid "Must specify an ExtensionManager class" +msgstr "" + +#: cinder/api/openstack/__init__.py:85 +#, python-format +msgid "Extended resource: %s" +msgstr "" + +#: cinder/api/openstack/__init__.py:110 +#, python-format +msgid "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" +msgstr "" + +#: cinder/api/openstack/__init__.py:115 +#, python-format +msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgstr "" + +#: cinder/api/openstack/__init__.py:128 +msgid "" +"cinder.api.openstack:FaultWrapper is deprecated. Please use " +"cinder.api.middleware.fault:FaultWrapper instead." +msgstr "" + +#: cinder/api/openstack/urlmap.py:25 +msgid "" +"cinder.api.openstack.urlmap:urlmap_factory is deprecated. Please use " +"cinder.api.urlmap:urlmap_factory instead." +msgstr "" + +#: cinder/api/openstack/wsgi.py:134 cinder/api/openstack/wsgi.py:537 +msgid "cannot understand JSON" +msgstr "" + +#: cinder/api/openstack/wsgi.py:542 +msgid "too many body keys" +msgstr "" + +#: cinder/api/openstack/wsgi.py:581 +#, python-format +msgid "Exception handling resource: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:586 +#, python-format +msgid "Fault thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:589 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:697 +msgid "Unrecognized Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:701 +msgid "No Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:705 +msgid "Empty body provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:816 +#, python-format +msgid "There is no such action: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:819 cinder/api/openstack/wsgi.py:832 +#: cinder/api/v1/snapshot_metadata.py:55 cinder/api/v1/snapshot_metadata.py:73 +#: cinder/api/v1/snapshot_metadata.py:98 cinder/api/v1/snapshot_metadata.py:123 +#: cinder/api/v1/volume_metadata.py:55 cinder/api/v1/volume_metadata.py:73 +#: cinder/api/v1/volume_metadata.py:98 cinder/api/v1/volume_metadata.py:123 +#: cinder/api/v2/snapshot_metadata.py:55 cinder/api/v2/snapshot_metadata.py:73 +#: cinder/api/v2/snapshot_metadata.py:98 cinder/api/v2/snapshot_metadata.py:123 +msgid "Malformed request body" +msgstr "" + +#: cinder/api/openstack/wsgi.py:829 +msgid "Unsupported Content-Type" +msgstr "" + +#: cinder/api/openstack/wsgi.py:841 +msgid "Malformed request url" +msgstr "" + +#: cinder/api/openstack/wsgi.py:889 +#, python-format +msgid "%(url)s returned a fault: %(e)s" +msgstr "" + +#: cinder/api/openstack/volume/__init__.py:25 +msgid "" +"cinder.api.openstack.volume:APIRouter is deprecated. Please use " +"cinder.api.v1.router:APIRouter instead." +msgstr "" + +#: cinder/api/openstack/volume/versions.py:26 +msgid "" +"cinder.api.openstack.volume.versions.Versions is deprecated. Please use " +"cinder.api.versions.Versions instead." +msgstr "" + +#: cinder/api/v1/limits.py:144 cinder/api/v2/limits.py:144 +#, python-format +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." +msgstr "" + +#: cinder/api/v1/limits.py:270 cinder/api/v2/limits.py:270 +msgid "This request was rate-limited." 
+msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:39 cinder/api/v1/snapshot_metadata.py:119 +#: cinder/api/v1/snapshot_metadata.py:158 cinder/api/v2/snapshot_metadata.py:39 +#: cinder/api/v2/snapshot_metadata.py:119 +#: cinder/api/v2/snapshot_metadata.py:158 +msgid "snapshot does not exist" +msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:141 +#: cinder/api/v1/snapshot_metadata.py:151 cinder/api/v1/volume_metadata.py:141 +#: cinder/api/v1/volume_metadata.py:151 cinder/api/v2/snapshot_metadata.py:141 +#: cinder/api/v2/snapshot_metadata.py:151 +msgid "Metadata item was not found" +msgstr "" + +#: cinder/api/v1/snapshots.py:123 cinder/api/v2/snapshots.py:123 +#, python-format +msgid "Delete snapshot with id: %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:173 cinder/api/v2/snapshots.py:179 +#, python-format +msgid "Create snapshot from volume %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:177 cinder/api/v2/snapshots.py:188 +#, python-format +msgid "Invalid value '%s' for force. " +msgstr "" + +#: cinder/api/v1/volume_metadata.py:39 cinder/api/v1/volume_metadata.py:119 +#: cinder/api/v1/volume_metadata.py:158 +msgid "volume does not exist" +msgstr "" + +#: cinder/api/v1/volumes.py:106 +#, python-format +msgid "vol=%s" +msgstr "" + +#: cinder/api/v1/volumes.py:236 cinder/api/v2/volumes.py:153 +#, python-format +msgid "Delete volume with id: %s" +msgstr "" + +#: cinder/api/v1/volumes.py:278 cinder/api/v1/volumes.py:282 +#: cinder/api/v2/volumes.py:209 cinder/api/v2/volumes.py:213 +msgid "Invalid imageRef provided." +msgstr "" + +#: cinder/api/v1/volumes.py:339 cinder/api/v2/volumes.py:272 +#, python-format +msgid "Create volume of %s GB" +msgstr "" + +#: cinder/api/v1/volumes.py:418 +#, python-format +msgid "Removing options '%(bad_options)s' from query" +msgstr "" + +#: cinder/api/v2/volumes.py:359 +#, python-format +msgid "Removing options '%s' from query" +msgstr "" + +#: cinder/backup/api.py:53 +msgid "Backup status must be available or error" +msgstr "" + +#: cinder/backup/api.py:78 +msgid "Volume to be backed up must be available" +msgstr "" + +#: cinder/backup/api.py:112 +msgid "Backup status must be available" +msgstr "" + +#: cinder/backup/api.py:117 +msgid "Backup to be restored has invalid size" +msgstr "" + +#: cinder/backup/api.py:126 +#, python-format +msgid "Creating volume of %(size)s GB for restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/api.py:140 +#, python-format +msgid "" +"volume size %(volume_size)d is too small to restore backup of size " +"%(size)d." 
+msgstr "" + +#: cinder/backup/api.py:145 +msgid "Volume to be restored to must be available" +msgstr "" + +#: cinder/backup/api.py:151 +msgid "Volume to be restored to is smaller than the backup to be restored" +msgstr "" + +#: cinder/backup/api.py:155 +#, python-format +msgid "Overwriting volume %(volume_id)s with restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/manager.py:79 +msgid "Cleaning up incomplete backup operations" +msgstr "" + +#: cinder/backup/manager.py:83 +#, python-format +msgid "Resetting volume %s to available (was backing-up)" +msgstr "" + +#: cinder/backup/manager.py:87 +#, python-format +msgid "Resetting volume %s to error_restoring (was restoring-backup)" +msgstr "" + +#: cinder/backup/manager.py:98 +#, python-format +msgid "Resetting backup %s to error (was creating)" +msgstr "" + +#: cinder/backup/manager.py:104 +#, python-format +msgid "Resetting backup %s to available (was restoring)" +msgstr "" + +#: cinder/backup/manager.py:109 +#, python-format +msgid "Resuming delete on backup: %s" +msgstr "" + +#: cinder/backup/manager.py:119 +#, python-format +msgid "create_backup started, backup: %(backup_id)s for volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:128 +#, python-format +msgid "" +"create_backup aborted, expected volume status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:137 +#, python-format +msgid "" +"create_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:160 +#, python-format +msgid "create_backup finished. backup: %s" +msgstr "" + +#: cinder/backup/manager.py:166 +#, python-format +msgid "" +"restore_backup started, restoring backup: %(backup_id)s to volume: " +"%(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:175 +#, python-format +msgid "" +"restore_backup aborted, expected volume status %(expected_status)s but " +"got %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:183 +#, python-format +msgid "" +"restore_backup aborted, expected backup status %(expected_status)s but " +"got %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:199 +#, python-format +msgid "" +"restore_backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]" +msgstr "" + +#: cinder/backup/manager.py:220 +#, python-format +msgid "" +"restore_backup finished, backup: %(backup_id)s restored to volume: " +"%(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:228 +#, python-format +msgid "delete_backup started, backup: %s" +msgstr "" + +#: cinder/backup/manager.py:234 +#, python-format +msgid "" +"delete_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:244 +#, python-format +msgid "" +"delete_backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]" +msgstr "" + +#: cinder/backup/manager.py:264 +#, python-format +msgid "delete_backup finished, backup %s deleted" +msgstr "" + +#: cinder/backup/services/swift.py:96 +#, python-format +msgid "unsupported compression algorithm: %s" +msgstr "" + +#: cinder/backup/services/swift.py:117 +#, python-format +msgid "_check_container_exists: container: %s" +msgstr "" + +#: cinder/backup/services/swift.py:122 +#, python-format +msgid "container %s does 
not exist" +msgstr "" + +#: cinder/backup/services/swift.py:127 +#, python-format +msgid "container %s exists" +msgstr "" + +#: cinder/backup/services/swift.py:133 +#, python-format +msgid "_create_container started, container: %(container)s,backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:148 +#, python-format +msgid "_generate_swift_object_name_prefix: %s" +msgstr "" + +#: cinder/backup/services/swift.py:159 +#, python-format +msgid "generated object list: %s" +msgstr "" + +#: cinder/backup/services/swift.py:169 +#, python-format +msgid "" +"_write_metadata started, container name: %(container)s, metadata " +"filename: %(filename)s" +msgstr "" + +#: cinder/backup/services/swift.py:184 +#, python-format +msgid "" +"error writing metadata file to swift, MD5 of metadata file in swift " +"[%(etag)s] is not the same as MD5 of metadata file sent to swift " +"[%(md5)s]" +msgstr "" + +#: cinder/backup/services/swift.py:188 +msgid "_write_metadata finished" +msgstr "" + +#: cinder/backup/services/swift.py:193 +#, python-format +msgid "" +"_read_metadata started, container name: %(container)s, metadata filename:" +" %(filename)s" +msgstr "" + +#: cinder/backup/services/swift.py:197 +#, python-format +msgid "_read_metadata finished (%s)" +msgstr "" + +#: cinder/backup/services/swift.py:207 +#, python-format +msgid "volume size %d is invalid." +msgstr "" + +#: cinder/backup/services/swift.py:221 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to swift, volume size: " +"%(volume_size_bytes)d, swift object names prefix %(object_prefix)s, " +"availability zone: %(availability_zone)s" +msgstr "" + +#: cinder/backup/services/swift.py:237 +msgid "reading chunk of data from volume" +msgstr "" + +#: cinder/backup/services/swift.py:244 +#, python-format +msgid "" +"compressed %(data_size_bytes)d bytes of data to %(comp_size_bytes)d bytes" +" using %(algorithm)s" +msgstr "" + +#: cinder/backup/services/swift.py:248 +msgid "not compressing data" +msgstr "" + +#: cinder/backup/services/swift.py:252 +msgid "About to put_object" +msgstr "" + +#: cinder/backup/services/swift.py:257 +#, python-format +msgid "swift MD5 for %(object_name)s: %(etag)s" +msgstr "" + +#: cinder/backup/services/swift.py:260 +#, python-format +msgid "backup MD5 for %(object_name)s: %(md5)s" +msgstr "" + +#: cinder/backup/services/swift.py:262 +#, python-format +msgid "" +"error writing object to swift, MD5 of object in swift %(etag)s is not the" +" same as MD5 of object sent to swift %(md5)s" +msgstr "" + +#: cinder/backup/services/swift.py:268 +msgid "Calling eventlet.sleep(0)" +msgstr "" + +#: cinder/backup/services/swift.py:276 +#, python-format +msgid "backup %s finished." +msgstr "" + +#: cinder/backup/services/swift.py:281 +#, python-format +msgid "v1 swift volume backup restore of %s started" +msgstr "" + +#: cinder/backup/services/swift.py:287 +#, python-format +msgid "metadata_object_names = %s" +msgstr "" + +#: cinder/backup/services/swift.py:293 +msgid "" +"restore_backup aborted, actual swift object list in swift does not match " +"object list stored in metadata" +msgstr "" + +#: cinder/backup/services/swift.py:299 +#, python-format +msgid "" +"restoring object from swift. 
backup: %(backup_id)s, container: " +"%(container)s, swift object name: %(object_name)s, volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:309 +#, python-format +msgid "decompressing data using %s algorithm" +msgstr "" + +#: cinder/backup/services/swift.py:323 +#, python-format +msgid "v1 swift volume backup restore of %s finished" +msgstr "" + +#: cinder/backup/services/swift.py:331 +#, python-format +msgid "" +"starting restore of backup %(object_prefix)s from swift container: " +"%(container)s, to volume %(volume_id)s, backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:339 +#, python-format +msgid "Restoring swift backup version %s" +msgstr "" + +#: cinder/backup/services/swift.py:344 +#, python-format +msgid "No support to restore swift backup version %s" +msgstr "" + +#: cinder/backup/services/swift.py:348 +#, python-format +msgid "restore %(backup_id)s to %(volume_id)s finished." +msgstr "" + +#: cinder/backup/services/swift.py:362 +msgid "swift error while listing objects, continuing with delete" +msgstr "" + +#: cinder/backup/services/swift.py:371 +#, python-format +msgid "swift error while deleting object %s, continuing with delete" +msgstr "" + +#: cinder/backup/services/swift.py:374 +#, python-format +msgid "deleted swift object: %(swift_object_name)s in container: %(container)s" +msgstr "" + +#: cinder/backup/services/swift.py:380 +#, python-format +msgid "delete %s finished" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:152 +#, python-format +msgid "Creating iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:171 cinder/brick/iscsi/iscsi.py:264 +#: cinder/brick/iscsi/iscsi.py:400 cinder/brick/iscsi/iscsi.py:409 +#, python-format +msgid "Failed to create iscsi target for volume id:%(vol_id)s." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:181 +#, python-format +msgid "" +"Failed to create iscsi target for volume id:%(vol_id)s. Please ensure " +"your tgtd config file contains 'include %(volumes_dir)s/*'" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:192 +#, python-format +msgid "Removing iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:209 cinder/brick/iscsi/iscsi.py:426 +#, python-format +msgid "Failed to remove iscsi target for volume id:%(vol_id)s." 
+msgstr "" + +#: cinder/brick/iscsi/iscsi.py:218 cinder/brick/iscsi/iscsi.py:434 +msgid "valid iqn needed for show_target" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:270 +#, python-format +msgid "Removing iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:356 +msgid "rtstool is not installed correctly" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:376 +#, python-format +msgid "Creating iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:416 +#, python-format +msgid "Removing iscsi_target: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:455 +#, python-format +msgid "Failed to add initiator iqn %s to target" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:35 +#, python-format +msgid "Unable to find Volume Group: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:41 +#, python-format +msgid "Failed to create Volume Group: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:75 +msgid "Error creating Volume Group" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:76 +#, python-format +msgid "Cmd :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:77 +#, python-format +msgid "StdOut :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:78 +#, python-format +msgid "StdErr :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:82 +#, python-format +msgid "Unable to locate Volume Group %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:265 +#, python-format +msgid "Unable to find VG: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:284 +msgid "" +"Requested to setup thin provisioning, however current LVM version does " +"not support it." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:337 +#, python-format +msgid "Unable to find LV: %s" +msgstr "" + +#: cinder/common/sqlalchemyutils.py:68 +msgid "Id not in sort_keys; is sort_keys unique?" +msgstr "" + +#: cinder/common/sqlalchemyutils.py:116 +msgid "Unknown sort direction, must be 'desc' or 'asc'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:51 +msgid "Use of empty request context is deprecated" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:178 +#, python-format +msgid "Unrecognized read_deleted value '%s'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:786 +#, python-format +msgid "Change will make usage less than 0 for the following resources: %(unders)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1797 cinder/db/sqlalchemy/api.py:1823 +#, python-format +msgid "No backend config with id %(sm_backend_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1880 +#, python-format +msgid "No sm_flavor called %(sm_flavor)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1924 +#, python-format +msgid "No sm_volume with id %(volume_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1987 +#, python-format +msgid "No backup with id %(backup_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:74 +msgid "version should be an integer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:101 +msgid "Upgrade DB using Essex release first." +msgstr "" + +#: cinder/db/sqlalchemy/session.py:71 +#, python-format +msgid "Got mysql server has gone away: %s" +msgstr "" + +#: cinder/db/sqlalchemy/session.py:132 +#, python-format +msgid "SQL connection failed. %s attempts left." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:243 +msgid "Exception while creating table." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:272 +msgid "Downgrade from initial Cinder install is unsupported." 
+msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:85 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:123 +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:82 +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:47 +#, python-format +msgid "Table |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:135 +msgid "quota_classes table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:142 +msgid "quota_usages table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:149 +msgid "reservations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:62 +msgid "Exception while creating table 'volume_glance_metedata'" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:77 +msgid "volume_glance_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:94 +msgid "backups table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:60 +msgid "snapshot_metadata table not dropped" +msgstr "" + +#: cinder/image/glance.py:151 +#, python-format +msgid "" +"Error contacting glance server '%(host)s:%(port)s' for '%(method)s', " +"%(extra)s." +msgstr "" + +#: cinder/image/image_utils.py:116 +msgid "Snapshot list encountered but no header found!" +msgstr "" + +#: cinder/image/image_utils.py:224 +msgid "'qemu-img info' parsing failed." +msgstr "" + +#: cinder/image/image_utils.py:231 +#, python-format +msgid "fmt=%(fmt)s backed by:%(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:248 +#, python-format +msgid "Converted to raw, but format is now %s" +msgstr "" + +#: cinder/image/image_utils.py:278 +#, python-format +msgid "Converted to %(f1)s, but format is now %(f2)s" +msgstr "" + +#: cinder/openstack/common/exception.py:104 +msgid "Uncaught exception" +msgstr "" + +#: cinder/openstack/common/excutils.py:48 +#, python-format +msgid "Original exception being dropped: %s" +msgstr "" + +#: cinder/openstack/common/lockutils.py:102 +#, python-format +msgid "Could not release the acquired lock `%s`" +msgstr "" + +#: cinder/openstack/common/lockutils.py:188 +#, python-format +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:199 +#, python-format +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:226 +#, python-format +msgid "Got file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:234 +#, python-format +msgid "Released file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." 
+msgstr "" + +#: cinder/openstack/common/log.py:226 +#, python-format +msgid "Deprecated: %s" +msgstr "" + +#: cinder/openstack/common/log.py:329 +#, python-format +msgid "Error loading logging config %(log_config)s: %(err_msg)s" +msgstr "" + +#: cinder/openstack/common/log.py:379 +#, python-format +msgid "syslog facility must be one of: %s" +msgstr "" + +#: cinder/openstack/common/log.py:537 +#, python-format +msgid "Fatal call to deprecated config: %(msg)s" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:84 +#, python-format +msgid "task run outlasted interval by %s sec" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:91 +msgid "in fixed duration looping call" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:131 +#, python-format +msgid "Dynamic looping call sleeping for %.02f seconds" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:138 +msgid "in dynamic looping call" +msgstr "" + +#: cinder/openstack/common/policy.py:149 +#, python-format +msgid "" +"Inheritance-based rules are deprecated; use the default brain instead of " +"%s." +msgstr "" + +#: cinder/openstack/common/policy.py:163 +#, python-format +msgid "Failed to understand rule %(match)r" +msgstr "" + +#: cinder/openstack/common/policy.py:173 +#, python-format +msgid "Inheritance-based rules are deprecated; update _check_%s" +msgstr "" + +#: cinder/openstack/common/policy.py:180 +#, python-format +msgid "No handler for matches of kind %s" +msgstr "" + +#: cinder/openstack/common/service.py:112 +#: cinder/openstack/common/service.py:275 +msgid "Full set of CONF:" +msgstr "" + +#: cinder/openstack/common/service.py:261 +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "" + +#: cinder/openstack/common/strutils.py:72 +#, python-format +msgid "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:125 +#, python-format +msgid "%s not in valid priorities" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:141 +#, python-format +msgid "" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:171 +#, python-format +msgid "Failed to load notifier %s. These notifications will not be sent." +msgstr "" + +#: cinder/openstack/common/notifier/rabbit_notifier.py:27 +msgid "The rabbit_notifier is now deprecated. Please use rpc_notifier instead." +msgstr "" + +#: cinder/openstack/common/notifier/rpc_notifier.py:45 +#: cinder/openstack/common/notifier/rpc_notifier2.py:51 +#, python-format +msgid "Could not send notification to %(topic)s. Payload=%(message)s" +msgstr "" + +#: cinder/openstack/common/rpc/__init__.py:106 +#, python-format +msgid "" +"A RPC is being made while holding a lock. The locks currently held are " +"%(locks)s. This is probably a bug. Please report it. Include the " +"following: [%(stack)s]." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:75 +msgid "Pool creating new connection" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:200 +#, python-format +msgid "no calling threads waiting for msg_id : %s, message : %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:208 +#, python-format +msgid "" +"Number of call waiters is greater than warning threshhold: %d. There " +"could be a MulticallProxyWaiter leak." 
+msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:291 +#, python-format +msgid "unpacked context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:337 +#, python-format +msgid "UNIQUE_ID is %s." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:406 +#, python-format +msgid "received %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:414 +#, python-format +msgid "no method for message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:415 +#, python-format +msgid "No method for message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:443 +#: cinder/openstack/common/rpc/impl_zmq.py:286 +#, python-format +msgid "Expected exception during message handling (%s)" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:451 +#: cinder/openstack/common/rpc/impl_zmq.py:292 +msgid "Exception during message handling" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:586 +#, python-format +msgid "Making synchronous call on %s ..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:589 +#, python-format +msgid "MSG_ID is %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:623 +#, python-format +msgid "Making asynchronous cast on %s..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:632 +msgid "Making asynchronous fanout cast..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:660 +#, python-format +msgid "Sending %(event_type)s on %(topic)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:74 +msgid "An unknown RPC related exception occurred." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:104 +#, python-format +msgid "" +"Remote error: %(exc_type)s %(value)s\n" +"%(traceback)s." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:121 +#, python-format +msgid "" +"Timeout while waiting on RPC response - topic: \"%(topic)s\", RPC method:" +" \"%(method)s\" info: \"%(info)s\"" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:137 +#: cinder/openstack/common/rpc/common.py:138 +#: cinder/openstack/common/rpc/common.py:139 +msgid "" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:143 +#, python-format +msgid "Found duplicate message(%(msg_id)s). Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:147 +msgid "Invalid reuse of an RPC connection." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:151 +#, python-format +msgid "Specified RPC version, %(version)s, not supported by this endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:156 +#, python-format +msgid "" +"Specified RPC envelope version, %(version)s, not supported by this " +"endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:280 +#, python-format +msgid "Failed to sanitize %(item)s. Key error %(err)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:302 +#, python-format +msgid "Returning exception %s to caller" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:169 +#: cinder/openstack/common/rpc/impl_qpid.py:133 +msgid "Failed to process message... skipping it." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:480 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:502 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:539 +#, python-format +msgid "" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:555 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:609 +#: cinder/openstack/common/rpc/impl_qpid.py:403 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:627 +#: cinder/openstack/common/rpc/impl_qpid.py:418 +#, python-format +msgid "Timed out waiting for RPC response: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:631 +#: cinder/openstack/common/rpc/impl_qpid.py:422 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:670 +#: cinder/openstack/common/rpc/impl_qpid.py:457 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:351 +#, python-format +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:357 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:370 +msgid "Re-established AMQP queues" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:430 +msgid "Error processing message. Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:97 +msgid "JSON serialization failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:104 +#, python-format +msgid "Deserializing: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:140 +#, python-format +msgid "Connecting to %(addr)s with %(type)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:141 +#, python-format +msgid "-> Subscribed to %(subscribe)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:142 +#, python-format +msgid "-> bind: %(bind)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:150 +msgid "Could not open socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:162 +#, python-format +msgid "Subscribing to %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:204 +msgid "You cannot recv on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:209 +msgid "You cannot send on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:273 +#, python-format +msgid "Running func with context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:311 +msgid "Sending reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:345 +msgid "RPC message did not include method." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:380 +msgid "Registering reactor" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:392 +msgid "In reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:407 +msgid "Out reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:411 +msgid "Consuming socket" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:453 +#, python-format +msgid "CONSUMER GOT %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:465 +#, python-format +msgid "Creating proxy for topic: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:471 +msgid "Topic contained dangerous characters." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:496 +#, python-format +msgid "ROUTER RELAY-OUT SUCCEEDED %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:505 +msgid "Topic socket file creation failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:510 +#, python-format +msgid "ROUTER RELAY-OUT QUEUED %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:513 +#, python-format +msgid "Local per-topic backlog buffer full for topic %(topic)s. Dropping message." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:532 +#, python-format +msgid "Could not create IPC directory %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:542 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:576 +#, python-format +msgid "CONSUMER RECEIVED DATA: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:578 +#, python-format +msgid "ROUTER RELAY-OUT %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:600 +msgid "ZMQ Envelope version unsupported or unknown." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:628 +msgid "Skipping topic registration. Already registered." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:635 +#, python-format +msgid "Consumer is a zmq.%s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:687 +msgid "Creating payload" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:700 +msgid "Creating queue socket for reply waiter" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:713 +msgid "Sending cast" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:716 +msgid "Cast sent; Waiting reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:719 +#, python-format +msgid "Received message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:720 +msgid "Unpacking response" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:729 +msgid "Unsupported or unknown ZMQ envelope returned." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:736 +msgid "RPC Message Invalid." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:760 +#, python-format +msgid "%(msg)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:763 +#, python-format +msgid "Sending message(s) to: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:767 +msgid "No matchmaker results. Not casting." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:770 +msgid "No match from matchmaker." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:53 +msgid "Match not found by MatchMaker." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:89 +msgid "Matchmaker does not implement registration or heartbeat." 
+msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:239 +#, python-format +msgid "Matchmaker unregistered: %s, %s" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:250 +msgid "Register before starting heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:343 +#: cinder/openstack/common/rpc/matchmaker.py:361 +#, python-format +msgid "No key defining hosts for topic '%s', see ringfile" +msgstr "" + +#: cinder/scheduler/chance.py:51 cinder/scheduler/simple.py:90 +msgid "Is the appropriate service running?" +msgstr "" + +#: cinder/scheduler/chance.py:56 +msgid "Could not find another host" +msgstr "" + +#: cinder/scheduler/driver.py:89 +msgid "Must implement a fallback schedule" +msgstr "" + +#: cinder/scheduler/driver.py:93 +msgid "Must implement schedule_create_volume" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:113 +msgid "Invalid value for 'scheduler_max_attempts', must be >=1" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:131 +#, python-format +msgid "" +"Error scheduling %(volume_id)s from last vol-service: %(last_host)s : " +"%(exc)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:160 +#, python-format +msgid "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:208 +#, python-format +msgid "Filtered %(hosts)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:214 +#, python-format +msgid "Choosing %(best_host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:240 +#, python-format +msgid "Ignoring %(service_name)s service update from %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:244 +#, python-format +msgid "Received %(service_name)s service update from %(host)s." +msgstr "" + +#: cinder/scheduler/host_manager.py:266 +msgid "service is down or disabled." +msgstr "" + +#: cinder/scheduler/manager.py:119 +#, python-format +msgid "Failed to schedule_%(method)s: %(ex)s" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:69 +#, python-format +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:78 +#, python-format +msgid "Could not decode scheduler options: '%(e)s'" +msgstr "" + +#: cinder/scheduler/simple.py:79 +msgid "Not enough allocatable volume gigabytes remaining" +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:37 +msgid "Free capacity not set: volume node info collection broken." +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:51 +#, python-format +msgid "" +"Insufficient free space for volume creation (requested / avail): " +"%(requested)s/%(available)s" +msgstr "" + +#: cinder/scheduler/filters/retry_filter.py:41 +#, python-format +msgid "Host %(host)s %(pass_msg)s. 
Previously tried hosts: %(hosts)s" +msgstr "" + +#: cinder/tests/fake_driver.py:45 cinder/volume/driver.py:506 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "" + +#: cinder/tests/fake_driver.py:93 cinder/tests/fake_driver.py:98 +#, python-format +msgid "LoggingVolumeDriver: %s" +msgstr "" + +#: cinder/tests/fake_utils.py:72 +#, python-format +msgid "Faking execution of cmd (subprocess): %s" +msgstr "" + +#: cinder/tests/fake_utils.py:80 +#, python-format +msgid "Faked command matched %s" +msgstr "" + +#: cinder/tests/fake_utils.py:96 +#, python-format +msgid "Faked command raised an exception %s" +msgstr "" + +#: cinder/tests/fake_utils.py:101 +#, python-format +msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" +msgstr "" + +#: cinder/tests/test_misc.py:59 +#, python-format +msgid "" +"The following migrations are missing a downgrade:\n" +"\t%s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:243 +#, python-format +msgid "unrecognized argument %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1217 +#, python-format +msgid "Run CLI command: %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1220 +#, python-format +msgid "" +"CLI output:\n" +" stdout: %(stdout)s\n" +" stderr: %(stderr)s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1225 +#, python-format +msgid "" +"CLI Exception output:\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/tests/test_volume_types.py:58 +#, python-format +msgid "Given data: %s" +msgstr "" + +#: cinder/tests/test_volume_types.py:59 +#, python-format +msgid "Result data: %s" +msgstr "" + +#: cinder/tests/api/contrib/test_backups.py:638 +msgid "Invalid input" +msgstr "" + +#: cinder/tests/integrated/test_login.py:31 +#, python-format +msgid "volume: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:37 +#, python-format +msgid "" +"%(message)s\n" +"Status Code: %(_status)s\n" +"Body: %(_body)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:46 +msgid "Authentication error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:54 +msgid "Authorization error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:62 +msgid "Item not found" +msgstr "" + +#: cinder/tests/integrated/api/client.py:104 +#, python-format +msgid "Doing %(method)s on %(relative_url)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:106 +#, python-format +msgid "Body: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:124 +#, python-format +msgid "%(auth_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:150 +#, python-format +msgid "%(relative_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:160 +msgid "Unexpected status code" +msgstr "" + +#: cinder/tests/integrated/api/client.py:167 +#, python-format +msgid "Decoding JSON: %s" +msgstr "" + +#: cinder/volume/api.py:96 +msgid "May specify only one of snapshot, imageRef or source volume" +msgstr "" + +#: cinder/volume/api.py:103 cinder/volume/api.py:449 +#: cinder/volume/manager.py:554 +msgid "status must be available" +msgstr "" + +#: cinder/volume/api.py:108 +msgid "Volume size cannot be less than the snapshot size" +msgstr "" + +#: cinder/volume/api.py:117 +msgid "Unable to clone volumes that are in an error state" +msgstr "" + +#: cinder/volume/api.py:123 +msgid "Clones currently must be >= original volume size."
+msgstr "" + +#: cinder/volume/api.py:140 +#, python-format +msgid "Volume size '%s' must be an integer and greater than 0" +msgstr "" + +#: cinder/volume/api.py:150 +msgid "Size of specified image is larger than volume size." +msgstr "" + +#: cinder/volume/api.py:154 +msgid "Image minDisk size is larger than the volume size." +msgstr "" + +#: cinder/volume/api.py:168 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/api.py:177 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create volume (%(d_consumed)d " +"volumes already consumed)" +msgstr "" + +#: cinder/volume/api.py:308 +msgid "Failed to update quota for deleting volume" +msgstr "" + +#: cinder/volume/api.py:316 +msgid "Volume status must be available or error" +msgstr "" + +#: cinder/volume/api.py:321 +#, python-format +msgid "Volume still has %d dependent snapshots" +msgstr "" + +#: cinder/volume/api.py:374 cinder/volume/api.py:432 +#: cinder/volume/volume_types.py:65 +#, python-format +msgid "Searching by: %s" +msgstr "" + +#: cinder/volume/api.py:452 +msgid "already attached" +msgstr "" + +#: cinder/volume/api.py:459 +msgid "already detached" +msgstr "" + +#: cinder/volume/api.py:470 +msgid "Volume status must be available to reserve" +msgstr "" + +#: cinder/volume/api.py:519 +msgid "must be available" +msgstr "" + +#: cinder/volume/api.py:537 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG snapshot " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/api.py:546 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create snapshot (%(d_consumed)d " +"snapshots already consumed)" +msgstr "" + +#: cinder/volume/api.py:595 +msgid "Volume Snapshot status must be available or error" +msgstr "" + +#: cinder/volume/api.py:623 +msgid "Metadata property key blank" +msgstr "" + +#: cinder/volume/api.py:627 +msgid "Metadata property key greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:631 +msgid "Metadata property value greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:716 +msgid "Volume status must be available/in-use." +msgstr "" + +#: cinder/volume/api.py:719 +msgid "Volume status is in-use." +msgstr "" + +#: cinder/volume/driver.py:94 cinder/volume/drivers/netapp/nfs.py:253 +#, python-format +msgid "Recovering from a failed execute. Try number %s" +msgstr "" + +#: cinder/volume/driver.py:221 cinder/volume/drivers/emc/emc_smis_iscsi.py:119 +msgid "ISCSI provider_location not stored, using discovery" +msgstr "" + +#: cinder/volume/driver.py:269 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/driver.py:273 cinder/volume/drivers/emc/emc_smis_iscsi.py:162 +#, python-format +msgid "ISCSI Discovery: Found %s" +msgstr "" + +#: cinder/volume/driver.py:361 +#, python-format +msgid "copy_image_to_volume %s." +msgstr "" + +#: cinder/volume/driver.py:378 +#, python-format +msgid "copy_volume_to_image %s." +msgstr "" + +#: cinder/volume/driver.py:437 +#, python-format +msgid "iSCSI device not found at %s" +msgstr "" + +#: cinder/volume/driver.py:439 +#, python-format +msgid "" +"ISCSI volume not yet found at: %(host_device)s. Will rescan & retry. 
Try" +" number: %(tries)s" +msgstr "" + +#: cinder/volume/driver.py:451 +#, python-format +msgid "Found iSCSI node %(host_device)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/volume/driver.py:469 cinder/volume/manager.py:689 +#: cinder/volume/drivers/lvm.py:560 cinder/volume/drivers/lvm.py:676 +#: cinder/volume/drivers/storwize_svc.py:1344 +#: cinder/volume/drivers/emc/emc_smis_common.py:855 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:241 +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1503 +#: cinder/volume/drivers/netapp/iscsi.py:1114 +#: cinder/volume/drivers/netapp/iscsi.py:1484 +#: cinder/volume/drivers/netapp/iscsi.py:2236 +#: cinder/volume/drivers/netapp/iscsi.py:2515 +#: cinder/volume/drivers/nexenta/volume.py:317 +msgid "Updating volume status" +msgstr "" + +#: cinder/volume/driver.py:544 +msgid "Driver must implement initialize_connection" +msgstr "" + +#: cinder/volume/manager.py:122 +#, python-format +msgid "Driver path %s is deprecated, update your configuration to the new path." +msgstr "" + +#: cinder/volume/manager.py:144 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "" + +#: cinder/volume/manager.py:149 +#, python-format +msgid "volume %s stuck in a downloading state" +msgstr "" + +#: cinder/volume/manager.py:154 +#, python-format +msgid "volume %s: skipping export" +msgstr "" + +#: cinder/volume/manager.py:156 +msgid "Resuming any in progress delete operations" +msgstr "" + +#: cinder/volume/manager.py:159 +#, python-format +msgid "Resuming delete on volume: %s" +msgstr "" + +#: cinder/volume/manager.py:219 +#, python-format +msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +msgstr "" + +#: cinder/volume/manager.py:228 +#, python-format +msgid "volume %s: creating from snapshot" +msgstr "" + +#: cinder/volume/manager.py:232 +#, python-format +msgid "volume %s: creating from existing volume" +msgstr "" + +#: cinder/volume/manager.py:236 +#, python-format +msgid "volume %s: creating from image" +msgstr "" + +#: cinder/volume/manager.py:245 +#, python-format +msgid "volume %s: creating" +msgstr "" + +#: cinder/volume/manager.py:277 +#, python-format +msgid "volume %s: creating export" +msgstr "" + +#: cinder/volume/manager.py:286 +#, python-format +msgid "volume %s: create failed" +msgstr "" + +#: cinder/volume/manager.py:316 +#, python-format +msgid "volume %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:324 +#, python-format +msgid "Error: %s" +msgstr "" + +#: cinder/volume/manager.py:350 +#, python-format +msgid "volume %s: Error trying to reschedule create" +msgstr "" + +#: cinder/volume/manager.py:368 +msgid "Retry info not present, will not reschedule" +msgstr "" + +#: cinder/volume/manager.py:372 +msgid "No request spec, will not reschedule" +msgstr "" + +#: cinder/volume/manager.py:377 +#, python-format +msgid "volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d" +msgstr "" + +#: cinder/volume/manager.py:406 cinder/volume/manager.py:419 +#, python-format +msgid "volume %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:412 +msgid "volume is not local to this node" +msgstr "" + +#: cinder/volume/manager.py:417 +#, python-format +msgid "volume %s: removing export" +msgstr "" + +#: cinder/volume/manager.py:422 +#, python-format +msgid "volume %s: volume is busy" +msgstr "" + +#: cinder/volume/manager.py:441 +msgid "Failed to update usages deleting volume" +msgstr "" + +#: cinder/volume/manager.py:445 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "" + +#: 
cinder/volume/manager.py:460 +#, python-format +msgid "snapshot %s: creating" +msgstr "" + +#: cinder/volume/manager.py:466 +#, python-format +msgid "snapshot %(snap_name)s: creating" +msgstr "" + +#: cinder/volume/manager.py:484 +#, python-format +msgid "snapshot %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:492 cinder/volume/manager.py:502 +#, python-format +msgid "snapshot %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:505 +#, python-format +msgid "snapshot %s: snapshot is busy" +msgstr "" + +#: cinder/volume/manager.py:530 +msgid "Failed to update usages deleting snapshot" +msgstr "" + +#: cinder/volume/manager.py:533 +#, python-format +msgid "snapshot %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:551 +msgid "being attached by another instance" +msgstr "" + +#: cinder/volume/manager.py:608 +#, python-format +msgid "Downloaded image %(image_id)s to %(volume_id)s successfully" +msgstr "" + +#: cinder/volume/manager.py:626 +#, python-format +msgid "Uploaded volume %(volume_id)s to image (%(image_id)s) successfully" +msgstr "" + +#: cinder/volume/manager.py:702 +msgid "Clear capabilities" +msgstr "" + +#: cinder/volume/manager.py:706 +#, python-format +msgid "Notification {%s} received" +msgstr "" + +#: cinder/volume/volume_types.py:41 +#, python-format +msgid "DB error: %s" +msgstr "" + +#: cinder/volume/volume_types.py:50 cinder/volume/volume_types.py:97 +msgid "id cannot be None" +msgstr "" + +#: cinder/volume/volume_types.py:109 +msgid "name cannot be None" +msgstr "" + +#: cinder/volume/volume_types.py:128 +#, python-format +msgid "" +"Default volume type is not found, please check default_volume_type " +"config: %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:91 +msgid "Running with CoraidDriver for ESM EtherCLoud" +msgstr "" + +#: cinder/volume/drivers/coraid.py:102 +#, python-format +msgid "Update session cookie %(session)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:108 cinder/volume/drivers/coraid.py:183 +#, python-format +msgid "Message : %(message)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:125 +#, python-format +msgid "Error while trying to set group: %(message)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:128 +#, python-format +msgid "Unable to find group: %(group)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:161 +msgid "ESM urlOpen error" +msgstr "" + +#: cinder/volume/drivers/coraid.py:166 +msgid "JSON Error" +msgstr "" + +#: cinder/volume/drivers/coraid.py:170 +msgid "Request without URL" +msgstr "" + +#: cinder/volume/drivers/coraid.py:175 +#, python-format +msgid "Configure data : %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:177 +#, python-format +msgid "Configure response : %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:199 +#, python-format +msgid "Unable to retrive volume infos for volume %(volname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:289 +msgid "Cannot login on Coraid ESM" +msgstr "" + +#: cinder/volume/drivers/coraid.py:308 +#, python-format +msgid "Fail to create volume %(volname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:321 +#, python-format +msgid "Failed to delete volume %(volname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:335 +#, python-format +msgid "Failed to Create Snapshot %(snapname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:347 +#, python-format +msgid "Failed to Delete Snapshot %(snapname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:362 +#, python-format +msgid "Failed to Create Volume from Snapshot 
%(snapname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:383 +#, python-format +msgid "" +"Failed to Initialize Connection. Volume Name: %(volname)s Shelf: " +"%(shelf)s, Lun: %(lun)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:65 +#, python-format +msgid "There's no Gluster config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:70 +#, python-format +msgid "Gluster config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:80 +msgid "mount.glusterfs is not installed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:98 cinder/volume/drivers/nfs.py:168 +#, python-format +msgid "casted to %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:108 cinder/volume/drivers/nfs.py:178 +#, python-format +msgid "Volume %s does not have provider_location specified, skipping" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:168 cinder/volume/drivers/nfs.py:237 +#, python-format +msgid "Exception during mounting %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:247 cinder/volume/drivers/nfs.py:321 +#, python-format +msgid "%s is already mounted" +msgstr "" + +#: cinder/volume/drivers/lvm.py:82 +#, python-format +msgid "volume group %s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/lvm.py:197 +#, python-format +msgid "Size for volume: %s not found, skipping secure delete." +msgstr "" + +#: cinder/volume/drivers/lvm.py:204 +#, python-format +msgid "Performing secure delete on volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:218 +#, python-format +msgid "Error unrecognized volume_clear option: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:238 +#, python-format +msgid "snapshot: %s not found, skipping delete operations" +msgstr "" + +#: cinder/volume/drivers/lvm.py:268 cinder/volume/drivers/lvm.py:656 +#, python-format +msgid "Creating clone of volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:345 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provision for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:364 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:378 +msgid "Detected inconsistency in provider_location id" +msgstr "" + +#: cinder/volume/drivers/lvm.py:430 +#, python-format +msgid "Symbolic link %s not found" +msgstr "" + +#: cinder/volume/drivers/lvm.py:510 cinder/volume/drivers/lvm.py:524 +#, python-format +msgid "Skipping remove_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:542 +#, python-format +msgid "" +"Skipping remove_export. 
No iscsi_target is presently exported for volume:" +" %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:583 +msgid "Error retrieving volume status: " +msgstr "" + +#: cinder/volume/drivers/nfs.py:141 +#, python-format +msgid "There's no NFS config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/nfs.py:146 +#, python-format +msgid "NFS config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/rbd.py:64 +#, python-format +msgid "rbd has no pool %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:87 cinder/volume/drivers/sheepdog.py:134 +msgid "error refreshing volume stats" +msgstr "" + +#: cinder/volume/drivers/rbd.py:220 +msgid "Not stored in rbd" +msgstr "" + +#: cinder/volume/drivers/rbd.py:224 +msgid "Blank components" +msgstr "" + +#: cinder/volume/drivers/rbd.py:227 +msgid "Not an rbd snapshot" +msgstr "" + +#: cinder/volume/drivers/rbd.py:242 +#, python-format +msgid "%s is in a different ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:253 +#, python-format +msgid "Unable to read image %s" +msgstr "" + +#: cinder/volume/drivers/scality.py:63 +msgid "Value required for 'scality_sofs_config'" +msgstr "" + +#: cinder/volume/drivers/scality.py:74 +#, python-format +msgid "Cannot access 'scality_sofs_config': %s" +msgstr "" + +#: cinder/volume/drivers/scality.py:80 +msgid "Cannot execute /sbin/mount.sofs" +msgstr "" + +#: cinder/volume/drivers/scality.py:101 +msgid "Cannot mount Scality SOFS, check syslog for errors" +msgstr "" + +#: cinder/volume/drivers/scality.py:135 +#, python-format +msgid "Cannot find volume dir for Scality SOFS at '%s'" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:47 +#, python-format +msgid "Sheepdog is not working: %s" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:52 +msgid "Sheepdog is not working" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:129 +#, python-format +msgid "Payload for SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:146 +#, python-format +msgid "Call to json.loads() raised an exception: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:152 +#, python-format +msgid "Results of SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:156 +#, python-format +msgid "Clone operation encountered: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:158 +#, python-format +msgid "Waiting for outstanding operation before retrying snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:164 +#, python-format +msgid "Detected xDBVersionMismatch, retry %s of 5" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:171 +#, python-format +msgid "API response: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:191 +#, python-format +msgid "Found solidfire account: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:220 +#, python-format +msgid "solidfire account: %s does not exist, create it..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:281 +#, python-format +msgid "Failed to retrieve volume SolidFire-ID: %s in get_by_account!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:338 +msgid "Failed to get model update from clone" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:364 +#, python-format +msgid "More than one valid preset was detected, using %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:397 +#, python-format +msgid "Mapped SolidFire volumeID %(sfid)s to cinder ID %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:406 +#, python-format +msgid "Volume %s, not found on SF Cluster." 
+msgstr "" + +#: cinder/volume/drivers/solidfire.py:409 +#, python-format +msgid "Found %(count)s volumes mapped to id: %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:476 +msgid "Enter SolidFire delete_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:480 +#, python-format +msgid "Account for Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:482 +msgid "This usually means the volume was never succesfully created." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:497 +#, python-format +msgid "Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:500 +msgid "Leaving SolidFire delete_volume" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:504 +msgid "Executing SolidFire ensure_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:509 +msgid "Executing SolidFire create_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:558 +msgid "Updating cluster status info" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:566 +msgid "Failed to get updated stats" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:173 +#, python-format +msgid "WWPN on node %(node)s: %(wwpn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:179 +msgid "enter: do_setup" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:190 +#, python-format +msgid "pool %s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:206 +msgid "Failed to get license information." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:265 +msgid "do_setup: No configured nodes" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:267 +msgid "leave: do_setup" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:289 +msgid "enter: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:295 +#: cinder/volume/drivers/netapp/iscsi.py:169 +#: cinder/volume/drivers/netapp/iscsi.py:1182 +#: cinder/volume/drivers/netapp/iscsi.py:1535 +#: cinder/volume/drivers/netapp/nfs.py:109 +#: cinder/volume/drivers/netapp/nfs.py:328 +#: cinder/volume/drivers/netapp/nfs.py:379 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:118 +#, python-format +msgid "%s is not set" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:301 +msgid "" +"Password or SSH private key is required for authentication: set either " +"san_password or san_private_key option" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:309 +#, python-format +msgid "" +"Illegal value %d specified for storwize_svc_flashcopy_timeout: valid " +"values are between 0 and 600" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:317 +msgid "leave: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:331 +#, python-format +msgid "ensure_export: Volume %s not found on storage" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:356 +#, python-format +msgid "enter: _get_chap_secret_for_host: host name %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:392 +#, python-format +msgid "" +"leave: _get_chap_secret_for_host: host name %(host_name)s with secret " +"%(chap_secret)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:415 +msgid "_create_host: Cannot clean host name. 
Host name is not unicode or string" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:483 +#, python-format +msgid "enter: _get_host_from_connector: prefix %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:511 +#, python-format +msgid "leave: _get_host_from_connector: host %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:523 +#, python-format +msgid "enter: _create_host: host %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:538 +msgid "_create_host: No connector ports" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:551 +#, python-format +msgid "leave: _create_host: host %(host)s - %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:574 +#, python-format +msgid "enter: _map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:609 +msgid "" +"storwize_svc_multihostmap_enabled is set to Flase, Not allow multi host " +"mapping" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:619 +#, python-format +msgid "volume %s mapping to multi host" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:625 +#, python-format +msgid "" +"leave: _map_vol_to_host: LUN %(result_lun)s, volume %(volume_name)s, host" +" %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:635 +#, python-format +msgid "enter: _delete_host: host %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:643 +#, python-format +msgid "leave: _delete_host: host %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:675 +#, python-format +msgid "enter: initialize_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:691 +msgid "_create_host failed to return the host name." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:702 +#, python-format +msgid "initialize_connection: Failed to get attributes for volume %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:709 +#, python-format +msgid "Did not find expected column name in lsvdisk: %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:711 +#, python-format +msgid "initialize_connection: Missing volume attribute for volume %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:728 +#, python-format +msgid "" +"initialize_connection: No node found in I/O group %(gid)s for volume " +"%(vol)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:736 +#, python-format +msgid "initialize_connection: Did not find a preferred node for volume %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:769 +#, python-format +msgid "" +"initialize_connection: Failed to collect return properties for volume " +"%(vol)s and connector %(conn)s.\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:774 +#, python-format +msgid "" +"leave: initialize_connection:\n" +" volume: %(vol)s\n" +" connector %(conn)s\n" +" properties: %(prop)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:792 +#, python-format +msgid "enter: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:802 +msgid "_get_host_from_connector failed to return the host name for connector" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:817 +#, python-format +msgid "" +"terminate_connection: No mapping of volume %(vol_name)s to host " +"%(host_name)s found" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:825 +#, python-format +msgid "leave: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: 
cinder/volume/drivers/storwize_svc.py:883 +msgid "protocol must be specified as ' iSCSI' or ' FC'" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:907 +#, python-format +msgid "enter: _create_vdisk: vdisk %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:945 +#, python-format +msgid "" +"_create_vdisk %(name)s - did not find success message in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:950 +#, python-format +msgid "leave: _create_vdisk: volume %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:962 +#: cinder/volume/drivers/storwize_svc.py:976 +#, python-format +msgid "" +"create FC mapping from %(source)s to %(target)s - did not find success " +"message in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:988 +#: cinder/volume/drivers/storwize_svc.py:998 +#, python-format +msgid "" +"create FC mapping from %(source)s to %(target)s - did not find mapping id" +" in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1012 +#, python-format +msgid "" +"_prepare_fc_map: Failed to prepare FlashCopy from %(source)s to " +"%(target)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1039 +#, python-format +msgid "" +"Unexecpted mapping status %(status)s for mapping %(id)s. Attributes: " +"%(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1050 +#, python-format +msgid "" +"Mapping %(id)s prepare failed to complete within the allotted %(to)d " +"seconds timeout. Terminating." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1055 +#, python-format +msgid "" +"_prepare_fc_map: Failed to start FlashCopy from %(source)s to %(target)s " +"with exception %(ex)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1062 +#, python-format +msgid "_prepare_fc_map: %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1069 +#, python-format +msgid "" +"_start_fc_map: Failed to start FlashCopy from %(source)s to %(target)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1080 +#, python-format +msgid "" +"enter: _run_flashcopy: execute FlashCopy from source %(source)s to target" +" %(target)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1092 +#, python-format +msgid "leave: _run_flashcopy: FlashCopy started from %(source)s to %(target)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1100 +#, python-format +msgid "enter: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1107 +#, python-format +msgid "_create_copy: Source vdisk %s does not exist" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1119 +#, python-format +msgid "" +"_create_copy: cannot get source vdisk %(src)s capacity from vdisk " +"attributes %(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1129 +#, python-format +msgid "leave: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1134 +#, python-format +msgid "enter: _get_flashcopy_mapping_attributes: mapping %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1156 +#, python-format +msgid "" +"leave: _get_flashcopy_mapping_attributes: mapping %(fc_map_id)s, " +"attributes %(attributes)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1164 +#, python-format +msgid 
"enter: _is_vdisk_defined: vdisk %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1166 +#, python-format +msgid "leave: _is_vdisk_defined: vdisk %(vol)s with %(str)s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1194 +#, python-format +msgid "enter: _delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1199 +#, python-format +msgid "warning: Tried to delete vdisk %s but it does not exist." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1225 +#, python-format +msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1258 +#, python-format +msgid "leave: _delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1283 +msgid "create_volume_from_snapshot: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1297 +msgid "create_cloned_volume: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1361 +msgid "_update_volume_status: Could not get system name" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1373 +msgid "Could not get pool data from the storage" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1374 +msgid "_update_volume_status: Could not get storage pool data" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1412 +msgid "" +"Illegal value specified for storwize_svc_vol_rsize: set to either a " +"percentage (0-100) or -1" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1418 +msgid "" +"Illegal value specified for storwize_svc_vol_warning: set to a percentage" +" (0-100)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1425 +msgid "" +"Illegal value specified for storwize_svc_vol_grainsize: set to either 32," +" 64, 128, or 256" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1432 +msgid "System does not support compression" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1437 +msgid "If compression is set to True, rsize must also be set (not equal to -1)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1443 +#, python-format +msgid "" +"Illegal value %(prot)s specified for storwize_svc_connection_protocol: " +"valid values are %(enabled)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1452 +msgid "" +"Multipath is currently only supported for FC connections and not iSCSI. 
" +"(This is a Nova limitation.)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1464 +#, python-format +msgid "enter: _execute_command_and_parse_attributes: command %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1471 +#, python-format +msgid "" +"CLI Exception output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1488 +#, python-format +msgid "" +"leave: _execute_command_and_parse_attributes:\n" +"command: %(cmd)s\n" +"attributes: %(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1507 +#, python-format +msgid "" +"_get_hdr_dic: attribute headers and values do not match.\n" +" Headers: %(header)s\n" +" Values: %(row)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1517 +#, python-format +msgid "" +"%(fun)s: Failed with unexpected CLI output.\n" +" Command: %(cmd)s\n" +"stdout: %(out)s\n" +"stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1531 +#, python-format +msgid "" +"%(fun)s: Failed with unexpected CLI output.\n" +" Command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1539 +#, python-format +msgid "Did not find expected column in %(fun)s: %(hdr)s" +msgstr "" + +#: cinder/volume/drivers/windows.py:142 +#, python-format +msgid "Creating folder %s " +msgstr "" + +#: cinder/volume/drivers/windows.py:207 +#: cinder/volume/drivers/nexenta/volume.py:203 +#, python-format +msgid "Ignored target creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/windows.py:213 +#, python-format +msgid "Disk not found: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:218 +#, python-format +msgid "Sending %(method)s to %(url)s. Body \"%(body)s\"" +msgstr "" + +#: cinder/volume/drivers/zadara.py:240 +#, python-format +msgid "Operation completed. %(data)s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:350 +#, python-format +msgid "Volume %(name)s could not be found. It might be already deleted" +msgstr "" + +#: cinder/volume/drivers/zadara.py:447 +#, python-format +msgid "Attach properties: %(properties)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:42 +msgid "" +"Module PyWBEM not installed. Install PyWBEM using the python-pywbem " +"package." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:78 +msgid "Entering create_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:82 +#, python-format +msgid "Create Volume: %(volume)s Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:90 +#, python-format +msgid "Create Volume: %(volume)s Storage type: %(storage_type)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:97 +#, python-format +msgid "" +"Create Volume: %(volume)s Pool: %(pool)s Storage System: " +"%(storage_system)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:106 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Storage Configuration Service not " +"found for pool %(storage_type)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:114 +#, python-format +msgid "" +"Create Volume: %(name)s Method: CreateOrModifyElementFromStoragePool " +"ConfigServicie: %(service)s ElementName: %(name)s InPool: %(pool)s " +"ElementType: 5 Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:129 +#, python-format +msgid "Create Volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:136 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:143 +#, python-format +msgid "Leaving create_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:151 +msgid "Entering create_volume_from_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:156 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:166 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Snapshot Instance: %(snapshotinstance)s Storage " +"System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:176 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Create Volume from Snapshot is NOT supported on VMAX." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:187 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Cannot find Replication Service to create volume from " +"snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:196 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Method: CreateElementReplica ReplicationService: " +"%(service)s ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:217 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s " +"Snapshot:%(snapshotname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:229 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Successfully clone volume from snapshot. Finding the " +"clone relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:240 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Remove the clone relationship. Method: " +"ModifyReplicaSynchronization ReplicationService: %(service)s Operation: " +"8 Synchronization: %(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:256 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:265 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. 
Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:277 +#, python-format +msgid "" +"Leaving create_volume_from_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:286 +msgid "Entering create_cloned_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:291 +#, python-format +msgid "" +"Create a Clone from Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:301 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Source Instance: %(src_instance)s Storage System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:311 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Cannot find Replication Service to create cloned volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:320 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Method: CreateElementReplica ReplicationService: %(service)s " +"ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:341 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source " +"Volume:%(srcname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:353 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Successfully cloned volume from source volume. Finding the clone " +"relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:364 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Remove the clone relationship. Method: ModifyReplicaSynchronization " +"ReplicationService: %(service)s Operation: 8 Synchronization: " +"%(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:380 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:389 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:401 +#, python-format +msgid "" +"Leaving create_cloned_volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:410 +msgid "Entering delete_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:412 +#, python-format +msgid "Delete Volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:419 +#, python-format +msgid "Volume %(name)s not found on the array. No volume to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:429 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Storage Configuration Service not " +"found." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:437 +#, python-format +msgid "Delete Volume: %(name)s DeviceID: %(deviceid)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:441 +#, python-format +msgid "" +"Delete Volume: %(name)s Method: EMCReturnToStoragePool ConfigServic: " +"%(service)s TheElement: %(vol_instance)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:454 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:463 +#, python-format +msgid "Leaving delete_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:470 +msgid "Entering create_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:474 +#, python-format +msgid "Create snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:486 +#, python-format +msgid "Device ID: %(deviceid)s: Storage System: %(storagesystem)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:493 +#: cinder/volume/drivers/emc/emc_smis_common.py:495 +#: cinder/volume/drivers/emc/emc_smis_common.py:565 +#, python-format +msgid "Cannot find Replication Service to create snapshot for volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:500 +#, python-format +msgid "" +"Create Snapshot: Method: CreateElementReplica: Target: %(snapshot)s " +"Source: %(volume)s Replication Service: %(service)s ElementName: " +"%(elementname)s Sync Type: 7 SourceElement: %(sourceelement)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:516 +#, python-format +msgid "" +"Create Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:525 +#, python-format +msgid "Error Create Snapshot: (snapshot)s Volume: %(volume)s Error: %(errordesc)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:533 +#, python-format +msgid "" +"Leaving create_snapshot: Snapshot: %(snapshot)s Volume: %(volume)s " +"Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:539 +msgid "Entering delete_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:543 +#, python-format +msgid "Delete Snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:549 +#, python-format +msgid "" +"Delete Snapshot: %(snapshot)s: volume: %(volume)s. Finding " +"StorageSychronization_SV_SV." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:557 +#, python-format +msgid "" +"Snapshot: %(snapshot)s: volume: %(volume)s not found on the array. No " +"snapshot to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:572 +#, python-format +msgid "" +"Delete Snapshot: Target: %(snapshot)s Source: %(volume)s. Method: " +"ModifyReplicaSynchronization: Replication Service: %(service)s " +"Operation: 19 Synchronization: %(sync_name)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:588 +#, python-format +msgid "" +"Delete Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:597 +#, python-format +msgid "" +"Error Delete Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. 
Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:609 +#, python-format +msgid "" +"Leaving delete_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:619 +#, python-format +msgid "Create export: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:624 +#, python-format +msgid "create_export: Volume: %(volume)s Device ID: %(device_id)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:646 +#, python-format +msgid "" +"ExposePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(lun_name)s" +" InitiatorPortIDs: %(initiator)s DeviceAccesses: 2" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:661 +#, python-format +msgid "ExposePaths parameter LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:672 +#, python-format +msgid "Error mapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:676 +#, python-format +msgid "ExposePaths for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:692 +#, python-format +msgid "" +"HidePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(device_id)s " +"LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:705 +#, python-format +msgid "Error unmapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:709 +#, python-format +msgid "HidePaths for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:722 +#, python-format +msgid "" +"AddMembers: ConfigServicie: %(service)s MaskingGroup: %(masking_group)s" +" Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:735 +#, python-format +msgid "Error mapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:740 +#, python-format +msgid "AddMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:753 +#, python-format +msgid "" +"RemoveMembers: ConfigServicie: %(service)s MaskingGroup: " +"%(masking_group)s Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:766 +#, python-format +msgid "Error unmapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:771 +#, python-format +msgid "RemoveMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:777 +#, python-format +msgid "Map volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:786 +#: cinder/volume/drivers/emc/emc_smis_common.py:816 +#, python-format +msgid "Cannot find Controller Configuration Service for storage system %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:800 +#, python-format +msgid "Unmap volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:806 +#, python-format +msgid "Volume %s is not mapped. No volume to unmap." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:830 +#, python-format +msgid "Initialize connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:836 +#, python-format +msgid "Volume %s is already mapped." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:848 +#, python-format +msgid "Terminate connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:880 +#, python-format +msgid "Found Storage Type: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:883 +msgid "Storage type not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:899 +#, python-format +msgid "Found Masking View: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:902 +msgid "Masking View not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:924 +msgid "Ecom user not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:944 +#, python-format +msgid "Ecom IP: %(ecomIp)s Port: %(ecomPort)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:947 +msgid "Ecom server not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:954 +msgid "Cannot connect to ECOM server" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:966 +#, python-format +msgid "Found Replication Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:979 +#, python-format +msgid "Found Storage Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:992 +#, python-format +msgid "Found Controller Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1005 +#, python-format +msgid "Found Storage Hardware ID Management Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1049 +#, python-format +msgid "Pool %(storage_type)s is not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1055 +#, python-format +msgid "Storage system not found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1061 +#, python-format +msgid "Pool: %(pool)s SystemName: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1077 +#, python-format +msgid "Pool name: %(poolname)s System name: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1109 +#, python-format +msgid "Volume %(volumename)s not found on the array." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1112 +#, python-format +msgid "Volume name: %(volumename)s Volume instance: %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1125 +#, python-format +msgid "Source: %(volumename)s Target: %(snapshotname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1148 +#, python-format +msgid "" +"Source: %(volumename)s Target: %(snapshotname)s. Storage Synchronized " +"not found. " +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1153 +#, python-format +msgid "" +"Storage system: %(storage_system)s Storage Synchronized instance: " +"%(sync)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1179 +#, python-format +msgid "Error finding %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1183 +#, python-format +msgid "Found %(name)s: %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1243 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage_system)s " +"and initiator %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1282 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage volume %(vol)s and initiator" +" %(initiator)s is %(ctrl)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1295 +#, python-format +msgid "" +"Volume %(name)s not found on the array. Cannot determine if there are " +"volumes mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1307 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage)s and " +"%(connector)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1319 +#, python-format +msgid "" +"Found %(numVolumesMapped)d volumes on storage system %(storage)s mapped " +"to %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1354 +#, python-format +msgid "Available device number on %(storage)s: %(device)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1397 +#, python-format +msgid "Device number not found for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1402 +#, python-format +msgid "Found device number %(device)d for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1412 +#, python-format +msgid "Device info: %(data)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1434 +#, python-format +msgid "Masking view: %(view)s DeviceMaskingGroup: %(masking)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1456 +#, python-format +msgid "Found Storage Processor System: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1484 +#, python-format +msgid "" +"iSCSIProtocolEndpoint for storage system %(storage_system)s and SP %(sp)s" +" is %(endpoint)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1513 +msgid "Error finding Storage Hardware ID Service." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1519 +#, python-format +msgid "" +"EMCGetTargetEndpoints: Service: %(service)s Storage HardwareIDs: " +"%(hardwareids)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1531 +msgid "Error finding Target WWNs." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1541 +#, python-format +msgid "Add target WWN: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1543 +#, python-format +msgid "Target WWNs: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1559 +#, python-format +msgid "Storage Hardware IDs for %(wwpns)s is %(foundInstances)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:158 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:167 +#, python-format +msgid "Cannot find device number for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:197 +#, python-format +msgid "Found iSCSI endpoint: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:204 +#, python-format +msgid "ISCSI endpoint not found for SP %(sp)s on storage system %(storage)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:221 +#, python-format +msgid "ISCSI properties: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:113 +msgid "read timed out" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:138 +msgid "do_setup." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:144 +msgid "check_for_setup_error." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:149 +msgid "check_for_setup_error: Can not get device type." 
+msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:153 +#, python-format +msgid "check_for_setup_error: Device type is:%(type)s, version is:%(version)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:160 +msgid "" +"check_for_setup_error: Product version not right. Please make sure the " +"product version is V1." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:171 +msgid "_get_device_type: Storage Pool must be configured." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:180 +#, python-format +msgid "create_volume:volume name: %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:194 +#, python-format +msgid "delete_volume: volume name: %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:201 +#, python-format +msgid "delete_volume:No need to delete volume. Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:210 +#, python-format +msgid "create_export: volume name:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:214 +#, python-format +msgid "create_export:Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:234 +#, python-format +msgid "initialize_connection: volume name: %(volume)s. initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:249 +#, python-format +msgid "" +"initialize_connection:Failed to find target ip for " +"initiator:%(initiatorname)s, please check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:259 +#, python-format +msgid "initialize_connection:Failed to find target iSCSI iqn. Target IP:%(ip)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:297 +#, python-format +msgid "" +"initialize_connection:host name: %(host)s, initiator name: %(ini)s, " +"hostport name: %(port)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:307 +#, python-format +msgid "" +"initialize_connection:Failed to find the given volume. volume " +"name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:359 +#, python-format +msgid "terminate_connection:volume name: %(volume)s, initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:368 +#, python-format +msgid "terminate_connection:Host does not exist. Host name:%(host)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:377 +#, python-format +msgid "terminate_connection:volume does not exist. volume name:%(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:396 +#, python-format +msgid "" +"terminate_connection:No map between host and volume. Host " +"name:%(hostname)s, volume name:%(volumename)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:414 +#, python-format +msgid "" +"terminate_connection:No initiator is added to the host. Host " +"name:%(hostname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:427 +#, python-format +msgid "create_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:434 +msgid "create_snapshot:Device does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:440 +msgid "create_snapshot:Resource pool needs 1GB valid size at least." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:447 +#, python-format +msgid "create_snapshot:Volume does not exist. 
Volume name:%(name)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:456
+#, python-format
+msgid "create_snapshot:Snapshot does not exist. Snapshot name:%(name)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:468
+#, python-format
+msgid "delete_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:475
+msgid "delete_snapshot:Device does not support snapshot."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:484
+#, python-format
+msgid "delete_snapshot:Snapshot does not exist. snapshot name:%(snap)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:497
+#, python-format
+msgid ""
+"create_volume_from_snapshot:snapshot name:%(snapshot)s, volume "
+"name:%(volume)s."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:505
+#, python-format
+msgid ""
+"create_volume_from_snapshot:Device does not support create volume from "
+"snapshot. Volume name:%(volume)s, snapshot name:%(snapshot)s."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:516
+#, python-format
+msgid ""
+"create_volume_from_snapshot:Snapshot does not exist. Snapshot "
+"name:%(name)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:567
+msgid "Config file is wrong. Controller IP, UserName and UserPassword must be set."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:573
+#, python-format
+msgid "_check_conf_file: %s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:584
+#, python-format
+msgid "_read_xml:%s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:612
+#, python-format
+msgid "Write login information to xml error. %s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:616
+#, python-format
+msgid "_get_login_info error. %s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:638
+#, python-format
+msgid ""
+"Config file is wrong. LUNType must be \"Thin\" or \"Thick\". "
+"LUNType:%(type)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:665
+msgid "_get_lun_set_info:Use default prefetch type. Prefetch type:Intelligent."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:676
+msgid ""
+"_get_lun_set_info:No available pools! Please check whether storage pool "
+"is created."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:686
+#, python-format
+msgid "_get_lun_set_info:%s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:736
+msgid ""
+"_get_maximum_pool:maxpoolid is None. Please check config file and make "
+"sure the \"Name\" in \"StoragePool\" is right."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:754
+#, python-format
+msgid "_get_iscsi_info:%s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:764
+#, python-format
+msgid "CLI command:%s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:831
+#, python-format
+msgid "_execute_cli:%s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:838
+#, python-format
+msgid ""
+"_name_translate:Name in cinder: %(old)s, new name in storage system: "
+"%(new)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:872
+#, python-format
+msgid ""
+"_create_hostgroup:Failed to Create hostgroup. Hostgroup name: %(name)s. "
+"out:%(out)s."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:901
+#, python-format
+msgid ""
+"_add_host:Failed to add host to hostgroup. 
host name:%(host)s hostgroup "
+"id:%(hostgroup)s out:%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:927
+#, python-format
+msgid ""
+"_add_initiator:Failed to add initiator. initiator name:%(name)s "
+"out:%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:941
+#, python-format
+msgid ""
+"_delete_initiator:ERROR:Failed to delete initiator. initiator "
+"name:%(name)s out:%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:991
+#, python-format
+msgid ""
+"_add_hostport:Failed to add hostport. port name:%(port)s port "
+"information:%(info)s host id:%(host)s out:%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1009
+#, python-format
+msgid "_delete_hostport:Failed to delete host port. port id:%(portid)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1016
+#, python-format
+msgid "_get_tgt_iqn:iSCSI IP is %s."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1049
+#, python-format
+msgid "_get_tgt_iqn:iSCSI target iqn is:%s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1086
+#, python-format
+msgid ""
+"_map_lun:Failed to add hostmap. hostid:%(host)s lunid:%(lun)s "
+"hostlunid:%(hostlunid)s out:%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1122
+#, python-format
+msgid ""
+"_delete_map:There are IOs accessing the system. Retry to delete host map."
+" map id:%(mapid)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1128
+#, python-format
+msgid "_delete_map:Failed to delete host map. mapid:%(mapid)s out:%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1142
+#, python-format
+msgid "_delete_host: Failed to delete host. host id:%(hostid)s out:%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1223
+#, python-format
+msgid ""
+"_active_snapshot:Failed to activate snapshot. snapshot id:%(name)s. "
+"out:%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1237
+#, python-format
+msgid ""
+"_disable_snapshot:Failed to disable snapshot. snapshot id:%(id)s. "
+"out:%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1251
+#, python-format
+msgid ""
+"_delete_snapshot:Failed to delete snapshot. snapshot id:%(id)s. "
+"out:%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1308
+#, python-format
+msgid "_create_volume:Failed to Create volume. volume name:%(name)s. out:%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1321
+#, python-format
+msgid "_delete_volume:Failed to delete volume. Volume name:%(name)s out:%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1338
+#, python-format
+msgid ""
+"_create_luncopy:Failed to Create LUNcopy. LUNcopy name:%(name)s "
+"out:%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1352
+#, python-format
+msgid ""
+"_start_luncopy:Failed to start LUNcopy. LUNcopy id:%(luncopyid)s "
+"out:%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1382
+#, python-format
+msgid ""
+"_wait_for_luncopy:LUNcopy status is not normal. LUNcopy "
+"name:%(luncopyname)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1415
+#, python-format
+msgid ""
+"_delete_luncopy:Failed to delete LUNcopy. LUNcopy id:%(luncopyid)s "
+"out:%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1430
+#, python-format
+msgid ""
+"_create_snapshot:Failed to Create snapshot. 
Snapshot name:%(name)s "
+"out:%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1468
+#, python-format
+msgid ""
+"_change_lun_controller:Failed to change lun owning controller. lun "
+"id:%(lunid)s. new controller:%(controller)s. out:%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1484
+msgid "_is_resource_pool_enough:Resource pool for snapshot has not been added."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:123
+#, python-format
+msgid "API %(name)s failed: %(reason)s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:134
+#: cinder/volume/drivers/netapp/iscsi.py:1167
+#, python-format
+msgid "Using WSDL: %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:144
+#, python-format
+msgid "Using DFM server: %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:149
+#, python-format
+msgid "Using storage service: %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:154
+#, python-format
+msgid "Using storage service prefix: %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:160
+#, python-format
+msgid "Using vfiler: %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:173
+msgid "Either netapp_storage_service or netapp_storage_service_prefix must be set"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:204
+msgid "Connected to DFM server"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:282
+#, python-format
+msgid "Discovered %(dataset_count)s datasets and %(lun_count)s LUNs"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:317
+#, python-format
+msgid "Job failed: %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:346
+msgid ""
+"Attempt to use volume_type without specifying "
+"netapp_storage_service_prefix flag."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:350
+msgid ""
+"You must set the netapp_storage_service flag in order to create volumes "
+"with no volume_type."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:420
+msgid "Failed to provision dataset member"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:435
+msgid "No LUN was created by the provision job"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:463
+#: cinder/volume/drivers/netapp/iscsi.py:1250
+#, python-format
+msgid "No entry in LUN table for volume %(name)s."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:481
+msgid "Failed to remove and delete dataset LUN member"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:497
+msgid "Failed to remove and delete dataset Qtree member"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:538
+#, python-format
+msgid "No entry in LUN table for volume %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:558
+#, python-format
+msgid "Failed to get LUN details for LUN ID %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:575
+#, python-format
+msgid "Failed to get host details for host ID %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:812
+#: cinder/volume/drivers/netapp/iscsi.py:859
+#, python-format
+msgid "No LUN ID for volume %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:821
+#, python-format
+msgid "Failed to get target portal for filer: %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:826
+#, python-format
+msgid "Failed to get target IQN for filer: %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:930
+msgid ""
+"Failed to get LUN list. Is the DFM host time-synchronized with Cinder "
+"host?"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:1042
+#: cinder/volume/drivers/netapp/iscsi.py:1381
+#: cinder/volume/drivers/netapp/iscsi.py:1699
+#: cinder/volume/drivers/netapp/nfs.py:74
+#, python-format
+msgid ""
+"Cannot create volume of size %(vol_size)s from snapshot of size "
+"%(snap_size)s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:1054
+#, python-format
+msgid ""
+"Cannot create volume of type %(new_type)s from snapshot of type "
+"%(old_type)s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:1075
+#: cinder/volume/drivers/netapp/nfs.py:273
+#, python-format
+msgid ""
+"Cannot create clone of size %(vol_size)s from volume of size "
+"%(src_vol_size)s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:1086
+#, python-format
+msgid "Cannot create clone of type %(new_type)s from volume of type %(old_type)s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:1143
+#, python-format
+msgid "No metadata property %(prop)s defined for the LUN %(name)s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:1216
+#: cinder/volume/drivers/netapp/iscsi.py:1561
+msgid "Success getting LUN list from server"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:1238
+#: cinder/volume/drivers/netapp/iscsi.py:1576
+#, python-format
+msgid "Created LUN with name %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:1254
+#: cinder/volume/drivers/netapp/iscsi.py:1369
+#: cinder/volume/drivers/netapp/iscsi.py:1593
+#, python-format
+msgid "Destroyed LUN %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:1291
+#, python-format
+msgid "Mapped LUN %(handle)s to the initiator %(initiator_name)s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:1298
+#, python-format
+msgid ""
+"Successfully fetched target details for LUN %(handle)s and initiator "
+"%(initiator_name)s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:1303
+#: cinder/volume/drivers/netapp/iscsi.py:1637
+#, python-format
+msgid "Failed to get LUN target details for the LUN %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:1307
+#: cinder/volume/drivers/netapp/iscsi.py:1648
+#, python-format
+msgid "Failed to get target portal for the LUN %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:1311
+#: cinder/volume/drivers/netapp/iscsi.py:1651
+#, python-format
+msgid "Failed to get target IQN for the LUN %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:1344
+#, python-format
+msgid "Unmapped LUN %(handle)s from the initiator %(initiator_name)s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:1365
+#, python-format
+msgid "No entry in LUN table for snapshot %(name)s."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:1409
+#: cinder/volume/drivers/netapp/iscsi.py:1907
+msgid "Object is not a NetApp LUN."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:1419
+#: cinder/volume/drivers/netapp/iscsi.py:2188
+#, python-format
+msgid "Cloned LUN with new name %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:1439
+#, python-format
+msgid "Could not find handle for LUN named %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:1458
+#: cinder/volume/drivers/netapp/iscsi.py:1936
+#, python-format
+msgid ""
+"Cannot clone volume of size %(vol_size)s from src volume of size "
+"%(src_vol_size)s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:1518
+#, python-format
+msgid "Using NetApp filer: %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:1585
+#, python-format
+msgid "No entry in LUN table for volume/snapshot %(name)s."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:1628
+#, python-format
+msgid "Mapped LUN %(name)s to the initiator %(initiator_name)s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:1632
+#, python-format
+msgid ""
+"Successfully fetched target details for LUN %(name)s and initiator "
+"%(initiator_name)s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:1688
+#, python-format
+msgid "Snapshot %s deletion successful"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:1717
+#, python-format
+msgid "Unmapped LUN %(name)s from the initiator %(initiator_name)s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:1735
+#, python-format
+msgid "Failed to get vol with required size for volume: %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:1811
+#, python-format
+msgid "Error mapping lun. Code :%(code)s, Message:%(message)s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:1829
+#, python-format
+msgid "Error unmapping lun. Code :%(code)s, Message:%(message)s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:1923
+#, python-format
+msgid "Could not find attribute for LUN named %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:2057
+#, python-format
+msgid "No iscsi service found for vserver %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:2192
+#, python-format
+msgid "No cloned lun named %s found on the filer"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:2377
+#, python-format
+msgid "Error finding luns for volume %(vol)s. Verify volume exists."
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2481 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s completed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2484 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s failed" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:312 +#, python-format +msgid "" +"Cloning with params ip %(host_ip)s, exp_path\n" +" %(export_path)s, vol %(volume_name)s,\n" +" clone_name %(clone_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:461 +#, python-format +msgid "No interface found on cluster for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:487 +#, python-format +msgid "" +"No volume on cluster with vserver\n" +" %(vserver)s and junction path " +"%(junction)s\n" +" " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:493 +#, python-format +msgid "" +"Cloning with params volume %(volume)s,src %(src_path)s,\n" +" dest %(dest_path)s, vserver %(vserver)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:547 +#, python-format +msgid "No storage path found for export path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:555 +#, python-format +msgid "Cloning with src %(src_path)s, dest %(dest_path)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/__init__.py:27 +msgid "Nexenta SA returned the error" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:64 +#, python-format +msgid "Sending JSON data: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:69 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:75 +msgid "No headers in server response" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:76 +msgid "Bad response from server" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:79 +#, python-format +msgid "Got response: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:99 +#, python-format +msgid "Volume %s does not exist in Nexenta SA" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:211 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:220 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:228 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:238 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:274 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:282 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:74 +#, python-format +msgid "CLIQ command returned %s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:80 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:88 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. 
Result=%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/san/hp_lefthand.py:118
+#, python-format
+msgid ""
+"Unexpected number of virtual ips for cluster %(cluster_name)s. "
+"Result=%(_xml)s"
+msgstr ""
+
+#: cinder/volume/drivers/san/hp_lefthand.py:171
+#, python-format
+msgid "Volume info: %(volume_name)s => %(volume_attributes)s"
+msgstr ""
+
+#: cinder/volume/drivers/san/hp_lefthand.py:235
+msgid "local_path not supported"
+msgstr ""
+
+#: cinder/volume/drivers/san/san.py:148
+#, python-format
+msgid "Error running SSH command: %s"
+msgstr ""
+
+#: cinder/volume/drivers/san/san.py:169
+msgid "Specify san_password or san_private_key"
+msgstr ""
+
+#: cinder/volume/drivers/san/san.py:173
+msgid "san_ip must be set"
+msgstr ""
+
+#: cinder/volume/drivers/san/solaris.py:80
+#, python-format
+msgid "Cannot parse list-view output: %s"
+msgstr ""
+
+#: cinder/volume/drivers/san/solaris.py:174
+#, python-format
+msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/san/hp/hp_3par_common.py:257
+#, python-format
+msgid "SSH Command failed after '%(total_attempts)r' attempts: '%(command)s'"
+msgstr ""
+
+#: cinder/volume/drivers/san/hp/hp_3par_common.py:261
+#, python-format
+msgid "Error running ssh command: %s"
+msgstr ""
+
+#: cinder/volume/drivers/san/hp/hp_3par_common.py:446
+#: cinder/volume/drivers/san/hp/hp_3par_fc.py:104
+#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:103
+#, python-format
+msgid "CPG (%s) doesn't exist on array"
+msgstr ""
+
+#: cinder/volume/drivers/san/hp/hp_3par_common.py:493
+#, python-format
+msgid "Must specify a valid persona %(valid)s, value '%(persona)s' is invalid."
+msgstr ""
+
+#: cinder/volume/drivers/san/hp/hp_3par_common.py:532
+#, python-format
+msgid ""
+"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is "
+"invalid."
+msgstr ""
+
+#: cinder/volume/drivers/san/hp/hp_3par_common.py:569
+#, python-format
+msgid "Volume (%s) already exists on array"
+msgstr ""
+
+#: cinder/volume/drivers/san/hp/hp_3par_common.py:622
+#, python-format
+msgid "Unexpected state while cloning %s"
+msgstr ""
+
+#: cinder/volume/drivers/san/hp/hp_3par_fc.py:97
+#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:96
+msgid "Login to 3PAR array invalid"
+msgstr ""
+
+#: cinder/volume/drivers/san/hp/hp_3par_fc.py:218
+#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:237
+#, python-format
+msgid "The hostname must be called '%s'"
+msgstr ""
+
diff --git a/cinder/locale/uk/LC_MESSAGES/cinder.po b/cinder/locale/uk/LC_MESSAGES/cinder.po
new file mode 100644
index 0000000000..751ff76fce
--- /dev/null
+++ b/cinder/locale/uk/LC_MESSAGES/cinder.po
@@ -0,0 +1,5576 @@
+# Ukrainian translation for cinder
+# Copyright (c) 2011 Rosetta Contributors and Canonical Ltd 2011
+# This file is distributed under the same license as the cinder package.
+# FIRST AUTHOR <EMAIL@ADDRESS>, 2011.
+#
+msgid ""
+msgstr ""
+"Project-Id-Version: cinder\n"
+"Report-Msgid-Bugs-To: FULL NAME <EMAIL@ADDRESS>\n"
+"POT-Creation-Date: 2013-05-21 18:10+0000\n"
+"PO-Revision-Date: 2011-08-23 11:21+0000\n"
+"Last-Translator: Thierry Carrez \n"
+"Language-Team: Ukrainian \n"
+"Plural-Forms: nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && "
+"n%10<=4 && (n%100<10 || n%100>=20) ? 
1 : 2)\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=utf-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Generated-By: Babel 0.9.6\n"
+
+#: cinder/context.py:61
+#, python-format
+msgid "Arguments dropped when creating context: %s"
+msgstr ""
+
+#: cinder/context.py:92
+#, python-format
+msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r"
+msgstr ""
+
+#: cinder/exception.py:63
+msgid "Unexpected error while running command."
+msgstr "Неочікувана помилка при виконанні команди."
+
+#: cinder/exception.py:66
+#, python-format
+msgid ""
+"%(description)s\n"
+"Command: %(cmd)s\n"
+"Exit code: %(exit_code)s\n"
+"Stdout: %(stdout)r\n"
+"Stderr: %(stderr)r"
+msgstr ""
+
+#: cinder/exception.py:90
+msgid "DB exception wrapped."
+msgstr ""
+
+#: cinder/exception.py:104
+msgid "An unknown exception occurred."
+msgstr ""
+
+#: cinder/exception.py:125 cinder/openstack/common/rpc/common.py:86
+msgid "Exception in string format operation"
+msgstr ""
+
+#: cinder/exception.py:138
+msgid "Connection to glance failed"
+msgstr ""
+
+#: cinder/exception.py:142
+msgid "Not authorized."
+msgstr ""
+
+#: cinder/exception.py:147
+msgid "User does not have admin privileges"
+msgstr ""
+
+#: cinder/exception.py:151
+#, python-format
+msgid "Policy doesn't allow %(action)s to be performed."
+msgstr ""
+
+#: cinder/exception.py:155
+#, fuzzy, python-format
+msgid "Not authorized for image %(image_id)s."
+msgstr "без порядку для повідомлень: %s"
+
+#: cinder/exception.py:159
+msgid "Unacceptable parameters."
+msgstr ""
+
+#: cinder/exception.py:164
+msgid "Invalid snapshot"
+msgstr ""
+
+#: cinder/exception.py:168
+#, python-format
+msgid "Volume %(volume_id)s is still attached, detach volume first."
+msgstr ""
+
+#: cinder/exception.py:172
+msgid "Failed to load data into json format"
+msgstr ""
+
+#: cinder/exception.py:176
+msgid "The request is invalid."
+msgstr ""
+
+#: cinder/exception.py:180
+msgid "The results are invalid."
+msgstr ""
+
+#: cinder/exception.py:184
+msgid "Invalid input received"
+msgstr ""
+
+#: cinder/exception.py:188
+msgid "Invalid volume type"
+msgstr ""
+
+#: cinder/exception.py:192
+msgid "Invalid volume"
+msgstr ""
+
+#: cinder/exception.py:196
+#, python-format
+msgid "Invalid content type %(content_type)s."
+msgstr ""
+
+#: cinder/exception.py:200
+msgid "Invalid Parameter: Unicode is not supported by the current database."
+msgstr ""
+
+#: cinder/exception.py:207
+#, python-format
+msgid "%(err)s"
+msgstr ""
+
+#: cinder/exception.py:211
+msgid "Service is unavailable at this time."
+msgstr ""
+
+#: cinder/exception.py:215
+#, python-format
+msgid "Image %(image_id)s is unacceptable: %(reason)s"
+msgstr ""
+
+#: cinder/exception.py:219
+msgid "Expected a uuid but received %(uuid)s."
+msgstr ""
+
+#: cinder/exception.py:223
+msgid "Resource could not be found."
+msgstr ""
+
+#: cinder/exception.py:229
+#, python-format
+msgid "Volume %(volume_id)s persistence file could not be found."
+msgstr ""
+
+#: cinder/exception.py:233
+#, python-format
+msgid "Volume %(volume_id)s could not be found."
+msgstr ""
+
+#: cinder/exception.py:237
+#, python-format
+msgid "Unable to locate account %(account_name)s on Solidfire device"
+msgstr ""
+
+#: cinder/exception.py:242
+#, python-format
+msgid "Volume not found for instance %(instance_id)s."
+msgstr ""
+
+#: cinder/exception.py:246
+#, python-format
+msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s."
+msgstr "" + +#: cinder/exception.py:251 cinder/exception.py:264 +msgid "Invalid metadata" +msgstr "" + +#: cinder/exception.py:255 cinder/exception.py:268 +msgid "Invalid metadata size" +msgstr "" + +#: cinder/exception.py:259 +#, python-format +msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:272 +#, python-format +msgid "Volume type %(volume_type_id)s could not be found." +msgstr "" + +#: cinder/exception.py:276 +#, python-format +msgid "Volume type with name %(volume_type_name)s could not be found." +msgstr "" + +#: cinder/exception.py:281 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s has no extra specs with key " +"%(extra_specs_key)s." +msgstr "" + +#: cinder/exception.py:286 +#, python-format +msgid "Snapshot %(snapshot_id)s could not be found." +msgstr "" + +#: cinder/exception.py:290 +#, python-format +msgid "deleting volume %(volume_name)s that has snapshot" +msgstr "" + +#: cinder/exception.py:294 +#, python-format +msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" +msgstr "" + +#: cinder/exception.py:299 +#, python-format +msgid "No target id found for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:303 +#, python-format +msgid "Failed to create iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:307 +#, python-format +msgid "Failed to attach iSCSI target for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:311 +#, python-format +msgid "Failed to remove iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:315 +#, python-format +msgid "No disk at %(location)s" +msgstr "" + +#: cinder/exception.py:319 +#, python-format +msgid "Invalid image href %(image_href)s." +msgstr "" + +#: cinder/exception.py:323 +#, python-format +msgid "Image %(image_id)s could not be found." +msgstr "" + +#: cinder/exception.py:327 +#, python-format +msgid "Service %(service_id)s could not be found." +msgstr "" + +#: cinder/exception.py:331 +#, python-format +msgid "Host %(host)s could not be found." +msgstr "" + +#: cinder/exception.py:335 +#, python-format +msgid "Scheduler Host Filter %(filter_name)s could not be found." +msgstr "" + +#: cinder/exception.py:339 +#, python-format +msgid "Scheduler Host Weigher %(weigher_name)s could not be found." +msgstr "" + +#: cinder/exception.py:343 +#, python-format +msgid "Could not find binary %(binary)s on host %(host)s." +msgstr "" + +#: cinder/exception.py:347 +#, python-format +msgid "Invalid reservation expiration %(expire)s." +msgstr "" + +#: cinder/exception.py:351 +#, python-format +msgid "" +"Change would make usage less than 0 for the following resources: " +"%(unders)s" +msgstr "" + +#: cinder/exception.py:356 +msgid "Quota could not be found" +msgstr "" + +#: cinder/exception.py:360 +#, python-format +msgid "Unknown quota resources %(unknown)s." +msgstr "" + +#: cinder/exception.py:364 +#, python-format +msgid "Quota for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:368 +#, python-format +msgid "Quota class %(class_name)s could not be found." +msgstr "" + +#: cinder/exception.py:372 +#, python-format +msgid "Quota usage for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:376 +#, python-format +msgid "Quota reservation %(uuid)s could not be found." 
+msgstr "" + +#: cinder/exception.py:380 +#, python-format +msgid "Quota exceeded for resources: %(overs)s" +msgstr "" + +#: cinder/exception.py:384 +#, python-format +msgid "Migration %(migration_id)s could not be found." +msgstr "" + +#: cinder/exception.py:388 +#, python-format +msgid "Migration not found for instance %(instance_id)s with status %(status)s." +msgstr "" + +#: cinder/exception.py:393 +#, python-format +msgid "File %(file_path)s could not be found." +msgstr "" + +#: cinder/exception.py:397 +#, python-format +msgid "Class %(class_name)s could not be found: %(exception)s" +msgstr "" + +#: cinder/exception.py:401 +msgid "Action not allowed." +msgstr "" + +#: cinder/exception.py:410 +#, python-format +msgid "Key pair %(key_name)s already exists." +msgstr "" + +#: cinder/exception.py:414 +#, python-format +msgid "Volume Type %(id)s already exists." +msgstr "" + +#: cinder/exception.py:418 +msgid "Migration error" +msgstr "" + +#: cinder/exception.py:422 +#, python-format +msgid "Malformed message body: %(reason)s" +msgstr "" + +#: cinder/exception.py:426 +#, python-format +msgid "Could not find config at %(path)s" +msgstr "" + +#: cinder/exception.py:430 +#, python-format +msgid "Could not load paste app '%(name)s' from %(path)s" +msgstr "" + +#: cinder/exception.py:434 +#, python-format +msgid "No valid host was found. %(reason)s" +msgstr "" + +#: cinder/exception.py:438 +#, python-format +msgid "Host %(host)s is not up or doesn't exist." +msgstr "" + +#: cinder/exception.py:442 +msgid "Quota exceeded" +msgstr "" + +#: cinder/exception.py:449 +msgid "Requested volume or snapshot exceeds allowed Gigabytes quota" +msgstr "" + +#: cinder/exception.py:454 +msgid "Maximum volume/snapshot size exceeded" +msgstr "" + +#: cinder/exception.py:458 +#, python-format +msgid "Maximum number of volumes allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:462 +#, python-format +msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:466 +#, python-format +msgid "Detected more than one volume with name %(vol_name)s" +msgstr "" + +#: cinder/exception.py:470 +#, python-format +msgid "3PAR Host already exists: %(err)s. %(info)s" +msgstr "" + +#: cinder/exception.py:474 +#, python-format +msgid "Invalid 3PAR Domain: %(err)s" +msgstr "" + +#: cinder/exception.py:478 +#, python-format +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +msgstr "" + +#: cinder/exception.py:483 +msgid "Bad response from SolidFire API" +msgstr "" + +#: cinder/exception.py:487 +#, python-format +msgid "Error in SolidFire API response: data=%(data)s" +msgstr "" + +#: cinder/exception.py:491 +#, python-format +msgid "Unknown or unsupported command %(cmd)s" +msgstr "" + +#: cinder/exception.py:495 +#, python-format +msgid "Malformed response to command %(cmd)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:499 +#, python-format +msgid "Bad HTTP response status %(status)s" +msgstr "" + +#: cinder/exception.py:503 +#, python-format +msgid "Operation failed with status=%(status)s. 
Full dump: %(data)s" +msgstr "" + +#: cinder/exception.py:507 +#, python-format +msgid "Unable to create server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:511 +#, python-format +msgid "Unable to find server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:515 +msgid "Unable to find any active VPSA controller" +msgstr "" + +#: cinder/exception.py:519 +#, python-format +msgid "Failed to retrieve attachments for volume %(name)s" +msgstr "" + +#: cinder/exception.py:523 +#, python-format +msgid "Invalid attachment info for volume %(name)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:527 +#, python-format +msgid "Instance %(instance_id)s could not be found." +msgstr "" + +#: cinder/exception.py:531 +#, python-format +msgid "Bad or unexpected response from the storage volume backend API: %(data)s" +msgstr "" + +#: cinder/exception.py:536 +msgid "Unknown NFS exception" +msgstr "" + +#: cinder/exception.py:540 +msgid "No mounted NFS shares found" +msgstr "" + +#: cinder/exception.py:544 cinder/exception.py:556 +#, python-format +msgid "There is no share which can host %(volume_size)sG" +msgstr "" + +#: cinder/exception.py:548 +msgid "Unknown Gluster exception" +msgstr "" + +#: cinder/exception.py:552 +msgid "No mounted Gluster shares found" +msgstr "" + +#: cinder/exception.py:560 +#, python-format +msgid "" +"Glance metadata cannot be updated, key %(key)s exists for volume id " +"%(volume_id)s" +msgstr "" + +#: cinder/exception.py:565 +msgid "Failed to copy image to volume" +msgstr "" + +#: cinder/exception.py:569 +#, python-format +msgid "Backup %(backup_id)s could not be found." +msgstr "" + +#: cinder/exception.py:573 +#, python-format +msgid "Invalid backup: %(reason)s" +msgstr "" + +#: cinder/exception.py:577 +msgid "Connection to swift failed" +msgstr "" + +#: cinder/flags.py:145 +msgid "Deploy v1 of the Cinder API. " +msgstr "" + +#: cinder/flags.py:148 +msgid "Deploy v2 of the Cinder API. " +msgstr "" + +#: cinder/manager.py:158 +#, python-format +msgid "Skipping %(full_task_name)s, %(ticks_to_skip)s ticks left until next run" +msgstr "" + +#: cinder/manager.py:164 +#, python-format +msgid "Running periodic task %(full_task_name)s" +msgstr "" + +#: cinder/manager.py:171 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "" + +#: cinder/manager.py:216 +msgid "Notifying Schedulers of capabilities ..." 
+msgstr "" + +#: cinder/policy.py:30 +msgid "JSON file representing policy" +msgstr "" + +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" +msgstr "" + +#: cinder/quota.py:704 +#, python-format +msgid "Created reservations %(reservations)s" +msgstr "" + +#: cinder/quota.py:726 +#, python-format +msgid "Failed to commit reservations %(reservations)s" +msgstr "" + +#: cinder/quota.py:747 +#, python-format +msgid "Failed to roll back reservations %(reservations)s" +msgstr "" + +#: cinder/service.py:124 +msgid "SIGTERM received" +msgstr "" + +#: cinder/service.py:174 cinder/openstack/common/service.py:164 +msgid "Parent process has died unexpectedly, exiting" +msgstr "" + +#: cinder/service.py:213 cinder/openstack/common/service.py:200 +msgid "Forking too fast, sleeping" +msgstr "" + +#: cinder/service.py:231 cinder/openstack/common/service.py:121 +#: cinder/openstack/common/service.py:218 +#, python-format +msgid "Caught %s, exiting" +msgstr "" + +#: cinder/service.py:236 cinder/openstack/common/service.py:223 +msgid "Unhandled exception" +msgstr "" + +#: cinder/service.py:243 cinder/openstack/common/service.py:230 +#, python-format +msgid "Started child %d" +msgstr "" + +#: cinder/service.py:253 cinder/openstack/common/service.py:240 +#, python-format +msgid "Starting %d workers" +msgstr "" + +#: cinder/service.py:272 cinder/openstack/common/service.py:257 +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "" + +#: cinder/service.py:275 +#, python-format +msgid "Child %(pid)d exited with status %(code)d" +msgstr "" + +#: cinder/service.py:278 cinder/openstack/common/service.py:265 +#, python-format +msgid "pid %d not in child list" +msgstr "" + +#: cinder/service.py:286 +#, python-format +msgid "_wait_child %d" +msgstr "" + +#: cinder/service.py:302 +#, python-format +msgid "wait wrap.failed %s" +msgstr "" + +#: cinder/service.py:310 cinder/openstack/common/service.py:293 +#, python-format +msgid "Caught %s, stopping children" +msgstr "" + +#: cinder/service.py:321 cinder/openstack/common/service.py:304 +#, python-format +msgid "Waiting on %d children to exit" +msgstr "" + +#: cinder/service.py:353 +#, python-format +msgid "Starting %(topic)s node (version %(version_string)s)" +msgstr "" + +#: cinder/service.py:367 cinder/openstack/common/rpc/service.py:47 +#, python-format +msgid "Creating Consumer connection for Service %s" +msgstr "" + +#: cinder/service.py:458 +msgid "Service killed that has no database entry" +msgstr "" + +#: cinder/service.py:495 +msgid "The service database object disappeared, Recreating it." +msgstr "" + +#: cinder/service.py:510 +msgid "Recovered model server connection!" +msgstr "" + +#: cinder/service.py:516 +msgid "model server went away" +msgstr "" + +#: cinder/service.py:608 +msgid "Full set of FLAGS:" +msgstr "" + +#: cinder/service.py:615 +#, python-format +msgid "%(flag)s : FLAG SET " +msgstr "" + +#: cinder/utils.py:96 +#, python-format +msgid "Fetching %s" +msgstr "" + +#: cinder/utils.py:147 cinder/openstack/common/processutils.py:122 +#, python-format +msgid "Got unknown keyword args to utils.execute: %r" +msgstr "" + +#: cinder/utils.py:153 +msgid "" +"The root_helper option (which lets you specify a root wrapper different " +"from cinder-rootwrap, and defaults to using sudo) is now deprecated. You " +"should use the rootwrap_config option instead." 
+msgstr "" + +#: cinder/utils.py:169 cinder/openstack/common/processutils.py:137 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "" + +#: cinder/utils.py:186 cinder/utils.py:267 +#: cinder/openstack/common/processutils.py:162 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:224 +#, python-format +msgid "Result was %s" +msgstr "" + +#: cinder/utils.py:199 cinder/openstack/common/processutils.py:174 +#, python-format +msgid "%r failed. Retrying." +msgstr "" + +#: cinder/utils.py:241 cinder/volume/drivers/san/hp/hp_3par_common.py:197 +#, python-format +msgid "Running cmd (SSH): %s" +msgstr "" + +#: cinder/utils.py:243 +msgid "Environment not supported over SSH" +msgstr "" + +#: cinder/utils.py:247 +msgid "process_input not supported over SSH" +msgstr "" + +#: cinder/utils.py:316 cinder/volume/drivers/huawei/huawei_iscsi.py:91 +msgid "Specify a password or private_key" +msgstr "" + +#: cinder/utils.py:332 cinder/volume/drivers/huawei/huawei_iscsi.py:99 +#, python-format +msgid "Error connecting via ssh: %s" +msgstr "" + +#: cinder/utils.py:373 +#, python-format +msgid "debug in callback: %s" +msgstr "" + +#: cinder/utils.py:532 +#, python-format +msgid "Link Local address is not found.:%s" +msgstr "" + +#: cinder/utils.py:535 +#, python-format +msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +msgstr "" + +#: cinder/utils.py:569 +#, python-format +msgid "Invalid backend: %s" +msgstr "" + +#: cinder/utils.py:580 +#, python-format +msgid "backend %s" +msgstr "" + +#: cinder/utils.py:630 +msgid "in looping call" +msgstr "" + +#: cinder/utils.py:813 +#, python-format +msgid "Expected object of type: %s" +msgstr "" + +#: cinder/utils.py:903 +#, python-format +msgid "timefunc: '%(name)s' took %(total_time).2f secs" +msgstr "" + +#: cinder/utils.py:1105 +#, python-format +msgid "Could not remove tmpdir: %s" +msgstr "" + +#: cinder/utils.py:1213 +#, python-format +msgid "Unknown byte multiplier: %s" +msgstr "" + +#: cinder/wsgi.py:121 +#, python-format +msgid "Unable to find cert_file : %s" +msgstr "" + +#: cinder/wsgi.py:124 +#, python-format +msgid "Unable to find ca_file : %s" +msgstr "" + +#: cinder/wsgi.py:127 +#, python-format +msgid "Unable to find key_file : %s" +msgstr "" + +#: cinder/wsgi.py:130 +msgid "" +"When running server in SSL mode, you must specify both a cert_file and " +"key_file option value in your configuration file" +msgstr "" + +#: cinder/wsgi.py:163 +#, python-format +msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" +msgstr "" + +#: cinder/wsgi.py:207 +#, python-format +msgid "Started %(name)s on %(_host)s:%(_port)s" +msgstr "" + +#: cinder/wsgi.py:226 +msgid "Stopping WSGI server." +msgstr "" + +#: cinder/wsgi.py:240 +msgid "WSGI server has stopped." +msgstr "" + +#: cinder/wsgi.py:309 +msgid "You must implement __call__" +msgstr "" + +#: cinder/api/auth.py:26 +msgid "" +"cinder.api.auth:CinderKeystoneContext is deprecated. Please use " +"cinder.api.middleware.auth:CinderKeystoneContext instead." +msgstr "" + +#: cinder/api/auth.py:34 +msgid "" +"cinder.api.auth:pipeline_factory is deprecated. Please use " +"cinder.api.middleware.auth:pipeline_factory instead." 
+msgstr "" + +#: cinder/api/common.py:63 cinder/api/common.py:97 cinder/volume/api.py:359 +msgid "limit param must be an integer" +msgstr "" + +#: cinder/api/common.py:66 cinder/api/common.py:101 cinder/volume/api.py:356 +msgid "limit param must be positive" +msgstr "" + +#: cinder/api/common.py:91 +msgid "offset param must be an integer" +msgstr "" + +#: cinder/api/common.py:105 +msgid "offset param must be positive" +msgstr "" + +#: cinder/api/common.py:133 +#, python-format +msgid "marker [%s] not found" +msgstr "" + +#: cinder/api/common.py:160 +#, python-format +msgid "href %s does not contain version" +msgstr "" + +#: cinder/api/extensions.py:184 +msgid "Initializing extension manager." +msgstr "" + +#: cinder/api/extensions.py:199 +#, python-format +msgid "Loaded extension: %s" +msgstr "" + +#: cinder/api/extensions.py:237 +#, python-format +msgid "Ext name: %s" +msgstr "" + +#: cinder/api/extensions.py:238 +#, python-format +msgid "Ext alias: %s" +msgstr "" + +#: cinder/api/extensions.py:239 +#, python-format +msgid "Ext description: %s" +msgstr "" + +#: cinder/api/extensions.py:241 +#, python-format +msgid "Ext namespace: %s" +msgstr "" + +#: cinder/api/extensions.py:242 +#, python-format +msgid "Ext updated: %s" +msgstr "" + +#: cinder/api/extensions.py:244 +#, python-format +msgid "Exception loading extension: %s" +msgstr "" + +#: cinder/api/extensions.py:258 +#, python-format +msgid "Loading extension %s" +msgstr "" + +#: cinder/api/extensions.py:264 +#, python-format +msgid "Calling extension factory %s" +msgstr "" + +#: cinder/api/extensions.py:278 +#, python-format +msgid "osapi_volume_extension is set to deprecated path: %s" +msgstr "" + +#: cinder/api/extensions.py:280 +#, python-format +msgid "" +"Please set your flag or cinder.conf settings for osapi_volume_extension " +"to: %s" +msgstr "" + +#: cinder/api/extensions.py:289 +#, python-format +msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:357 +#, python-format +msgid "Failed to load extension %(classpath)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:381 +#, python-format +msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgstr "" + +#: cinder/api/sizelimit.py:25 +msgid "" +"cinder.api.sizelimit:RequestBodySizeLimiter is deprecated. Please use " +"cinder.api.middleware.sizelimit:RequestBodySizeLimiter instead" +msgstr "" + +#: cinder/api/xmlutil.py:268 +msgid "element is not a child" +msgstr "" + +#: cinder/api/xmlutil.py:417 +msgid "root element selecting a list" +msgstr "" + +#: cinder/api/xmlutil.py:742 +#, python-format +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgstr "" + +#: cinder/api/xmlutil.py:861 +msgid "subclasses must implement construct()!" 
+msgstr "" + +#: cinder/api/contrib/admin_actions.py:80 +#, python-format +msgid "Updating %(resource)s '%(id)s' with '%(update)r'" +msgstr "" + +#: cinder/api/contrib/backups.py:128 +#, python-format +msgid "show called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:140 +#, python-format +msgid "delete called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:143 +#, python-format +msgid "Delete backup with id: %s" +msgstr "" + +#: cinder/api/contrib/backups.py:185 +#, python-format +msgid "Creating new backup %s" +msgstr "" + +#: cinder/api/contrib/backups.py:195 cinder/api/contrib/backups.py:230 +msgid "Incorrect request body format" +msgstr "" + +#: cinder/api/contrib/backups.py:201 +#, python-format +msgid "Creating backup of volume %(volume_id)s in container %(container)s" +msgstr "" + +#: cinder/api/contrib/backups.py:221 +#, python-format +msgid "Restoring backup %(backup_id)s (%(body)s)" +msgstr "" + +#: cinder/api/contrib/backups.py:234 +#, python-format +msgid "Restoring backup %(backup_id)s to volume %(volume_id)s" +msgstr "" + +#: cinder/api/contrib/extended_snapshot_attributes.py:61 +msgid "Snapshot not found." +msgstr "" + +#: cinder/api/contrib/hosts.py:83 cinder/api/openstack/wsgi.py:158 +msgid "cannot understand XML" +msgstr "" + +#: cinder/api/contrib/hosts.py:133 +#, python-format +msgid "Host '%s' could not be found." +msgstr "" + +#: cinder/api/contrib/hosts.py:162 +#, python-format +msgid "Invalid status: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:165 +#, python-format +msgid "Invalid update setting: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:177 +#, python-format +msgid "Setting host %(host)s to %(state)s." +msgstr "" + +#: cinder/api/contrib/hosts.py:202 +msgid "Describe-resource is admin only functionality" +msgstr "" + +#: cinder/api/contrib/hosts.py:210 +msgid "Host not found" +msgstr "" + +#: cinder/api/contrib/quotas.py:63 +msgid "Quota limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:102 +msgid "Request body empty" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:106 +#: cinder/api/v1/snapshot_metadata.py:77 cinder/api/v1/volume_metadata.py:77 +#: cinder/api/v2/snapshot_metadata.py:77 +msgid "Request body and URI mismatch" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:109 +#: cinder/api/v1/snapshot_metadata.py:81 cinder/api/v1/volume_metadata.py:81 +#: cinder/api/v2/snapshot_metadata.py:81 +msgid "Request body contains too many items" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:159 +msgid "Invalid request body" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:163 +msgid "No image_name was specified in request." +msgstr "" + +#: cinder/api/middleware/fault.py:45 +#, python-format +msgid "Caught error: %s" +msgstr "" + +#: cinder/api/middleware/fault.py:54 cinder/api/openstack/wsgi.py:886 +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "" + +#: cinder/api/middleware/sizelimit.py:54 cinder/api/middleware/sizelimit.py:63 +#: cinder/api/middleware/sizelimit.py:77 +msgid "Request is too large." 
+msgstr "" + +#: cinder/api/openstack/__init__.py:74 +msgid "Must specify an ExtensionManager class" +msgstr "" + +#: cinder/api/openstack/__init__.py:85 +#, python-format +msgid "Extended resource: %s" +msgstr "" + +#: cinder/api/openstack/__init__.py:110 +#, python-format +msgid "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" +msgstr "" + +#: cinder/api/openstack/__init__.py:115 +#, python-format +msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgstr "" + +#: cinder/api/openstack/__init__.py:128 +msgid "" +"cinder.api.openstack:FaultWrapper is deprecated. Please use " +"cinder.api.middleware.fault:FaultWrapper instead." +msgstr "" + +#: cinder/api/openstack/urlmap.py:25 +msgid "" +"cinder.api.openstack.urlmap:urlmap_factory is deprecated. Please use " +"cinder.api.urlmap:urlmap_factory instead." +msgstr "" + +#: cinder/api/openstack/wsgi.py:134 cinder/api/openstack/wsgi.py:537 +msgid "cannot understand JSON" +msgstr "" + +#: cinder/api/openstack/wsgi.py:542 +msgid "too many body keys" +msgstr "" + +#: cinder/api/openstack/wsgi.py:581 +#, python-format +msgid "Exception handling resource: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:586 +#, python-format +msgid "Fault thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:589 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:697 +msgid "Unrecognized Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:701 +msgid "No Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:705 +msgid "Empty body provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:816 +#, python-format +msgid "There is no such action: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:819 cinder/api/openstack/wsgi.py:832 +#: cinder/api/v1/snapshot_metadata.py:55 cinder/api/v1/snapshot_metadata.py:73 +#: cinder/api/v1/snapshot_metadata.py:98 cinder/api/v1/snapshot_metadata.py:123 +#: cinder/api/v1/volume_metadata.py:55 cinder/api/v1/volume_metadata.py:73 +#: cinder/api/v1/volume_metadata.py:98 cinder/api/v1/volume_metadata.py:123 +#: cinder/api/v2/snapshot_metadata.py:55 cinder/api/v2/snapshot_metadata.py:73 +#: cinder/api/v2/snapshot_metadata.py:98 cinder/api/v2/snapshot_metadata.py:123 +msgid "Malformed request body" +msgstr "" + +#: cinder/api/openstack/wsgi.py:829 +msgid "Unsupported Content-Type" +msgstr "" + +#: cinder/api/openstack/wsgi.py:841 +msgid "Malformed request url" +msgstr "" + +#: cinder/api/openstack/wsgi.py:889 +#, python-format +msgid "%(url)s returned a fault: %(e)s" +msgstr "" + +#: cinder/api/openstack/volume/__init__.py:25 +msgid "" +"cinder.api.openstack.volume:APIRouter is deprecated. Please use " +"cinder.api.v1.router:APIRouter instead." +msgstr "" + +#: cinder/api/openstack/volume/versions.py:26 +msgid "" +"cinder.api.openstack.volume.versions.Versions is deprecated. Please use " +"cinder.api.versions.Versions instead." +msgstr "" + +#: cinder/api/v1/limits.py:144 cinder/api/v2/limits.py:144 +#, python-format +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." +msgstr "" + +#: cinder/api/v1/limits.py:270 cinder/api/v2/limits.py:270 +msgid "This request was rate-limited." 
+msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:39 cinder/api/v1/snapshot_metadata.py:119 +#: cinder/api/v1/snapshot_metadata.py:158 cinder/api/v2/snapshot_metadata.py:39 +#: cinder/api/v2/snapshot_metadata.py:119 +#: cinder/api/v2/snapshot_metadata.py:158 +msgid "snapshot does not exist" +msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:141 +#: cinder/api/v1/snapshot_metadata.py:151 cinder/api/v1/volume_metadata.py:141 +#: cinder/api/v1/volume_metadata.py:151 cinder/api/v2/snapshot_metadata.py:141 +#: cinder/api/v2/snapshot_metadata.py:151 +msgid "Metadata item was not found" +msgstr "" + +#: cinder/api/v1/snapshots.py:123 cinder/api/v2/snapshots.py:123 +#, python-format +msgid "Delete snapshot with id: %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:173 cinder/api/v2/snapshots.py:179 +#, python-format +msgid "Create snapshot from volume %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:177 cinder/api/v2/snapshots.py:188 +#, python-format +msgid "Invalid value '%s' for force. " +msgstr "" + +#: cinder/api/v1/volume_metadata.py:39 cinder/api/v1/volume_metadata.py:119 +#: cinder/api/v1/volume_metadata.py:158 +msgid "volume does not exist" +msgstr "" + +#: cinder/api/v1/volumes.py:106 +#, python-format +msgid "vol=%s" +msgstr "" + +#: cinder/api/v1/volumes.py:236 cinder/api/v2/volumes.py:153 +#, python-format +msgid "Delete volume with id: %s" +msgstr "" + +#: cinder/api/v1/volumes.py:278 cinder/api/v1/volumes.py:282 +#: cinder/api/v2/volumes.py:209 cinder/api/v2/volumes.py:213 +msgid "Invalid imageRef provided." +msgstr "" + +#: cinder/api/v1/volumes.py:339 cinder/api/v2/volumes.py:272 +#, python-format +msgid "Create volume of %s GB" +msgstr "Створити розділ на %s ГБ" + +#: cinder/api/v1/volumes.py:418 +#, python-format +msgid "Removing options '%(bad_options)s' from query" +msgstr "" + +#: cinder/api/v2/volumes.py:359 +#, python-format +msgid "Removing options '%s' from query" +msgstr "" + +#: cinder/backup/api.py:53 +msgid "Backup status must be available or error" +msgstr "" + +#: cinder/backup/api.py:78 +msgid "Volume to be backed up must be available" +msgstr "" + +#: cinder/backup/api.py:112 +msgid "Backup status must be available" +msgstr "" + +#: cinder/backup/api.py:117 +msgid "Backup to be restored has invalid size" +msgstr "" + +#: cinder/backup/api.py:126 +#, python-format +msgid "Creating volume of %(size)s GB for restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/api.py:140 +#, python-format +msgid "" +"volume size %(volume_size)d is too small to restore backup of size " +"%(size)d." 
+msgstr "" + +#: cinder/backup/api.py:145 +msgid "Volume to be restored to must be available" +msgstr "" + +#: cinder/backup/api.py:151 +msgid "Volume to be restored to is smaller than the backup to be restored" +msgstr "" + +#: cinder/backup/api.py:155 +#, python-format +msgid "Overwriting volume %(volume_id)s with restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/manager.py:79 +msgid "Cleaning up incomplete backup operations" +msgstr "" + +#: cinder/backup/manager.py:83 +#, python-format +msgid "Resetting volume %s to available (was backing-up)" +msgstr "" + +#: cinder/backup/manager.py:87 +#, python-format +msgid "Resetting volume %s to error_restoring (was restoring-backup)" +msgstr "" + +#: cinder/backup/manager.py:98 +#, python-format +msgid "Resetting backup %s to error (was creating)" +msgstr "" + +#: cinder/backup/manager.py:104 +#, python-format +msgid "Resetting backup %s to available (was restoring)" +msgstr "" + +#: cinder/backup/manager.py:109 +#, python-format +msgid "Resuming delete on backup: %s" +msgstr "" + +#: cinder/backup/manager.py:119 +#, python-format +msgid "create_backup started, backup: %(backup_id)s for volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:128 +#, python-format +msgid "" +"create_backup aborted, expected volume status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:137 +#, python-format +msgid "" +"create_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:160 +#, python-format +msgid "create_backup finished. backup: %s" +msgstr "" + +#: cinder/backup/manager.py:166 +#, python-format +msgid "" +"restore_backup started, restoring backup: %(backup_id)s to volume: " +"%(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:175 +#, python-format +msgid "" +"restore_backup aborted, expected volume status %(expected_status)s but " +"got %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:183 +#, python-format +msgid "" +"restore_backup aborted, expected backup status %(expected_status)s but " +"got %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:199 +#, python-format +msgid "" +"restore_backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]" +msgstr "" + +#: cinder/backup/manager.py:220 +#, python-format +msgid "" +"restore_backup finished, backup: %(backup_id)s restored to volume: " +"%(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:228 +#, python-format +msgid "delete_backup started, backup: %s" +msgstr "" + +#: cinder/backup/manager.py:234 +#, python-format +msgid "" +"delete_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:244 +#, python-format +msgid "" +"delete_backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]" +msgstr "" + +#: cinder/backup/manager.py:264 +#, python-format +msgid "delete_backup finished, backup %s deleted" +msgstr "" + +#: cinder/backup/services/swift.py:96 +#, python-format +msgid "unsupported compression algorithm: %s" +msgstr "" + +#: cinder/backup/services/swift.py:117 +#, python-format +msgid "_check_container_exists: container: %s" +msgstr "" + +#: cinder/backup/services/swift.py:122 +#, python-format +msgid "container %s does 
not exist"
+msgstr ""
+
+#: cinder/backup/services/swift.py:127
+#, python-format
+msgid "container %s exists"
+msgstr ""
+
+#: cinder/backup/services/swift.py:133
+#, python-format
+msgid "_create_container started, container: %(container)s, backup: %(backup_id)s"
+msgstr ""
+
+#: cinder/backup/services/swift.py:148
+#, python-format
+msgid "_generate_swift_object_name_prefix: %s"
+msgstr ""
+
+#: cinder/backup/services/swift.py:159
+#, python-format
+msgid "generated object list: %s"
+msgstr ""
+
+#: cinder/backup/services/swift.py:169
+#, python-format
+msgid ""
+"_write_metadata started, container name: %(container)s, metadata "
+"filename: %(filename)s"
+msgstr ""
+
+#: cinder/backup/services/swift.py:184
+#, python-format
+msgid ""
+"error writing metadata file to swift, MD5 of metadata file in swift "
+"[%(etag)s] is not the same as MD5 of metadata file sent to swift "
+"[%(md5)s]"
+msgstr ""
+
+#: cinder/backup/services/swift.py:188
+msgid "_write_metadata finished"
+msgstr ""
+
+#: cinder/backup/services/swift.py:193
+#, python-format
+msgid ""
+"_read_metadata started, container name: %(container)s, metadata filename:"
+" %(filename)s"
+msgstr ""
+
+#: cinder/backup/services/swift.py:197
+#, python-format
+msgid "_read_metadata finished (%s)"
+msgstr ""
+
+#: cinder/backup/services/swift.py:207
+#, python-format
+msgid "volume size %d is invalid."
+msgstr ""
+
+#: cinder/backup/services/swift.py:221
+#, python-format
+msgid ""
+"starting backup of volume: %(volume_id)s to swift, volume size: "
+"%(volume_size_bytes)d, swift object names prefix %(object_prefix)s, "
+"availability zone: %(availability_zone)s"
+msgstr ""
+
+#: cinder/backup/services/swift.py:237
+msgid "reading chunk of data from volume"
+msgstr ""
+
+#: cinder/backup/services/swift.py:244
+#, python-format
+msgid ""
+"compressed %(data_size_bytes)d bytes of data to %(comp_size_bytes)d bytes"
+" using %(algorithm)s"
+msgstr ""
+
+#: cinder/backup/services/swift.py:248
+msgid "not compressing data"
+msgstr ""
+
+#: cinder/backup/services/swift.py:252
+msgid "About to put_object"
+msgstr ""
+
+#: cinder/backup/services/swift.py:257
+#, python-format
+msgid "swift MD5 for %(object_name)s: %(etag)s"
+msgstr ""
+
+#: cinder/backup/services/swift.py:260
+#, python-format
+msgid "backup MD5 for %(object_name)s: %(md5)s"
+msgstr ""
+
+#: cinder/backup/services/swift.py:262
+#, python-format
+msgid ""
+"error writing object to swift, MD5 of object in swift %(etag)s is not the"
+" same as MD5 of object sent to swift %(md5)s"
+msgstr ""
+
+#: cinder/backup/services/swift.py:268
+msgid "Calling eventlet.sleep(0)"
+msgstr ""
+
+#: cinder/backup/services/swift.py:276
+#, python-format
+msgid "backup %s finished."
+msgstr ""
+
+#: cinder/backup/services/swift.py:281
+#, python-format
+msgid "v1 swift volume backup restore of %s started"
+msgstr ""
+
+#: cinder/backup/services/swift.py:287
+#, python-format
+msgid "metadata_object_names = %s"
+msgstr ""
+
+#: cinder/backup/services/swift.py:293
+msgid ""
+"restore_backup aborted, actual swift object list in swift does not match "
+"object list stored in metadata"
+msgstr ""
+
+#: cinder/backup/services/swift.py:299
+#, python-format
+msgid ""
+"restoring object from swift. 
backup: %(backup_id)s, container: " +"%(container)s, swift object name: %(object_name)s, volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:309 +#, python-format +msgid "decompressing data using %s algorithm" +msgstr "" + +#: cinder/backup/services/swift.py:323 +#, python-format +msgid "v1 swift volume backup restore of %s finished" +msgstr "" + +#: cinder/backup/services/swift.py:331 +#, python-format +msgid "" +"starting restore of backup %(object_prefix)s from swift container: " +"%(container)s, to volume %(volume_id)s, backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:339 +#, python-format +msgid "Restoring swift backup version %s" +msgstr "" + +#: cinder/backup/services/swift.py:344 +#, python-format +msgid "No support to restore swift backup version %s" +msgstr "" + +#: cinder/backup/services/swift.py:348 +#, python-format +msgid "restore %(backup_id)s to %(volume_id)s finished." +msgstr "" + +#: cinder/backup/services/swift.py:362 +msgid "swift error while listing objects, continuing with delete" +msgstr "" + +#: cinder/backup/services/swift.py:371 +#, python-format +msgid "swift error while deleting object %s, continuing with delete" +msgstr "" + +#: cinder/backup/services/swift.py:374 +#, python-format +msgid "deleted swift object: %(swift_object_name)s in container: %(container)s" +msgstr "" + +#: cinder/backup/services/swift.py:380 +#, python-format +msgid "delete %s finished" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:152 +#, python-format +msgid "Creating iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:171 cinder/brick/iscsi/iscsi.py:264 +#: cinder/brick/iscsi/iscsi.py:400 cinder/brick/iscsi/iscsi.py:409 +#, python-format +msgid "Failed to create iscsi target for volume id:%(vol_id)s." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:181 +#, python-format +msgid "" +"Failed to create iscsi target for volume id:%(vol_id)s. Please ensure " +"your tgtd config file contains 'include %(volumes_dir)s/*'" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:192 +#, python-format +msgid "Removing iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:209 cinder/brick/iscsi/iscsi.py:426 +#, python-format +msgid "Failed to remove iscsi target for volume id:%(vol_id)s." 
+msgstr "" + +#: cinder/brick/iscsi/iscsi.py:218 cinder/brick/iscsi/iscsi.py:434 +msgid "valid iqn needed for show_target" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:270 +#, python-format +msgid "Removing iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:356 +msgid "rtstool is not installed correctly" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:376 +#, python-format +msgid "Creating iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:416 +#, python-format +msgid "Removing iscsi_target: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:455 +#, python-format +msgid "Failed to add initiator iqn %s to target" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:35 +#, python-format +msgid "Unable to find Volume Group: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:41 +#, fuzzy, python-format +msgid "Failed to create Volume Group: %s" +msgstr "Створити розділ на %s ГБ" + +#: cinder/brick/local_dev/lvm.py:75 +msgid "Error creating Volume Group" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:76 +#, python-format +msgid "Cmd :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:77 +#, python-format +msgid "StdOut :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:78 +#, python-format +msgid "StdErr :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:82 +#, python-format +msgid "Unable to locate Volume Group %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:265 +#, python-format +msgid "Unable to find VG: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:284 +msgid "" +"Requested to setup thin provisioning, however current LVM version does " +"not support it." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:337 +#, python-format +msgid "Unable to find LV: %s" +msgstr "" + +#: cinder/common/sqlalchemyutils.py:68 +msgid "Id not in sort_keys; is sort_keys unique?" +msgstr "" + +#: cinder/common/sqlalchemyutils.py:116 +msgid "Unknown sort direction, must be 'desc' or 'asc'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:51 +msgid "Use of empty request context is deprecated" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:178 +#, python-format +msgid "Unrecognized read_deleted value '%s'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:786 +#, python-format +msgid "Change will make usage less than 0 for the following resources: %(unders)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1797 cinder/db/sqlalchemy/api.py:1823 +#, python-format +msgid "No backend config with id %(sm_backend_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1880 +#, python-format +msgid "No sm_flavor called %(sm_flavor)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1924 +#, python-format +msgid "No sm_volume with id %(volume_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1987 +#, python-format +msgid "No backup with id %(backup_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:74 +msgid "version should be an integer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:101 +msgid "Upgrade DB using Essex release first." +msgstr "" + +#: cinder/db/sqlalchemy/session.py:71 +#, python-format +msgid "Got mysql server has gone away: %s" +msgstr "" + +#: cinder/db/sqlalchemy/session.py:132 +#, python-format +msgid "SQL connection failed. %s attempts left." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:243 +msgid "Exception while creating table." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:272 +msgid "Downgrade from initial Cinder install is unsupported." 
+msgstr ""
+
+#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:56
+#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:85
+#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:123
+#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:82
+#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:47
+#, python-format
+msgid "Table |%s| not created!"
+msgstr ""
+
+#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:135
+msgid "quota_classes table not dropped"
+msgstr ""
+
+#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:142
+msgid "quota_usages table not dropped"
+msgstr ""
+
+#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:149
+msgid "reservations table not dropped"
+msgstr ""
+
+#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:62
+msgid "Exception while creating table 'volume_glance_metadata'"
+msgstr ""
+
+#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:77
+msgid "volume_glance_metadata table not dropped"
+msgstr ""
+
+#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:94
+msgid "backups table not dropped"
+msgstr ""
+
+#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:60
+msgid "snapshot_metadata table not dropped"
+msgstr ""
+
+#: cinder/image/glance.py:151
+#, python-format
+msgid ""
+"Error contacting glance server '%(host)s:%(port)s' for '%(method)s', "
+"%(extra)s."
+msgstr ""
+
+#: cinder/image/image_utils.py:116
+msgid "Snapshot list encountered but no header found!"
+msgstr ""
+
+#: cinder/image/image_utils.py:224
+msgid "'qemu-img info' parsing failed."
+msgstr ""
+
+#: cinder/image/image_utils.py:231
+#, python-format
+msgid "fmt=%(fmt)s backed by:%(backing_file)s"
+msgstr ""
+
+#: cinder/image/image_utils.py:248
+#, python-format
+msgid "Converted to raw, but format is now %s"
+msgstr ""
+
+#: cinder/image/image_utils.py:278
+#, python-format
+msgid "Converted to %(f1)s, but format is now %(f2)s"
+msgstr ""
+
+#: cinder/openstack/common/exception.py:104
+msgid "Uncaught exception"
+msgstr ""
+
+#: cinder/openstack/common/excutils.py:48
+#, python-format
+msgid "Original exception being dropped: %s"
+msgstr ""
+
+#: cinder/openstack/common/lockutils.py:102
+#, python-format
+msgid "Could not release the acquired lock `%s`"
+msgstr ""
+
+#: cinder/openstack/common/lockutils.py:188
+#, python-format
+msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..."
+msgstr ""
+
+#: cinder/openstack/common/lockutils.py:199
+#, python-format
+msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..."
+msgstr ""
+
+#: cinder/openstack/common/lockutils.py:226
+#, python-format
+msgid "Got file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..."
+msgstr ""
+
+#: cinder/openstack/common/lockutils.py:234
+#, python-format
+msgid "Released file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." 
+msgstr ""
+
+#: cinder/openstack/common/log.py:226
+#, fuzzy, python-format
+msgid "Deprecated: %s"
+msgstr "отримано %s"
+
+#: cinder/openstack/common/log.py:329
+#, python-format
+msgid "Error loading logging config %(log_config)s: %(err_msg)s"
+msgstr ""
+
+#: cinder/openstack/common/log.py:379
+#, python-format
+msgid "syslog facility must be one of: %s"
+msgstr ""
+
+#: cinder/openstack/common/log.py:537
+#, python-format
+msgid "Fatal call to deprecated config: %(msg)s"
+msgstr ""
+
+#: cinder/openstack/common/loopingcall.py:84
+#, python-format
+msgid "task run outlasted interval by %s sec"
+msgstr ""
+
+#: cinder/openstack/common/loopingcall.py:91
+msgid "in fixed duration looping call"
+msgstr ""
+
+#: cinder/openstack/common/loopingcall.py:131
+#, python-format
+msgid "Dynamic looping call sleeping for %.02f seconds"
+msgstr ""
+
+#: cinder/openstack/common/loopingcall.py:138
+msgid "in dynamic looping call"
+msgstr ""
+
+#: cinder/openstack/common/policy.py:149
+#, python-format
+msgid ""
+"Inheritance-based rules are deprecated; use the default brain instead of "
+"%s."
+msgstr ""
+
+#: cinder/openstack/common/policy.py:163
+#, python-format
+msgid "Failed to understand rule %(match)r"
+msgstr ""
+
+#: cinder/openstack/common/policy.py:173
+#, python-format
+msgid "Inheritance-based rules are deprecated; update _check_%s"
+msgstr ""
+
+#: cinder/openstack/common/policy.py:180
+#, python-format
+msgid "No handler for matches of kind %s"
+msgstr ""
+
+#: cinder/openstack/common/service.py:112
+#: cinder/openstack/common/service.py:275
+msgid "Full set of CONF:"
+msgstr ""
+
+#: cinder/openstack/common/service.py:261
+#, python-format
+msgid "Child %(pid)s exited with status %(code)d"
+msgstr ""
+
+#: cinder/openstack/common/strutils.py:72
+#, python-format
+msgid "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s"
+msgstr ""
+
+#: cinder/openstack/common/notifier/api.py:125
+#, python-format
+msgid "%s not in valid priorities"
+msgstr ""
+
+#: cinder/openstack/common/notifier/api.py:141
+#, python-format
+msgid ""
+"Problem '%(e)s' attempting to send to notification system. "
+"Payload=%(payload)s"
+msgstr ""
+
+#: cinder/openstack/common/notifier/api.py:171
+#, python-format
+msgid "Failed to load notifier %s. These notifications will not be sent."
+msgstr ""
+
+#: cinder/openstack/common/notifier/rabbit_notifier.py:27
+msgid "The rabbit_notifier is now deprecated. Please use rpc_notifier instead."
+msgstr ""
+
+#: cinder/openstack/common/notifier/rpc_notifier.py:45
+#: cinder/openstack/common/notifier/rpc_notifier2.py:51
+#, python-format
+msgid "Could not send notification to %(topic)s. Payload=%(message)s"
+msgstr ""
+
+#: cinder/openstack/common/rpc/__init__.py:106
+#, python-format
+msgid ""
+"A RPC is being made while holding a lock. The locks currently held are "
+"%(locks)s. This is probably a bug. Please report it. Include the "
+"following: [%(stack)s]."
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:75
+msgid "Pool creating new connection"
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:200
+#, python-format
+msgid "no calling threads waiting for msg_id : %s, message : %s"
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:208
+#, python-format
+msgid ""
+"Number of call waiters is greater than warning threshold: %d. There "
+"could be a MulticallProxyWaiter leak." 
+msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:291 +#, python-format +msgid "unpacked context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:337 +#, python-format +msgid "UNIQUE_ID is %s." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:406 +#, python-format +msgid "received %s" +msgstr "отримано %s" + +#: cinder/openstack/common/rpc/amqp.py:414 +#, python-format +msgid "no method for message: %s" +msgstr "без порядку для повідомлень: %s" + +#: cinder/openstack/common/rpc/amqp.py:415 +#, python-format +msgid "No method for message: %s" +msgstr "Без порядку для повідомлень: %s" + +#: cinder/openstack/common/rpc/amqp.py:443 +#: cinder/openstack/common/rpc/impl_zmq.py:286 +#, python-format +msgid "Expected exception during message handling (%s)" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:451 +#: cinder/openstack/common/rpc/impl_zmq.py:292 +msgid "Exception during message handling" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:586 +#, python-format +msgid "Making synchronous call on %s ..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:589 +#, python-format +msgid "MSG_ID is %s" +msgstr "MSG_ID %s" + +#: cinder/openstack/common/rpc/amqp.py:623 +#, python-format +msgid "Making asynchronous cast on %s..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:632 +msgid "Making asynchronous fanout cast..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:660 +#, python-format +msgid "Sending %(event_type)s on %(topic)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:74 +msgid "An unknown RPC related exception occurred." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:104 +#, python-format +msgid "" +"Remote error: %(exc_type)s %(value)s\n" +"%(traceback)s." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:121 +#, python-format +msgid "" +"Timeout while waiting on RPC response - topic: \"%(topic)s\", RPC method:" +" \"%(method)s\" info: \"%(info)s\"" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:137 +#: cinder/openstack/common/rpc/common.py:138 +#: cinder/openstack/common/rpc/common.py:139 +msgid "" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:143 +#, python-format +msgid "Found duplicate message(%(msg_id)s). Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:147 +msgid "Invalid reuse of an RPC connection." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:151 +#, python-format +msgid "Specified RPC version, %(version)s, not supported by this endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:156 +#, python-format +msgid "" +"Specified RPC envelope version, %(version)s, not supported by this " +"endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:280 +#, python-format +msgid "Failed to sanitize %(item)s. Key error %(err)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:302 +#, python-format +msgid "Returning exception %s to caller" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:169 +#: cinder/openstack/common/rpc/impl_qpid.py:133 +msgid "Failed to process message... skipping it." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:480 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:502 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:539 +#, python-format +msgid "" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:555 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:609 +#: cinder/openstack/common/rpc/impl_qpid.py:403 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:627 +#: cinder/openstack/common/rpc/impl_qpid.py:418 +#, python-format +msgid "Timed out waiting for RPC response: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:631 +#: cinder/openstack/common/rpc/impl_qpid.py:422 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:670 +#: cinder/openstack/common/rpc/impl_qpid.py:457 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:351 +#, python-format +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:357 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:370 +msgid "Re-established AMQP queues" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:430 +msgid "Error processing message. Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:97 +msgid "JSON serialization failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:104 +#, python-format +msgid "Deserializing: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:140 +#, python-format +msgid "Connecting to %(addr)s with %(type)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:141 +#, python-format +msgid "-> Subscribed to %(subscribe)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:142 +#, python-format +msgid "-> bind: %(bind)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:150 +msgid "Could not open socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:162 +#, python-format +msgid "Subscribing to %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:204 +msgid "You cannot recv on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:209 +msgid "You cannot send on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:273 +#, python-format +msgid "Running func with context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:311 +msgid "Sending reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:345 +msgid "RPC message did not include method." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:380 +msgid "Registering reactor" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:392 +msgid "In reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:407 +msgid "Out reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:411 +msgid "Consuming socket" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:453 +#, python-format +msgid "CONSUMER GOT %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:465 +#, python-format +msgid "Creating proxy for topic: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:471 +msgid "Topic contained dangerous characters." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:496 +#, python-format +msgid "ROUTER RELAY-OUT SUCCEEDED %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:505 +msgid "Topic socket file creation failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:510 +#, python-format +msgid "ROUTER RELAY-OUT QUEUED %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:513 +#, python-format +msgid "Local per-topic backlog buffer full for topic %(topic)s. Dropping message." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:532 +#, python-format +msgid "Could not create IPC directory %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:542 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:576 +#, python-format +msgid "CONSUMER RECEIVED DATA: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:578 +#, python-format +msgid "ROUTER RELAY-OUT %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:600 +msgid "ZMQ Envelope version unsupported or unknown." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:628 +msgid "Skipping topic registration. Already registered." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:635 +#, python-format +msgid "Consumer is a zmq.%s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:687 +msgid "Creating payload" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:700 +msgid "Creating queue socket for reply waiter" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:713 +msgid "Sending cast" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:716 +msgid "Cast sent; Waiting reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:719 +#, fuzzy, python-format +msgid "Received message: %s" +msgstr "отримано %s" + +#: cinder/openstack/common/rpc/impl_zmq.py:720 +msgid "Unpacking response" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:729 +msgid "Unsupported or unknown ZMQ envelope returned." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:736 +msgid "RPC Message Invalid." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:760 +#, python-format +msgid "%(msg)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:763 +#, python-format +msgid "Sending message(s) to: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:767 +msgid "No matchmaker results. Not casting." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:770 +msgid "No match from matchmaker." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:53 +msgid "Match not found by MatchMaker." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:89 +msgid "Matchmaker does not implement registration or heartbeat." 
+msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:239 +#, python-format +msgid "Matchmaker unregistered: %s, %s" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:250 +msgid "Register before starting heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:343 +#: cinder/openstack/common/rpc/matchmaker.py:361 +#, python-format +msgid "No key defining hosts for topic '%s', see ringfile" +msgstr "" + +#: cinder/scheduler/chance.py:51 cinder/scheduler/simple.py:90 +msgid "Is the appropriate service running?" +msgstr "" + +#: cinder/scheduler/chance.py:56 +msgid "Could not find another host" +msgstr "" + +#: cinder/scheduler/driver.py:89 +msgid "Must implement a fallback schedule" +msgstr "" + +#: cinder/scheduler/driver.py:93 +msgid "Must implement schedule_create_volume" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:113 +msgid "Invalid value for 'scheduler_max_attempts', must be >=1" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:131 +#, python-format +msgid "" +"Error scheduling %(volume_id)s from last vol-service: %(last_host)s : " +"%(exc)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:160 +#, python-format +msgid "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:208 +#, python-format +msgid "Filtered %(hosts)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:214 +#, python-format +msgid "Choosing %(best_host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:240 +#, python-format +msgid "Ignoring %(service_name)s service update from %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:244 +#, python-format +msgid "Received %(service_name)s service update from %(host)s." +msgstr "" + +#: cinder/scheduler/host_manager.py:266 +msgid "service is down or disabled." +msgstr "" + +#: cinder/scheduler/manager.py:119 +#, python-format +msgid "Failed to schedule_%(method)s: %(ex)s" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:69 +#, python-format +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:78 +#, python-format +msgid "Could not decode scheduler options: '%(e)s'" +msgstr "" + +#: cinder/scheduler/simple.py:79 +msgid "Not enough allocatable volume gigabytes remaining" +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:37 +msgid "Free capacity not set: volume node info collection broken." +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:51 +#, python-format +msgid "" +"Insufficient free space for volume creation (requested / avail): " +"%(requested)s/%(available)s" +msgstr "" + +#: cinder/scheduler/filters/retry_filter.py:41 +#, python-format +msgid "Host %(host)s %(pass_msg)s. 
Previously tried hosts: %(hosts)s"
+msgstr ""
+
+#: cinder/tests/fake_driver.py:45 cinder/volume/driver.py:506
+#, python-format
+msgid "FAKE ISCSI: %s"
+msgstr ""
+
+#: cinder/tests/fake_driver.py:93 cinder/tests/fake_driver.py:98
+#, python-format
+msgid "LoggingVolumeDriver: %s"
+msgstr ""
+
+#: cinder/tests/fake_utils.py:72
+#, python-format
+msgid "Faking execution of cmd (subprocess): %s"
+msgstr ""
+
+#: cinder/tests/fake_utils.py:80
+#, python-format
+msgid "Faked command matched %s"
+msgstr ""
+
+#: cinder/tests/fake_utils.py:96
+#, python-format
+msgid "Faked command raised an exception %s"
+msgstr ""
+
+#: cinder/tests/fake_utils.py:101
+#, python-format
+msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'"
+msgstr ""
+
+#: cinder/tests/test_misc.py:59
+#, python-format
+msgid ""
+"The following migrations are missing a downgrade:\n"
+"\t%s"
+msgstr ""
+
+#: cinder/tests/test_storwize_svc.py:243
+#, python-format
+msgid "unrecognized argument %s"
+msgstr ""
+
+#: cinder/tests/test_storwize_svc.py:1217
+#, python-format
+msgid "Run CLI command: %s"
+msgstr ""
+
+#: cinder/tests/test_storwize_svc.py:1220
+#, python-format
+msgid ""
+"CLI output:\n"
+" stdout: %(stdout)s\n"
+" stderr: %(stderr)s"
+msgstr ""
+
+#: cinder/tests/test_storwize_svc.py:1225
+#, python-format
+msgid ""
+"CLI Exception output:\n"
+" stdout: %(out)s\n"
+" stderr: %(err)s"
+msgstr ""
+
+#: cinder/tests/test_volume_types.py:58
+#, python-format
+msgid "Given data: %s"
+msgstr ""
+
+#: cinder/tests/test_volume_types.py:59
+#, python-format
+msgid "Result data: %s"
+msgstr ""
+
+#: cinder/tests/api/contrib/test_backups.py:638
+msgid "Invalid input"
+msgstr ""
+
+#: cinder/tests/integrated/test_login.py:31
+#, fuzzy, python-format
+msgid "volume: %s"
+msgstr "Від'єднати том %s"
+
+#: cinder/tests/integrated/api/client.py:37
+#, python-format
+msgid ""
+"%(message)s\n"
+"Status Code: %(_status)s\n"
+"Body: %(_body)s"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:46
+msgid "Authentication error"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:54
+msgid "Authorization error"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:62
+msgid "Item not found"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:104
+#, python-format
+msgid "Doing %(method)s on %(relative_url)s"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:106
+#, python-format
+msgid "Body: %s"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:124
+#, python-format
+msgid "%(auth_uri)s => code %(http_status)s"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:150
+#, python-format
+msgid "%(relative_uri)s => code %(http_status)s"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:160
+msgid "Unexpected status code"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:167
+#, python-format
+msgid "Decoding JSON: %s"
+msgstr ""
+
+#: cinder/volume/api.py:96
+msgid "May specify only one of snapshot, imageRef or source volume"
+msgstr ""
+
+#: cinder/volume/api.py:103 cinder/volume/api.py:449
+#: cinder/volume/manager.py:554
+msgid "status must be available"
+msgstr ""
+
+#: cinder/volume/api.py:108
+msgid "Volume size cannot be less than the Snapshot size"
+msgstr ""
+
+#: cinder/volume/api.py:117
+msgid "Unable to clone volumes that are in an error state"
+msgstr ""
+
+#: cinder/volume/api.py:123
+msgid "Clones currently must be >= original volume size." 
+msgstr "" + +#: cinder/volume/api.py:140 +#, python-format +msgid "Volume size '%s' must be an integer and greater than 0" +msgstr "" + +#: cinder/volume/api.py:150 +msgid "Size of specified image is larger than volume size." +msgstr "" + +#: cinder/volume/api.py:154 +msgid "Image minDisk size is larger than the volume size." +msgstr "" + +#: cinder/volume/api.py:168 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/api.py:177 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create volume (%(d_consumed)d " +"volumes already consumed)" +msgstr "" + +#: cinder/volume/api.py:308 +msgid "Failed to update quota for deleting volume" +msgstr "" + +#: cinder/volume/api.py:316 +msgid "Volume status must be available or error" +msgstr "" + +#: cinder/volume/api.py:321 +#, python-format +msgid "Volume still has %d dependent snapshots" +msgstr "" + +#: cinder/volume/api.py:374 cinder/volume/api.py:432 +#: cinder/volume/volume_types.py:65 +#, python-format +msgid "Searching by: %s" +msgstr "" + +#: cinder/volume/api.py:452 +msgid "already attached" +msgstr "" + +#: cinder/volume/api.py:459 +msgid "already detached" +msgstr "" + +#: cinder/volume/api.py:470 +msgid "Volume status must be available to reserve" +msgstr "" + +#: cinder/volume/api.py:519 +msgid "must be available" +msgstr "" + +#: cinder/volume/api.py:537 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG snapshot " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/api.py:546 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create snapshot (%(d_consumed)d " +"snapshots already consumed)" +msgstr "" + +#: cinder/volume/api.py:595 +msgid "Volume Snapshot status must be available or error" +msgstr "" + +#: cinder/volume/api.py:623 +msgid "Metadata property key blank" +msgstr "" + +#: cinder/volume/api.py:627 +msgid "Metadata property key greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:631 +msgid "Metadata property value greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:716 +msgid "Volume status must be available/in-use." +msgstr "" + +#: cinder/volume/api.py:719 +msgid "Volume status is in-use." +msgstr "" + +#: cinder/volume/driver.py:94 cinder/volume/drivers/netapp/nfs.py:253 +#, python-format +msgid "Recovering from a failed execute. Try number %s" +msgstr "" + +#: cinder/volume/driver.py:221 cinder/volume/drivers/emc/emc_smis_iscsi.py:119 +msgid "ISCSI provider_location not stored, using discovery" +msgstr "" + +#: cinder/volume/driver.py:269 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/driver.py:273 cinder/volume/drivers/emc/emc_smis_iscsi.py:162 +#, python-format +msgid "ISCSI Discovery: Found %s" +msgstr "" + +#: cinder/volume/driver.py:361 +#, python-format +msgid "copy_image_to_volume %s." +msgstr "" + +#: cinder/volume/driver.py:378 +#, python-format +msgid "copy_volume_to_image %s." +msgstr "" + +#: cinder/volume/driver.py:437 +#, python-format +msgid "iSCSI device not found at %s" +msgstr "" + +#: cinder/volume/driver.py:439 +#, python-format +msgid "" +"ISCSI volume not yet found at: %(host_device)s. Will rescan & retry. 
Try" +" number: %(tries)s" +msgstr "" + +#: cinder/volume/driver.py:451 +#, python-format +msgid "Found iSCSI node %(host_device)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/volume/driver.py:469 cinder/volume/manager.py:689 +#: cinder/volume/drivers/lvm.py:560 cinder/volume/drivers/lvm.py:676 +#: cinder/volume/drivers/storwize_svc.py:1344 +#: cinder/volume/drivers/emc/emc_smis_common.py:855 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:241 +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1503 +#: cinder/volume/drivers/netapp/iscsi.py:1114 +#: cinder/volume/drivers/netapp/iscsi.py:1484 +#: cinder/volume/drivers/netapp/iscsi.py:2236 +#: cinder/volume/drivers/netapp/iscsi.py:2515 +#: cinder/volume/drivers/nexenta/volume.py:317 +msgid "Updating volume status" +msgstr "" + +#: cinder/volume/driver.py:544 +msgid "Driver must implement initialize_connection" +msgstr "" + +#: cinder/volume/manager.py:122 +#, python-format +msgid "Driver path %s is deprecated, update your configuration to the new path." +msgstr "" + +#: cinder/volume/manager.py:144 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "" + +#: cinder/volume/manager.py:149 +#, python-format +msgid "volume %s stuck in a downloading state" +msgstr "" + +#: cinder/volume/manager.py:154 +#, python-format +msgid "volume %s: skipping export" +msgstr "" + +#: cinder/volume/manager.py:156 +msgid "Resuming any in progress delete operations" +msgstr "" + +#: cinder/volume/manager.py:159 +#, python-format +msgid "Resuming delete on volume: %s" +msgstr "" + +#: cinder/volume/manager.py:219 +#, python-format +msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +msgstr "" + +#: cinder/volume/manager.py:228 +#, python-format +msgid "volume %s: creating from snapshot" +msgstr "" + +#: cinder/volume/manager.py:232 +#, python-format +msgid "volume %s: creating from existing volume" +msgstr "" + +#: cinder/volume/manager.py:236 +#, python-format +msgid "volume %s: creating from image" +msgstr "" + +#: cinder/volume/manager.py:245 +#, python-format +msgid "volume %s: creating" +msgstr "" + +#: cinder/volume/manager.py:277 +#, python-format +msgid "volume %s: creating export" +msgstr "" + +#: cinder/volume/manager.py:286 +#, python-format +msgid "volume %s: create failed" +msgstr "" + +#: cinder/volume/manager.py:316 +#, python-format +msgid "volume %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:324 +#, python-format +msgid "Error: %s" +msgstr "" + +#: cinder/volume/manager.py:350 +#, python-format +msgid "volume %s: Error trying to reschedule create" +msgstr "" + +#: cinder/volume/manager.py:368 +msgid "Retry info not present, will not reschedule" +msgstr "" + +#: cinder/volume/manager.py:372 +msgid "No request spec, will not reschedule" +msgstr "" + +#: cinder/volume/manager.py:377 +#, python-format +msgid "volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d" +msgstr "" + +#: cinder/volume/manager.py:406 cinder/volume/manager.py:419 +#, python-format +msgid "volume %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:412 +msgid "volume is not local to this node" +msgstr "" + +#: cinder/volume/manager.py:417 +#, python-format +msgid "volume %s: removing export" +msgstr "" + +#: cinder/volume/manager.py:422 +#, python-format +msgid "volume %s: volume is busy" +msgstr "" + +#: cinder/volume/manager.py:441 +msgid "Failed to update usages deleting volume" +msgstr "" + +#: cinder/volume/manager.py:445 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "" + +#: 
cinder/volume/manager.py:460
+#, python-format
+msgid "snapshot %s: creating"
+msgstr ""
+
+#: cinder/volume/manager.py:466
+#, python-format
+msgid "snapshot %(snap_name)s: creating"
+msgstr ""
+
+#: cinder/volume/manager.py:484
+#, python-format
+msgid "snapshot %s: created successfully"
+msgstr ""
+
+#: cinder/volume/manager.py:492 cinder/volume/manager.py:502
+#, python-format
+msgid "snapshot %s: deleting"
+msgstr ""
+
+#: cinder/volume/manager.py:505
+#, python-format
+msgid "snapshot %s: snapshot is busy"
+msgstr ""
+
+#: cinder/volume/manager.py:530
+msgid "Failed to update usages deleting snapshot"
+msgstr ""
+
+#: cinder/volume/manager.py:533
+#, python-format
+msgid "snapshot %s: deleted successfully"
+msgstr ""
+
+#: cinder/volume/manager.py:551
+msgid "being attached by another instance"
+msgstr ""
+
+#: cinder/volume/manager.py:608
+#, python-format
+msgid "Downloaded image %(image_id)s to %(volume_id)s successfully"
+msgstr ""
+
+#: cinder/volume/manager.py:626
+#, python-format
+msgid "Uploaded volume %(volume_id)s to image (%(image_id)s) successfully"
+msgstr ""
+
+#: cinder/volume/manager.py:702
+msgid "Clear capabilities"
+msgstr ""
+
+#: cinder/volume/manager.py:706
+#, python-format
+msgid "Notification {%s} received"
+msgstr ""
+
+#: cinder/volume/volume_types.py:41
+#, python-format
+msgid "DB error: %s"
+msgstr ""
+
+#: cinder/volume/volume_types.py:50 cinder/volume/volume_types.py:97
+msgid "id cannot be None"
+msgstr ""
+
+#: cinder/volume/volume_types.py:109
+msgid "name cannot be None"
+msgstr ""
+
+#: cinder/volume/volume_types.py:128
+#, python-format
+msgid ""
+"Default volume type is not found, please check default_volume_type "
+"config: %s"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:91
+msgid "Running with CoraidDriver for ESM EtherCloud"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:102
+#, python-format
+msgid "Update session cookie %(session)s"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:108 cinder/volume/drivers/coraid.py:183
+#, python-format
+msgid "Message : %(message)s"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:125
+#, python-format
+msgid "Error while trying to set group: %(message)s"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:128
+#, python-format
+msgid "Unable to find group: %(group)s"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:161
+msgid "ESM urlOpen error"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:166
+msgid "JSON Error"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:170
+msgid "Request without URL"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:175
+#, python-format
+msgid "Configure data : %s"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:177
+#, fuzzy, python-format
+msgid "Configure response : %s"
+msgstr "відповідь %s"
+
+#: cinder/volume/drivers/coraid.py:199
+#, python-format
+msgid "Unable to retrieve volume info for volume %(volname)s"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:289
+msgid "Cannot login on Coraid ESM"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:308
+#, python-format
+msgid "Failed to create volume %(volname)s"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:321
+#, python-format
+msgid "Failed to delete volume %(volname)s"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:335
+#, python-format
+msgid "Failed to Create Snapshot %(snapname)s"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:347
+#, python-format
+msgid "Failed to Delete Snapshot %(snapname)s"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:362
+#, python-format
+msgid "Failed to Create Volume from 
Snapshot %(snapname)s"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:383
+#, python-format
+msgid ""
+"Failed to Initialize Connection. Volume Name: %(volname)s Shelf: "
+"%(shelf)s, Lun: %(lun)s"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:65
+#, python-format
+msgid "There's no Gluster config file configured (%s)"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:70
+#, python-format
+msgid "Gluster config file at %(config)s doesn't exist"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:80
+msgid "mount.glusterfs is not installed"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:98 cinder/volume/drivers/nfs.py:168
+#, python-format
+msgid "cast to %s"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:108 cinder/volume/drivers/nfs.py:178
+#, python-format
+msgid "Volume %s does not have provider_location specified, skipping"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:168 cinder/volume/drivers/nfs.py:237
+#, python-format
+msgid "Exception during mounting %s"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:247 cinder/volume/drivers/nfs.py:321
+#, python-format
+msgid "%s is already mounted"
+msgstr ""
+
+#: cinder/volume/drivers/lvm.py:82
+#, python-format
+msgid "volume group %s doesn't exist"
+msgstr ""
+
+#: cinder/volume/drivers/lvm.py:197
+#, python-format
+msgid "Size for volume: %s not found, skipping secure delete."
+msgstr ""
+
+#: cinder/volume/drivers/lvm.py:204
+#, python-format
+msgid "Performing secure delete on volume: %s"
+msgstr ""
+
+#: cinder/volume/drivers/lvm.py:218
+#, python-format
+msgid "Error unrecognized volume_clear option: %s"
+msgstr ""
+
+#: cinder/volume/drivers/lvm.py:238
+#, python-format
+msgid "snapshot: %s not found, skipping delete operations"
+msgstr ""
+
+#: cinder/volume/drivers/lvm.py:268 cinder/volume/drivers/lvm.py:656
+#, fuzzy, python-format
+msgid "Creating clone of volume: %s"
+msgstr "Створити розділ на %s ГБ"
+
+#: cinder/volume/drivers/lvm.py:345
+#, python-format
+msgid "Skipping ensure_export. No iscsi_target provision for volume: %s"
+msgstr ""
+
+#: cinder/volume/drivers/lvm.py:364
+#, python-format
+msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %s"
+msgstr ""
+
+#: cinder/volume/drivers/lvm.py:378
+msgid "Detected inconsistency in provider_location id"
+msgstr ""
+
+#: cinder/volume/drivers/lvm.py:430
+#, python-format
+msgid "Symbolic link %s not found"
+msgstr ""
+
+#: cinder/volume/drivers/lvm.py:510 cinder/volume/drivers/lvm.py:524
+#, python-format
+msgid "Skipping remove_export. No iscsi_target provisioned for volume: %s"
+msgstr ""
+
+#: cinder/volume/drivers/lvm.py:542
+#, python-format
+msgid ""
+"Skipping remove_export. 
No iscsi_target is presently exported for volume:" +" %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:583 +msgid "Error retrieving volume status: " +msgstr "" + +#: cinder/volume/drivers/nfs.py:141 +#, python-format +msgid "There's no NFS config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/nfs.py:146 +#, python-format +msgid "NFS config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/rbd.py:64 +#, python-format +msgid "rbd has no pool %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:87 cinder/volume/drivers/sheepdog.py:134 +msgid "error refreshing volume stats" +msgstr "" + +#: cinder/volume/drivers/rbd.py:220 +msgid "Not stored in rbd" +msgstr "" + +#: cinder/volume/drivers/rbd.py:224 +msgid "Blank components" +msgstr "" + +#: cinder/volume/drivers/rbd.py:227 +msgid "Not an rbd snapshot" +msgstr "" + +#: cinder/volume/drivers/rbd.py:242 +#, python-format +msgid "%s is in a different ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:253 +#, python-format +msgid "Unable to read image %s" +msgstr "" + +#: cinder/volume/drivers/scality.py:63 +msgid "Value required for 'scality_sofs_config'" +msgstr "" + +#: cinder/volume/drivers/scality.py:74 +#, python-format +msgid "Cannot access 'scality_sofs_config': %s" +msgstr "" + +#: cinder/volume/drivers/scality.py:80 +msgid "Cannot execute /sbin/mount.sofs" +msgstr "" + +#: cinder/volume/drivers/scality.py:101 +msgid "Cannot mount Scality SOFS, check syslog for errors" +msgstr "" + +#: cinder/volume/drivers/scality.py:135 +#, python-format +msgid "Cannot find volume dir for Scality SOFS at '%s'" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:47 +#, python-format +msgid "Sheepdog is not working: %s" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:52 +msgid "Sheepdog is not working" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:129 +#, python-format +msgid "Payload for SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:146 +#, python-format +msgid "Call to json.loads() raised an exception: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:152 +#, python-format +msgid "Results of SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:156 +#, python-format +msgid "Clone operation encountered: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:158 +#, python-format +msgid "Waiting for outstanding operation before retrying snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:164 +#, python-format +msgid "Detected xDBVersionMismatch, retry %s of 5" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:171 +#, fuzzy, python-format +msgid "API response: %s" +msgstr "відповідь %s" + +#: cinder/volume/drivers/solidfire.py:191 +#, python-format +msgid "Found solidfire account: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:220 +#, python-format +msgid "solidfire account: %s does not exist, create it..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:281 +#, python-format +msgid "Failed to retrieve volume SolidFire-ID: %s in get_by_account!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:338 +msgid "Failed to get model update from clone" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:364 +#, python-format +msgid "More than one valid preset was detected, using %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:397 +#, python-format +msgid "Mapped SolidFire volumeID %(sfid)s to cinder ID %(uuid)s." 
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:406
+#, python-format
+msgid "Volume %s not found on SF Cluster."
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:409
+#, python-format
+msgid "Found %(count)s volumes mapped to id: %(uuid)s."
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:476
+msgid "Enter SolidFire delete_volume..."
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:480
+#, python-format
+msgid "Account for Volume ID %s was not found on the SolidFire Cluster!"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:482
+msgid "This usually means the volume was never successfully created."
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:497
+#, python-format
+msgid "Volume ID %s was not found on the SolidFire Cluster!"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:500
+msgid "Leaving SolidFire delete_volume"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:504
+msgid "Executing SolidFire ensure_export..."
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:509
+msgid "Executing SolidFire create_export..."
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:558
+msgid "Updating cluster status info"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:566
+msgid "Failed to get updated stats"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:173
+#, python-format
+msgid "WWPN on node %(node)s: %(wwpn)s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:179
+msgid "enter: do_setup"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:190
+#, python-format
+msgid "pool %s doesn't exist"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:206
+msgid "Failed to get license information."
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:265
+msgid "do_setup: No configured nodes"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:267
+msgid "leave: do_setup"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:289
+msgid "enter: check_for_setup_error"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:295
+#: cinder/volume/drivers/netapp/iscsi.py:169
+#: cinder/volume/drivers/netapp/iscsi.py:1182
+#: cinder/volume/drivers/netapp/iscsi.py:1535
+#: cinder/volume/drivers/netapp/nfs.py:109
+#: cinder/volume/drivers/netapp/nfs.py:328
+#: cinder/volume/drivers/netapp/nfs.py:379
+#: cinder/volume/drivers/san/hp/hp_3par_common.py:118
+#, python-format
+msgid "%s is not set"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:301
+msgid ""
+"Password or SSH private key is required for authentication: set either "
+"san_password or san_private_key option"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:309
+#, python-format
+msgid ""
+"Illegal value %d specified for storwize_svc_flashcopy_timeout: valid "
+"values are between 0 and 600"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:317
+msgid "leave: check_for_setup_error"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:331
+#, python-format
+msgid "ensure_export: Volume %s not found on storage"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:356
+#, python-format
+msgid "enter: _get_chap_secret_for_host: host name %s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:392
+#, python-format
+msgid ""
+"leave: _get_chap_secret_for_host: host name %(host_name)s with secret "
+"%(chap_secret)s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:415
+msgid "_create_host: Cannot clean host name. 
Host name is not unicode or string"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:483
+#, python-format
+msgid "enter: _get_host_from_connector: prefix %s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:511
+#, python-format
+msgid "leave: _get_host_from_connector: host %s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:523
+#, python-format
+msgid "enter: _create_host: host %s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:538
+msgid "_create_host: No connector ports"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:551
+#, python-format
+msgid "leave: _create_host: host %(host)s - %(host_name)s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:574
+#, python-format
+msgid "enter: _map_vol_to_host: volume %(volume_name)s to host %(host_name)s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:609
+msgid ""
+"storwize_svc_multihostmap_enabled is set to False, multi host mapping "
+"is not allowed"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:619
+#, python-format
+msgid "volume %s mapping to multi host"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:625
+#, python-format
+msgid ""
+"leave: _map_vol_to_host: LUN %(result_lun)s, volume %(volume_name)s, host"
+" %(host_name)s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:635
+#, python-format
+msgid "enter: _delete_host: host %s "
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:643
+#, python-format
+msgid "leave: _delete_host: host %s "
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:675
+#, python-format
+msgid "enter: initialize_connection: volume %(vol)s with connector %(conn)s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:691
+msgid "_create_host failed to return the host name."
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:702
+#, python-format
+msgid "initialize_connection: Failed to get attributes for volume %s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:709
+#, python-format
+msgid "Did not find expected column name in lsvdisk: %s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:711
+#, python-format
+msgid "initialize_connection: Missing volume attribute for volume %s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:728
+#, python-format
+msgid ""
+"initialize_connection: No node found in I/O group %(gid)s for volume "
+"%(vol)s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:736
+#, python-format
+msgid "initialize_connection: Did not find a preferred node for volume %s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:769
+#, python-format
+msgid ""
+"initialize_connection: Failed to collect return properties for volume "
+"%(vol)s and connector %(conn)s.\n"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:774
+#, python-format
+msgid ""
+"leave: initialize_connection:\n"
+" volume: %(vol)s\n"
+" connector %(conn)s\n"
+" properties: %(prop)s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:792
+#, python-format
+msgid "enter: terminate_connection: volume %(vol)s with connector %(conn)s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:802
+msgid "_get_host_from_connector failed to return the host name for connector"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:817
+#, python-format
+msgid ""
+"terminate_connection: No mapping of volume %(vol_name)s to host "
+"%(host_name)s found"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:825
+#, python-format
+msgid "leave: terminate_connection: volume %(vol)s with connector %(conn)s"
+msgstr ""
+
+#: 
cinder/volume/drivers/storwize_svc.py:883
+msgid "protocol must be specified as ' iSCSI' or ' FC'"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:907
+#, python-format
+msgid "enter: _create_vdisk: vdisk %s "
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:945
+#, python-format
+msgid ""
+"_create_vdisk %(name)s - did not find success message in CLI output.\n"
+" stdout: %(out)s\n"
+" stderr: %(err)s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:950
+#, python-format
+msgid "leave: _create_vdisk: volume %s "
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:962
+#: cinder/volume/drivers/storwize_svc.py:976
+#, python-format
+msgid ""
+"create FC mapping from %(source)s to %(target)s - did not find success "
+"message in CLI output.\n"
+" stdout: %(out)s\n"
+" stderr: %(err)s\n"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:988
+#: cinder/volume/drivers/storwize_svc.py:998
+#, python-format
+msgid ""
+"create FC mapping from %(source)s to %(target)s - did not find mapping id"
+" in CLI output.\n"
+" stdout: %(out)s\n"
+" stderr: %(err)s\n"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:1012
+#, python-format
+msgid ""
+"_prepare_fc_map: Failed to prepare FlashCopy from %(source)s to "
+"%(target)s.\n"
+"stdout: %(out)s\n"
+" stderr: %(err)s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:1039
+#, python-format
+msgid ""
+"Unexpected mapping status %(status)s for mapping %(id)s. Attributes: "
+"%(attr)s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:1050
+#, python-format
+msgid ""
+"Mapping %(id)s prepare failed to complete within the allotted %(to)d "
+"seconds timeout. Terminating."
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:1055
+#, python-format
+msgid ""
+"_prepare_fc_map: Failed to start FlashCopy from %(source)s to %(target)s "
+"with exception %(ex)s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:1062
+#, python-format
+msgid "_prepare_fc_map: %s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:1069
+#, python-format
+msgid ""
+"_start_fc_map: Failed to start FlashCopy from %(source)s to %(target)s.\n"
+"stdout: %(out)s\n"
+" stderr: %(err)s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:1080
+#, python-format
+msgid ""
+"enter: _run_flashcopy: execute FlashCopy from source %(source)s to target"
+" %(target)s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:1092
+#, python-format
+msgid "leave: _run_flashcopy: FlashCopy started from %(source)s to %(target)s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:1100
+#, python-format
+msgid "enter: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:1107
+#, python-format
+msgid "_create_copy: Source vdisk %s does not exist"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:1119
+#, python-format
+msgid ""
+"_create_copy: cannot get source vdisk %(src)s capacity from vdisk "
+"attributes %(attr)s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:1129
+#, python-format
+msgid "leave: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:1134
+#, python-format
+msgid "enter: _get_flashcopy_mapping_attributes: mapping %s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:1156
+#, python-format
+msgid ""
+"leave: _get_flashcopy_mapping_attributes: mapping %(fc_map_id)s, "
+"attributes %(attributes)s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:1164
+#, python-format
+msgid 
"enter: _is_vdisk_defined: vdisk %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1166 +#, python-format +msgid "leave: _is_vdisk_defined: vdisk %(vol)s with %(str)s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1194 +#, python-format +msgid "enter: _delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1199 +#, python-format +msgid "warning: Tried to delete vdisk %s but it does not exist." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1225 +#, python-format +msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1258 +#, python-format +msgid "leave: _delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1283 +msgid "create_volume_from_snapshot: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1297 +msgid "create_cloned_volume: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1361 +msgid "_update_volume_status: Could not get system name" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1373 +msgid "Could not get pool data from the storage" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1374 +msgid "_update_volume_status: Could not get storage pool data" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1412 +msgid "" +"Illegal value specified for storwize_svc_vol_rsize: set to either a " +"percentage (0-100) or -1" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1418 +msgid "" +"Illegal value specified for storwize_svc_vol_warning: set to a percentage" +" (0-100)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1425 +msgid "" +"Illegal value specified for storwize_svc_vol_grainsize: set to either 32," +" 64, 128, or 256" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1432 +msgid "System does not support compression" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1437 +msgid "If compression is set to True, rsize must also be set (not equal to -1)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1443 +#, python-format +msgid "" +"Illegal value %(prot)s specified for storwize_svc_connection_protocol: " +"valid values are %(enabled)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1452 +msgid "" +"Multipath is currently only supported for FC connections and not iSCSI. 
" +"(This is a Nova limitation.)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1464 +#, python-format +msgid "enter: _execute_command_and_parse_attributes: command %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1471 +#, python-format +msgid "" +"CLI Exception output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1488 +#, python-format +msgid "" +"leave: _execute_command_and_parse_attributes:\n" +"command: %(cmd)s\n" +"attributes: %(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1507 +#, python-format +msgid "" +"_get_hdr_dic: attribute headers and values do not match.\n" +" Headers: %(header)s\n" +" Values: %(row)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1517 +#, python-format +msgid "" +"%(fun)s: Failed with unexpected CLI output.\n" +" Command: %(cmd)s\n" +"stdout: %(out)s\n" +"stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1531 +#, python-format +msgid "" +"%(fun)s: Failed with unexpected CLI output.\n" +" Command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1539 +#, python-format +msgid "Did not find expected column in %(fun)s: %(hdr)s" +msgstr "" + +#: cinder/volume/drivers/windows.py:142 +#, python-format +msgid "Creating folder %s " +msgstr "" + +#: cinder/volume/drivers/windows.py:207 +#: cinder/volume/drivers/nexenta/volume.py:203 +#, python-format +msgid "Ignored target creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/windows.py:213 +#, python-format +msgid "Disk not found: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:218 +#, python-format +msgid "Sending %(method)s to %(url)s. Body \"%(body)s\"" +msgstr "" + +#: cinder/volume/drivers/zadara.py:240 +#, python-format +msgid "Operation completed. %(data)s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:350 +#, python-format +msgid "Volume %(name)s could not be found. It might be already deleted" +msgstr "" + +#: cinder/volume/drivers/zadara.py:447 +#, python-format +msgid "Attach properties: %(properties)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:42 +msgid "" +"Module PyWBEM not installed. Install PyWBEM using the python-pywbem " +"package." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:78 +msgid "Entering create_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:82 +#, python-format +msgid "Create Volume: %(volume)s Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:90 +#, python-format +msgid "Create Volume: %(volume)s Storage type: %(storage_type)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:97 +#, python-format +msgid "" +"Create Volume: %(volume)s Pool: %(pool)s Storage System: " +"%(storage_system)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:106 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Storage Configuration Service not " +"found for pool %(storage_type)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:114 +#, python-format +msgid "" +"Create Volume: %(name)s Method: CreateOrModifyElementFromStoragePool " +"ConfigServicie: %(service)s ElementName: %(name)s InPool: %(pool)s " +"ElementType: 5 Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:129 +#, python-format +msgid "Create Volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:136 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:143 +#, python-format +msgid "Leaving create_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:151 +msgid "Entering create_volume_from_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:156 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:166 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Snapshot Instance: %(snapshotinstance)s Storage " +"System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:176 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Create Volume from Snapshot is NOT supported on VMAX." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:187 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Cannot find Replication Service to create volume from " +"snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:196 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Method: CreateElementReplica ReplicationService: " +"%(service)s ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:217 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s " +"Snapshot:%(snapshotname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:229 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Successfully clone volume from snapshot. Finding the " +"clone relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:240 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Remove the clone relationship. Method: " +"ModifyReplicaSynchronization ReplicationService: %(service)s Operation: " +"8 Synchronization: %(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:256 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:265 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. 
Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:277 +#, python-format +msgid "" +"Leaving create_volume_from_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:286 +msgid "Entering create_cloned_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:291 +#, python-format +msgid "" +"Create a Clone from Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:301 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Source Instance: %(src_instance)s Storage System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:311 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Cannot find Replication Service to create cloned volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:320 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Method: CreateElementReplica ReplicationService: %(service)s " +"ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:341 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source " +"Volume:%(srcname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:353 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Successfully cloned volume from source volume. Finding the clone " +"relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:364 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Remove the clone relationship. Method: ModifyReplicaSynchronization " +"ReplicationService: %(service)s Operation: 8 Synchronization: " +"%(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:380 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:389 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:401 +#, python-format +msgid "" +"Leaving create_cloned_volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:410 +msgid "Entering delete_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:412 +#, python-format +msgid "Delete Volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:419 +#, python-format +msgid "Volume %(name)s not found on the array. No volume to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:429 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Storage Configuration Service not " +"found." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:437 +#, python-format +msgid "Delete Volume: %(name)s DeviceID: %(deviceid)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:441 +#, python-format +msgid "" +"Delete Volume: %(name)s Method: EMCReturnToStoragePool ConfigServic: " +"%(service)s TheElement: %(vol_instance)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:454 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:463 +#, python-format +msgid "Leaving delete_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:470 +msgid "Entering create_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:474 +#, python-format +msgid "Create snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:486 +#, python-format +msgid "Device ID: %(deviceid)s: Storage System: %(storagesystem)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:493 +#: cinder/volume/drivers/emc/emc_smis_common.py:495 +#: cinder/volume/drivers/emc/emc_smis_common.py:565 +#, python-format +msgid "Cannot find Replication Service to create snapshot for volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:500 +#, python-format +msgid "" +"Create Snapshot: Method: CreateElementReplica: Target: %(snapshot)s " +"Source: %(volume)s Replication Service: %(service)s ElementName: " +"%(elementname)s Sync Type: 7 SourceElement: %(sourceelement)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:516 +#, python-format +msgid "" +"Create Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:525 +#, python-format +msgid "Error Create Snapshot: (snapshot)s Volume: %(volume)s Error: %(errordesc)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:533 +#, python-format +msgid "" +"Leaving create_snapshot: Snapshot: %(snapshot)s Volume: %(volume)s " +"Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:539 +msgid "Entering delete_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:543 +#, python-format +msgid "Delete Snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:549 +#, python-format +msgid "" +"Delete Snapshot: %(snapshot)s: volume: %(volume)s. Finding " +"StorageSychronization_SV_SV." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:557 +#, python-format +msgid "" +"Snapshot: %(snapshot)s: volume: %(volume)s not found on the array. No " +"snapshot to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:572 +#, python-format +msgid "" +"Delete Snapshot: Target: %(snapshot)s Source: %(volume)s. Method: " +"ModifyReplicaSynchronization: Replication Service: %(service)s " +"Operation: 19 Synchronization: %(sync_name)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:588 +#, python-format +msgid "" +"Delete Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:597 +#, python-format +msgid "" +"Error Delete Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. 
Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:609 +#, python-format +msgid "" +"Leaving delete_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:619 +#, python-format +msgid "Create export: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:624 +#, python-format +msgid "create_export: Volume: %(volume)s Device ID: %(device_id)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:646 +#, python-format +msgid "" +"ExposePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(lun_name)s" +" InitiatorPortIDs: %(initiator)s DeviceAccesses: 2" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:661 +#, python-format +msgid "ExposePaths parameter LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:672 +#, fuzzy, python-format +msgid "Error mapping volume %s." +msgstr "Від'єднати том %s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:676 +#, python-format +msgid "ExposePaths for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:692 +#, python-format +msgid "" +"HidePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(device_id)s " +"LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:705 +#, python-format +msgid "Error unmapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:709 +#, python-format +msgid "HidePaths for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:722 +#, python-format +msgid "" +"AddMembers: ConfigServicie: %(service)s MaskingGroup: %(masking_group)s" +" Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:735 +#, python-format +msgid "Error mapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:740 +#, python-format +msgid "AddMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:753 +#, python-format +msgid "" +"RemoveMembers: ConfigServicie: %(service)s MaskingGroup: " +"%(masking_group)s Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:766 +#, python-format +msgid "Error unmapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:771 +#, python-format +msgid "RemoveMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:777 +#, python-format +msgid "Map volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:786 +#: cinder/volume/drivers/emc/emc_smis_common.py:816 +#, python-format +msgid "Cannot find Controller Configuration Service for storage system %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:800 +#, python-format +msgid "Unmap volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:806 +#, python-format +msgid "Volume %s is not mapped. No volume to unmap." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:830 +#, python-format +msgid "Initialize connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:836 +#, python-format +msgid "Volume %s is already mapped." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:848 +#, python-format +msgid "Terminate connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:880 +#, python-format +msgid "Found Storage Type: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:883 +msgid "Storage type not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:899 +#, python-format +msgid "Found Masking View: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:902 +msgid "Masking View not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:924 +msgid "Ecom user not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:944 +#, python-format +msgid "Ecom IP: %(ecomIp)s Port: %(ecomPort)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:947 +msgid "Ecom server not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:954 +#, fuzzy +msgid "Cannot connect to ECOM server" +msgstr "Оновлено з'єднання до черги" + +#: cinder/volume/drivers/emc/emc_smis_common.py:966 +#, python-format +msgid "Found Replication Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:979 +#, python-format +msgid "Found Storage Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:992 +#, python-format +msgid "Found Controller Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1005 +#, python-format +msgid "Found Storage Hardware ID Management Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1049 +#, python-format +msgid "Pool %(storage_type)s is not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1055 +#, python-format +msgid "Storage system not found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1061 +#, python-format +msgid "Pool: %(pool)s SystemName: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1077 +#, python-format +msgid "Pool name: %(poolname)s System name: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1109 +#, python-format +msgid "Volume %(volumename)s not found on the array." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1112 +#, python-format +msgid "Volume name: %(volumename)s Volume instance: %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1125 +#, python-format +msgid "Source: %(volumename)s Target: %(snapshotname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1148 +#, python-format +msgid "" +"Source: %(volumename)s Target: %(snapshotname)s. Storage Synchronized " +"not found. " +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1153 +#, python-format +msgid "" +"Storage system: %(storage_system)s Storage Synchronized instance: " +"%(sync)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1179 +#, python-format +msgid "Error finding %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1183 +#, python-format +msgid "Found %(name)s: %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1243 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage_system)s " +"and initiator %(initiator)s is %(ctrl)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1282 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage volume %(vol)s and initiator" +" %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1295 +#, python-format +msgid "" +"Volume %(name)s not found on the array. Cannot determine if there are " +"volumes mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1307 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage)s and " +"%(connector)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1319 +#, python-format +msgid "" +"Found %(numVolumesMapped)d volumes on storage system %(storage)s mapped " +"to %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1354 +#, python-format +msgid "Available device number on %(storage)s: %(device)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1397 +#, python-format +msgid "Device number not found for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1402 +#, python-format +msgid "Found device number %(device)d for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1412 +#, python-format +msgid "Device info: %(data)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1434 +#, python-format +msgid "Masking view: %(view)s DeviceMaskingGroup: %(masking)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1456 +#, python-format +msgid "Found Storage Processor System: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1484 +#, python-format +msgid "" +"iSCSIProtocolEndpoint for storage system %(storage_system)s and SP %(sp)s" +" is %(endpoint)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1513 +msgid "Error finding Storage Hardware ID Service." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1519 +#, python-format +msgid "" +"EMCGetTargetEndpoints: Service: %(service)s Storage HardwareIDs: " +"%(hardwareids)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1531 +msgid "Error finding Target WWNs." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1541 +#, python-format +msgid "Add target WWN: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1543 +#, python-format +msgid "Target WWNs: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1559 +#, python-format +msgid "Storage Hardware IDs for %(wwpns)s is %(foundInstances)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:158 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:167 +#, python-format +msgid "Cannot find device number for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:197 +#, python-format +msgid "Found iSCSI endpoint: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:204 +#, python-format +msgid "ISCSI endpoint not found for SP %(sp)s on storage system %(storage)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:221 +#, python-format +msgid "ISCSI properties: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:113 +msgid "read timed out" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:138 +msgid "do_setup." 
+msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:144 +msgid "check_for_setup_error." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:149 +msgid "check_for_setup_error: Can not get device type." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:153 +#, python-format +msgid "check_for_setup_error: Device type is:%(type)s, version is:%(version)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:160 +msgid "" +"check_for_setup_error: Product version not right. Please make sure the " +"product version is V1." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:171 +msgid "_get_device_type: Storage Pool must be configured." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:180 +#, python-format +msgid "create_volume:volume name: %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:194 +#, python-format +msgid "delete_volume: volume name: %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:201 +#, python-format +msgid "delete_volume:No need to delete volume. Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:210 +#, python-format +msgid "create_export: volume name:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:214 +#, python-format +msgid "create_export:Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:234 +#, python-format +msgid "initialize_connection: volume name: %(volume)s. initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:249 +#, python-format +msgid "" +"initialize_connection:Failed to find target ip for " +"initiator:%(initiatorname)s, please check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:259 +#, python-format +msgid "initialize_connection:Failed to find target iSCSI iqn. Target IP:%(ip)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:297 +#, python-format +msgid "" +"initialize_connection:host name: %(host)s, initiator name: %(ini)s, " +"hostport name: %(port)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:307 +#, python-format +msgid "" +"initialize_connection:Failed to find the given volume. volume " +"name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:359 +#, python-format +msgid "terminate_connection:volume name: %(volume)s, initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:368 +#, python-format +msgid "terminate_connection:Host does not exist. Host name:%(host)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:377 +#, python-format +msgid "terminate_connection:volume does not exist. volume name:%(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:396 +#, python-format +msgid "" +"terminate_connection:No map between host and volume. Host " +"name:%(hostname)s, volume name:%(volumename)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:414 +#, python-format +msgid "" +"terminate_connection:No initiator is added to the host. Host " +"name:%(hostname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:427 +#, python-format +msgid "create_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:434 +msgid "create_snapshot:Device does not support snapshot." 
+msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:440 +msgid "create_snapshot:Resource pool needs 1GB valid size at least." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:447 +#, python-format +msgid "create_snapshot:Volume does not exist. Volume name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:456 +#, python-format +msgid "create_snapshot:Snapshot does not exist. Snapshot name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:468 +#, python-format +msgid "delete_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:475 +msgid "delete_snapshot:Device does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:484 +#, python-format +msgid "delete_snapshot:Snapshot does not exist. snapshot name:%(snap)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:497 +#, python-format +msgid "" +"create_volume_from_snapshot:snapshot name:%(snapshot)s, volume " +"name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:505 +#, python-format +msgid "" +"create_volume_from_snapshot:Device does not support create volume from " +"snapshot. Volume name:%(volume)s, snapshot name:%(snapshot)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:516 +#, python-format +msgid "" +"create_volume_from_snapshot:Snapshot does not exist. Snapshot " +"name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:567 +msgid "Config file is wrong. Controler IP, UserName and UserPassword must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:573 +#, python-format +msgid "_check_conf_file: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:584 +#, python-format +msgid "_read_xml:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:612 +#, python-format +msgid "Write login information to xml error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:616 +#, python-format +msgid "_get_login_info error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:638 +#, python-format +msgid "" +"Config file is wrong. LUNType must be \"Thin\" or \"Thick\". " +"LUNType:%(type)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:665 +msgid "_get_lun_set_info:Use default prefetch type. Prefetch type:Intelligent." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:676 +msgid "" +"_get_lun_set_info:No available pools! Please check whether storage pool " +"is created." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:686 +#, python-format +msgid "_get_lun_set_info:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:736 +msgid "" +"_get_maximum_pool:maxpoolid is None. Please check config file and make " +"sure the \"Name\" in \"StoragePool\" is right." 
+msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:754 +#, python-format +msgid "_get_iscsi_info:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:764 +#, python-format +msgid "CLI command:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:831 +#, python-format +msgid "_execute_cli:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:838 +#, python-format +msgid "" +"_name_translate:Name in cinder: %(old)s, new name in storage system: " +"%(new)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:872 +#, python-format +msgid "" +"_create_hostgroup:Failed to Create hostgroup. Hostgroup name: %(name)s. " +"out:%(out)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:901 +#, python-format +msgid "" +"_add_host:Failed to add host to hostgroup. host name:%(host)s hostgroup " +"id:%(hostgroup)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:927 +#, python-format +msgid "" +"_add_initiator:Failed to add initiator. initiator name:%(name)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:941 +#, python-format +msgid "" +"_delete_initiator:ERROE:Failed to delete initiator. initiator " +"name:%(name)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:991 +#, python-format +msgid "" +"_add_hostport:Failed to add hostport. port name:%(port)s port " +"information:%(info)s host id:%(host)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1009 +#, python-format +msgid "_delete_hostport:Failed to delete host port. port id:%(portid)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1016 +#, python-format +msgid "_get_tgt_iqn:iSCSI IP is %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1049 +#, python-format +msgid "_get_tgt_iqn:iSCSI target iqn is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1086 +#, python-format +msgid "" +"_map_lun:Failed to add hostmap. hostid:%(host)s lunid:%(lun)s " +"hostlunid:%(hostlunid)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1122 +#, python-format +msgid "" +"_delete_map:There are IOs accessing the system. Retry to delete host map." +" map id:%(mapid)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1128 +#, python-format +msgid "_delete_map:Failed to delete host map. mapid:%(mapid)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1142 +#, python-format +msgid "_delete_host: Failed delete host. host id:%(hostid)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1223 +#, python-format +msgid "" +"_active_snapshot:Failed to active snapshot. snapshot id:%(name)s. " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1237 +#, python-format +msgid "" +"_disable_snapshot:Failed to disable snapshot. snapshot id:%(id)s. " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1251 +#, python-format +msgid "" +"_delete_snapshot:Failed to delete snapshot. snapshot id:%(id)s. " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1308 +#, python-format +msgid "_create_volume:Failed to Create volume. volume name:%(name)s. out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1321 +#, python-format +msgid "_delete_volume:Failed to delete volume. 
Volume name:%(name)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1338 +#, python-format +msgid "" +"_create_luncopy:Failed to Create LUNcopy. LUNcopy name:%(name)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1352 +#, python-format +msgid "" +"_start_luncopy:Failed to start LUNcopy. LUNcopy id:%(luncopyid)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1382 +#, python-format +msgid "" +"_wait_for_luncopy:LUNcopy status is not normal. LUNcopy " +"name:%(luncopyname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1415 +#, python-format +msgid "" +"_delete_luncopy:Failed to delete LUNcopy. LUNcopy id:%(luncopyid)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1430 +#, python-format +msgid "" +"_create_snapshot:Failed to Create snapshot. Snapshot name:%(name)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1468 +#, python-format +msgid "" +"_change_lun_controller:Failed to change lun owning controller. lun " +"id:%(lunid)s. new controller:%(controller)s. out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1484 +msgid "_is_resource_pool_enough:Resource pool for snapshot has not been added." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:123 +#, python-format +msgid "API %(name)s failed: %(reason)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:134 +#: cinder/volume/drivers/netapp/iscsi.py:1167 +#, python-format +msgid "Using WSDL: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:144 +#, python-format +msgid "Using DFM server: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:149 +#, python-format +msgid "Using storage service: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:154 +#, python-format +msgid "Using storage service prefix: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:160 +#, python-format +msgid "Using vfiler: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:173 +msgid "Either netapp_storage_service or netapp_storage_service_prefix must be set" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:204 +#, fuzzy +msgid "Connected to DFM server" +msgstr "Оновлено з'єднання до черги" + +#: cinder/volume/drivers/netapp/iscsi.py:282 +#, python-format +msgid "Discovered %(dataset_count)s datasets and %(lun_count)s LUNs" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:317 +#, python-format +msgid "Job failed: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:346 +msgid "" +"Attempt to use volume_type without specifying " +"netapp_storage_service_prefix flag." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:350 +msgid "" +"You must set the netapp_storage_service flag in order to create volumes " +"with no volume_type." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:420 +msgid "Failed to provision dataset member" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:435 +msgid "No LUN was created by the provision job" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:463 +#: cinder/volume/drivers/netapp/iscsi.py:1250 +#, python-format +msgid "No entry in LUN table for volume %(name)s." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:481 +msgid "Failed to remove and delete dataset LUN member" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:497 +msgid "Failed to remove and delete dataset Qtree member" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:538 +#, python-format +msgid "No entry in LUN table for volume %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:558 +#, python-format +msgid "Failed to get LUN details for LUN ID %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:575 +#, python-format +msgid "Failed to get host details for host ID %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:812 +#: cinder/volume/drivers/netapp/iscsi.py:859 +#, python-format +msgid "No LUN ID for volume %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:821 +#, python-format +msgid "Failed to get target portal for filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:826 +#, python-format +msgid "Failed to get target IQN for filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:930 +msgid "" +"Failed to get LUN list. Is the DFM host time-synchronized with Cinder " +"host?" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1042 +#: cinder/volume/drivers/netapp/iscsi.py:1381 +#: cinder/volume/drivers/netapp/iscsi.py:1699 +#: cinder/volume/drivers/netapp/nfs.py:74 +#, python-format +msgid "" +"Cannot create volume of size %(vol_size)s from snapshot of size " +"%(snap_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1054 +#, python-format +msgid "" +"Cannot create volume of type %(new_type)s from snapshot of type " +"%(old_type)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1075 +#: cinder/volume/drivers/netapp/nfs.py:273 +#, python-format +msgid "" +"Cannot create clone of size %(vol_size)s from volume of size " +"%(src_vol_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1086 +#, python-format +msgid "Cannot create clone of type %(new_type)s from volume of type %(old_type)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1143 +#, python-format +msgid "No metadata property %(prop)s defined for the LUN %(name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1216 +#: cinder/volume/drivers/netapp/iscsi.py:1561 +msgid "Success getting LUN list from server" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1238 +#: cinder/volume/drivers/netapp/iscsi.py:1576 +#, python-format +msgid "Created LUN with name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1254 +#: cinder/volume/drivers/netapp/iscsi.py:1369 +#: cinder/volume/drivers/netapp/iscsi.py:1593 +#, python-format +msgid "Destroyed LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1291 +#, python-format +msgid "Mapped LUN %(handle)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1298 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(handle)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1303 +#: cinder/volume/drivers/netapp/iscsi.py:1637 +#, python-format +msgid "Failed to get LUN target details for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1307 +#: cinder/volume/drivers/netapp/iscsi.py:1648 +#, python-format +msgid "Failed to get target portal for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1311 +#: cinder/volume/drivers/netapp/iscsi.py:1651 +#, python-format +msgid "Failed to get target IQN for the LUN %s" +msgstr 
"" + +#: cinder/volume/drivers/netapp/iscsi.py:1344 +#, python-format +msgid "Unmapped LUN %(handle)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1365 +#, python-format +msgid "No entry in LUN table for snapshot %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1409 +#: cinder/volume/drivers/netapp/iscsi.py:1907 +msgid "Object is not a NetApp LUN." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1419 +#: cinder/volume/drivers/netapp/iscsi.py:2188 +#, python-format +msgid "Cloned LUN with new name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1439 +#, python-format +msgid "Could not find handle for LUN named %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1458 +#: cinder/volume/drivers/netapp/iscsi.py:1936 +#, python-format +msgid "" +"Cannot clone volume of size %(vol_size)s from src volume of size " +"%(src_vol_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1518 +#, python-format +msgid "Using NetApp filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1585 +#, python-format +msgid "No entry in LUN table for volume/snapshot %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1628 +#, python-format +msgid "Mapped LUN %(name)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1632 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(name)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1688 +#, python-format +msgid "Snapshot %s deletion successful" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1717 +#, python-format +msgid "Unmapped LUN %(name)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1735 +#, python-format +msgid "Failed to get vol with required size for volume: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1811 +#, python-format +msgid "Error mapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1829 +#, python-format +msgid "Error unmapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1923 +#, python-format +msgid "Could not find attribute for LUN named %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2057 +#, python-format +msgid "No iscsi service found for vserver %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2192 +#, python-format +msgid "No clonned lun named %s found on the filer" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2377 +#, python-format +msgid "Error finding luns for volume %(vol)s. Verify volume exists." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2481 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s completed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2484 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s failed" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:312 +#, python-format +msgid "" +"Cloning with params ip %(host_ip)s, exp_path\n" +" %(export_path)s, vol %(volume_name)s,\n" +" clone_name %(clone_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:461 +#, python-format +msgid "No interface found on cluster for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:487 +#, python-format +msgid "" +"No volume on cluster with vserver\n" +" %(vserver)s and junction path " +"%(junction)s\n" +" " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:493 +#, python-format +msgid "" +"Cloning with params volume %(volume)s,src %(src_path)s,\n" +" dest %(dest_path)s, vserver %(vserver)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:547 +#, python-format +msgid "No storage path found for export path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:555 +#, python-format +msgid "Cloning with src %(src_path)s, dest %(dest_path)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/__init__.py:27 +msgid "Nexenta SA returned the error" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:64 +#, python-format +msgid "Sending JSON data: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:69 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:75 +msgid "No headers in server response" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:76 +msgid "Bad response from server" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:79 +#, fuzzy, python-format +msgid "Got response: %s" +msgstr "відповідь %s" + +#: cinder/volume/drivers/nexenta/volume.py:99 +#, python-format +msgid "Volume %s does not exist in Nexenta SA" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:211 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:220 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:228 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:238 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:274 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:282 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:74 +#, python-format +msgid "CLIQ command returned %s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:80 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:88 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. 
Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:118 +#, python-format +msgid "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. " +"Result=%(_xml)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:171 +#, python-format +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:235 +msgid "local_path not supported" +msgstr "" + +#: cinder/volume/drivers/san/san.py:148 +#, fuzzy, python-format +msgid "Error running SSH command: %s" +msgstr "Неочікувана помилка при виконанні команди." + +#: cinder/volume/drivers/san/san.py:169 +msgid "Specify san_password or san_private_key" +msgstr "" + +#: cinder/volume/drivers/san/san.py:173 +msgid "san_ip must be set" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:80 +#, python-format +msgid "Cannot parse list-view output: %s" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:174 +#, python-format +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:257 +#, python-format +msgid "SSH Command failed after '%(total_attempts)r' attempts: '%(command)s'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:261 +#, fuzzy, python-format +msgid "Error running ssh command: %s" +msgstr "Неочікувана помилка при виконанні команди." + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:446 +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:104 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:103 +#, python-format +msgid "CPG (%s) doesn't exist on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:493 +#, python-format +msgid "Must specify a valid persona %(valid)s, value '%(persona)s' is invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:532 +#, python-format +msgid "" +"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " +"invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:569 +#, python-format +msgid "Volume (%s) already exists on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:622 +#, fuzzy, python-format +msgid "Unexpected state while cloning %s" +msgstr "Неочікувана помилка при виконанні команди." + +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:97 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:96 +msgid "Login to 3PAR array invalid" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:218 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:237 +#, python-format +msgid "The hostname must be called '%s'" +msgstr "" + diff --git a/cinder/locale/vi_VN/LC_MESSAGES/cinder.po b/cinder/locale/vi_VN/LC_MESSAGES/cinder.po new file mode 100644 index 0000000000..32cab9dc7c --- /dev/null +++ b/cinder/locale/vi_VN/LC_MESSAGES/cinder.po @@ -0,0 +1,5574 @@ +# Vietnamese (Vietnam) translations for cinder. +# Copyright (C) 2013 ORGANIZATION +# This file is distributed under the same license as the cinder project. 
+# +# Translators: +msgid "" +msgstr "" +"Project-Id-Version: Cinder\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2013-05-21 18:10+0000\n" +"PO-Revision-Date: 2013-05-08 11:44+0000\n" +"Last-Translator: FULL NAME \n" +"Language-Team: Vietnamese (Viet Nam) " +"(http://www.transifex.com/projects/p/openstack/language/vi_VN/)\n" +"Plural-Forms: nplurals=1; plural=0\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 0.9.6\n" + +#: cinder/context.py:61 +#, python-format +msgid "Arguments dropped when creating context: %s" +msgstr "" + +#: cinder/context.py:92 +#, python-format +msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" +msgstr "" + +#: cinder/exception.py:63 +msgid "Unexpected error while running command." +msgstr "" + +#: cinder/exception.py:66 +#, python-format +msgid "" +"%(description)s\n" +"Command: %(cmd)s\n" +"Exit code: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" +msgstr "" + +#: cinder/exception.py:90 +msgid "DB exception wrapped." +msgstr "" + +#: cinder/exception.py:104 +msgid "An unknown exception occurred." +msgstr "" + +#: cinder/exception.py:125 cinder/openstack/common/rpc/common.py:86 +msgid "Exception in string format operation" +msgstr "" + +#: cinder/exception.py:138 +msgid "Connection to glance failed" +msgstr "" + +#: cinder/exception.py:142 +msgid "Not authorized." +msgstr "" + +#: cinder/exception.py:147 +msgid "User does not have admin privileges" +msgstr "" + +#: cinder/exception.py:151 +#, python-format +msgid "Policy doesn't allow %(action)s to be performed." +msgstr "" + +#: cinder/exception.py:155 +#, python-format +msgid "Not authorized for image %(image_id)s." +msgstr "" + +#: cinder/exception.py:159 +msgid "Unacceptable parameters." +msgstr "" + +#: cinder/exception.py:164 +msgid "Invalid snapshot" +msgstr "" + +#: cinder/exception.py:168 +#, python-format +msgid "Volume %(volume_id)s is still attached, detach volume first." +msgstr "" + +#: cinder/exception.py:172 +msgid "Failed to load data into json format" +msgstr "" + +#: cinder/exception.py:176 +msgid "The request is invalid." +msgstr "" + +#: cinder/exception.py:180 +msgid "The results are invalid." +msgstr "" + +#: cinder/exception.py:184 +msgid "Invalid input received" +msgstr "" + +#: cinder/exception.py:188 +msgid "Invalid volume type" +msgstr "" + +#: cinder/exception.py:192 +msgid "Invalid volume" +msgstr "" + +#: cinder/exception.py:196 +#, python-format +msgid "Invalid content type %(content_type)s." +msgstr "" + +#: cinder/exception.py:200 +msgid "Invalid Parameter: Unicode is not supported by the current database." +msgstr "" + +#: cinder/exception.py:207 +#, python-format +msgid "%(err)s" +msgstr "" + +#: cinder/exception.py:211 +msgid "Service is unavailable at this time." +msgstr "" + +#: cinder/exception.py:215 +#, python-format +msgid "Image %(image_id)s is unacceptable: %(reason)s" +msgstr "" + +#: cinder/exception.py:219 +msgid "Expected a uuid but received %(uuid)s." +msgstr "" + +#: cinder/exception.py:223 +msgid "Resource could not be found." +msgstr "" + +#: cinder/exception.py:229 +#, python-format +msgid "Volume %(volume_id)s persistence file could not be found." +msgstr "" + +#: cinder/exception.py:233 +#, python-format +msgid "Volume %(volume_id)s could not be found." 
+msgstr "" + +#: cinder/exception.py:237 +#, python-format +msgid "Unable to locate account %(account_name)s on Solidfire device" +msgstr "" + +#: cinder/exception.py:242 +#, python-format +msgid "Volume not found for instance %(instance_id)s." +msgstr "" + +#: cinder/exception.py:246 +#, python-format +msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:251 cinder/exception.py:264 +msgid "Invalid metadata" +msgstr "" + +#: cinder/exception.py:255 cinder/exception.py:268 +msgid "Invalid metadata size" +msgstr "" + +#: cinder/exception.py:259 +#, python-format +msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:272 +#, python-format +msgid "Volume type %(volume_type_id)s could not be found." +msgstr "" + +#: cinder/exception.py:276 +#, python-format +msgid "Volume type with name %(volume_type_name)s could not be found." +msgstr "" + +#: cinder/exception.py:281 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s has no extra specs with key " +"%(extra_specs_key)s." +msgstr "" + +#: cinder/exception.py:286 +#, python-format +msgid "Snapshot %(snapshot_id)s could not be found." +msgstr "" + +#: cinder/exception.py:290 +#, python-format +msgid "deleting volume %(volume_name)s that has snapshot" +msgstr "" + +#: cinder/exception.py:294 +#, python-format +msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" +msgstr "" + +#: cinder/exception.py:299 +#, python-format +msgid "No target id found for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:303 +#, python-format +msgid "Failed to create iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:307 +#, python-format +msgid "Failed to attach iSCSI target for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:311 +#, python-format +msgid "Failed to remove iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:315 +#, python-format +msgid "No disk at %(location)s" +msgstr "" + +#: cinder/exception.py:319 +#, python-format +msgid "Invalid image href %(image_href)s." +msgstr "" + +#: cinder/exception.py:323 +#, python-format +msgid "Image %(image_id)s could not be found." +msgstr "" + +#: cinder/exception.py:327 +#, python-format +msgid "Service %(service_id)s could not be found." +msgstr "" + +#: cinder/exception.py:331 +#, python-format +msgid "Host %(host)s could not be found." +msgstr "" + +#: cinder/exception.py:335 +#, python-format +msgid "Scheduler Host Filter %(filter_name)s could not be found." +msgstr "" + +#: cinder/exception.py:339 +#, python-format +msgid "Scheduler Host Weigher %(weigher_name)s could not be found." +msgstr "" + +#: cinder/exception.py:343 +#, python-format +msgid "Could not find binary %(binary)s on host %(host)s." +msgstr "" + +#: cinder/exception.py:347 +#, python-format +msgid "Invalid reservation expiration %(expire)s." +msgstr "" + +#: cinder/exception.py:351 +#, python-format +msgid "" +"Change would make usage less than 0 for the following resources: " +"%(unders)s" +msgstr "" + +#: cinder/exception.py:356 +msgid "Quota could not be found" +msgstr "" + +#: cinder/exception.py:360 +#, python-format +msgid "Unknown quota resources %(unknown)s." +msgstr "" + +#: cinder/exception.py:364 +#, python-format +msgid "Quota for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:368 +#, python-format +msgid "Quota class %(class_name)s could not be found." 
+msgstr "" + +#: cinder/exception.py:372 +#, python-format +msgid "Quota usage for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:376 +#, python-format +msgid "Quota reservation %(uuid)s could not be found." +msgstr "" + +#: cinder/exception.py:380 +#, python-format +msgid "Quota exceeded for resources: %(overs)s" +msgstr "" + +#: cinder/exception.py:384 +#, python-format +msgid "Migration %(migration_id)s could not be found." +msgstr "" + +#: cinder/exception.py:388 +#, python-format +msgid "Migration not found for instance %(instance_id)s with status %(status)s." +msgstr "" + +#: cinder/exception.py:393 +#, python-format +msgid "File %(file_path)s could not be found." +msgstr "" + +#: cinder/exception.py:397 +#, python-format +msgid "Class %(class_name)s could not be found: %(exception)s" +msgstr "" + +#: cinder/exception.py:401 +msgid "Action not allowed." +msgstr "" + +#: cinder/exception.py:410 +#, python-format +msgid "Key pair %(key_name)s already exists." +msgstr "" + +#: cinder/exception.py:414 +#, python-format +msgid "Volume Type %(id)s already exists." +msgstr "" + +#: cinder/exception.py:418 +msgid "Migration error" +msgstr "" + +#: cinder/exception.py:422 +#, python-format +msgid "Malformed message body: %(reason)s" +msgstr "" + +#: cinder/exception.py:426 +#, python-format +msgid "Could not find config at %(path)s" +msgstr "" + +#: cinder/exception.py:430 +#, python-format +msgid "Could not load paste app '%(name)s' from %(path)s" +msgstr "" + +#: cinder/exception.py:434 +#, python-format +msgid "No valid host was found. %(reason)s" +msgstr "" + +#: cinder/exception.py:438 +#, python-format +msgid "Host %(host)s is not up or doesn't exist." +msgstr "" + +#: cinder/exception.py:442 +msgid "Quota exceeded" +msgstr "" + +#: cinder/exception.py:449 +msgid "Requested volume or snapshot exceeds allowed Gigabytes quota" +msgstr "" + +#: cinder/exception.py:454 +msgid "Maximum volume/snapshot size exceeded" +msgstr "" + +#: cinder/exception.py:458 +#, python-format +msgid "Maximum number of volumes allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:462 +#, python-format +msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:466 +#, python-format +msgid "Detected more than one volume with name %(vol_name)s" +msgstr "" + +#: cinder/exception.py:470 +#, python-format +msgid "3PAR Host already exists: %(err)s. %(info)s" +msgstr "" + +#: cinder/exception.py:474 +#, python-format +msgid "Invalid 3PAR Domain: %(err)s" +msgstr "" + +#: cinder/exception.py:478 +#, python-format +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +msgstr "" + +#: cinder/exception.py:483 +msgid "Bad response from SolidFire API" +msgstr "" + +#: cinder/exception.py:487 +#, python-format +msgid "Error in SolidFire API response: data=%(data)s" +msgstr "" + +#: cinder/exception.py:491 +#, python-format +msgid "Unknown or unsupported command %(cmd)s" +msgstr "" + +#: cinder/exception.py:495 +#, python-format +msgid "Malformed response to command %(cmd)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:499 +#, python-format +msgid "Bad HTTP response status %(status)s" +msgstr "" + +#: cinder/exception.py:503 +#, python-format +msgid "Operation failed with status=%(status)s. 
Full dump: %(data)s" +msgstr "" + +#: cinder/exception.py:507 +#, python-format +msgid "Unable to create server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:511 +#, python-format +msgid "Unable to find server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:515 +msgid "Unable to find any active VPSA controller" +msgstr "" + +#: cinder/exception.py:519 +#, python-format +msgid "Failed to retrieve attachments for volume %(name)s" +msgstr "" + +#: cinder/exception.py:523 +#, python-format +msgid "Invalid attachment info for volume %(name)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:527 +#, python-format +msgid "Instance %(instance_id)s could not be found." +msgstr "" + +#: cinder/exception.py:531 +#, python-format +msgid "Bad or unexpected response from the storage volume backend API: %(data)s" +msgstr "" + +#: cinder/exception.py:536 +msgid "Unknown NFS exception" +msgstr "" + +#: cinder/exception.py:540 +msgid "No mounted NFS shares found" +msgstr "" + +#: cinder/exception.py:544 cinder/exception.py:556 +#, python-format +msgid "There is no share which can host %(volume_size)sG" +msgstr "" + +#: cinder/exception.py:548 +msgid "Unknown Gluster exception" +msgstr "" + +#: cinder/exception.py:552 +msgid "No mounted Gluster shares found" +msgstr "" + +#: cinder/exception.py:560 +#, python-format +msgid "" +"Glance metadata cannot be updated, key %(key)s exists for volume id " +"%(volume_id)s" +msgstr "" + +#: cinder/exception.py:565 +msgid "Failed to copy image to volume" +msgstr "" + +#: cinder/exception.py:569 +#, python-format +msgid "Backup %(backup_id)s could not be found." +msgstr "" + +#: cinder/exception.py:573 +#, python-format +msgid "Invalid backup: %(reason)s" +msgstr "" + +#: cinder/exception.py:577 +msgid "Connection to swift failed" +msgstr "" + +#: cinder/flags.py:145 +msgid "Deploy v1 of the Cinder API. " +msgstr "" + +#: cinder/flags.py:148 +msgid "Deploy v2 of the Cinder API. " +msgstr "" + +#: cinder/manager.py:158 +#, python-format +msgid "Skipping %(full_task_name)s, %(ticks_to_skip)s ticks left until next run" +msgstr "" + +#: cinder/manager.py:164 +#, python-format +msgid "Running periodic task %(full_task_name)s" +msgstr "" + +#: cinder/manager.py:171 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "" + +#: cinder/manager.py:216 +msgid "Notifying Schedulers of capabilities ..." 
+msgstr "" + +#: cinder/policy.py:30 +msgid "JSON file representing policy" +msgstr "" + +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" +msgstr "" + +#: cinder/quota.py:704 +#, python-format +msgid "Created reservations %(reservations)s" +msgstr "" + +#: cinder/quota.py:726 +#, python-format +msgid "Failed to commit reservations %(reservations)s" +msgstr "" + +#: cinder/quota.py:747 +#, python-format +msgid "Failed to roll back reservations %(reservations)s" +msgstr "" + +#: cinder/service.py:124 +msgid "SIGTERM received" +msgstr "" + +#: cinder/service.py:174 cinder/openstack/common/service.py:164 +msgid "Parent process has died unexpectedly, exiting" +msgstr "" + +#: cinder/service.py:213 cinder/openstack/common/service.py:200 +msgid "Forking too fast, sleeping" +msgstr "" + +#: cinder/service.py:231 cinder/openstack/common/service.py:121 +#: cinder/openstack/common/service.py:218 +#, python-format +msgid "Caught %s, exiting" +msgstr "" + +#: cinder/service.py:236 cinder/openstack/common/service.py:223 +msgid "Unhandled exception" +msgstr "" + +#: cinder/service.py:243 cinder/openstack/common/service.py:230 +#, python-format +msgid "Started child %d" +msgstr "" + +#: cinder/service.py:253 cinder/openstack/common/service.py:240 +#, python-format +msgid "Starting %d workers" +msgstr "" + +#: cinder/service.py:272 cinder/openstack/common/service.py:257 +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "" + +#: cinder/service.py:275 +#, python-format +msgid "Child %(pid)d exited with status %(code)d" +msgstr "" + +#: cinder/service.py:278 cinder/openstack/common/service.py:265 +#, python-format +msgid "pid %d not in child list" +msgstr "" + +#: cinder/service.py:286 +#, python-format +msgid "_wait_child %d" +msgstr "" + +#: cinder/service.py:302 +#, python-format +msgid "wait wrap.failed %s" +msgstr "" + +#: cinder/service.py:310 cinder/openstack/common/service.py:293 +#, python-format +msgid "Caught %s, stopping children" +msgstr "" + +#: cinder/service.py:321 cinder/openstack/common/service.py:304 +#, python-format +msgid "Waiting on %d children to exit" +msgstr "" + +#: cinder/service.py:353 +#, python-format +msgid "Starting %(topic)s node (version %(version_string)s)" +msgstr "" + +#: cinder/service.py:367 cinder/openstack/common/rpc/service.py:47 +#, python-format +msgid "Creating Consumer connection for Service %s" +msgstr "" + +#: cinder/service.py:458 +msgid "Service killed that has no database entry" +msgstr "" + +#: cinder/service.py:495 +msgid "The service database object disappeared, Recreating it." +msgstr "" + +#: cinder/service.py:510 +msgid "Recovered model server connection!" +msgstr "" + +#: cinder/service.py:516 +msgid "model server went away" +msgstr "" + +#: cinder/service.py:608 +msgid "Full set of FLAGS:" +msgstr "" + +#: cinder/service.py:615 +#, python-format +msgid "%(flag)s : FLAG SET " +msgstr "" + +#: cinder/utils.py:96 +#, python-format +msgid "Fetching %s" +msgstr "" + +#: cinder/utils.py:147 cinder/openstack/common/processutils.py:122 +#, python-format +msgid "Got unknown keyword args to utils.execute: %r" +msgstr "" + +#: cinder/utils.py:153 +msgid "" +"The root_helper option (which lets you specify a root wrapper different " +"from cinder-rootwrap, and defaults to using sudo) is now deprecated. You " +"should use the rootwrap_config option instead." 
+msgstr "" + +#: cinder/utils.py:169 cinder/openstack/common/processutils.py:137 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "" + +#: cinder/utils.py:186 cinder/utils.py:267 +#: cinder/openstack/common/processutils.py:162 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:224 +#, python-format +msgid "Result was %s" +msgstr "" + +#: cinder/utils.py:199 cinder/openstack/common/processutils.py:174 +#, python-format +msgid "%r failed. Retrying." +msgstr "" + +#: cinder/utils.py:241 cinder/volume/drivers/san/hp/hp_3par_common.py:197 +#, python-format +msgid "Running cmd (SSH): %s" +msgstr "" + +#: cinder/utils.py:243 +msgid "Environment not supported over SSH" +msgstr "" + +#: cinder/utils.py:247 +msgid "process_input not supported over SSH" +msgstr "" + +#: cinder/utils.py:316 cinder/volume/drivers/huawei/huawei_iscsi.py:91 +msgid "Specify a password or private_key" +msgstr "" + +#: cinder/utils.py:332 cinder/volume/drivers/huawei/huawei_iscsi.py:99 +#, python-format +msgid "Error connecting via ssh: %s" +msgstr "" + +#: cinder/utils.py:373 +#, python-format +msgid "debug in callback: %s" +msgstr "" + +#: cinder/utils.py:532 +#, python-format +msgid "Link Local address is not found.:%s" +msgstr "" + +#: cinder/utils.py:535 +#, python-format +msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +msgstr "" + +#: cinder/utils.py:569 +#, python-format +msgid "Invalid backend: %s" +msgstr "" + +#: cinder/utils.py:580 +#, python-format +msgid "backend %s" +msgstr "" + +#: cinder/utils.py:630 +msgid "in looping call" +msgstr "" + +#: cinder/utils.py:813 +#, python-format +msgid "Expected object of type: %s" +msgstr "" + +#: cinder/utils.py:903 +#, python-format +msgid "timefunc: '%(name)s' took %(total_time).2f secs" +msgstr "" + +#: cinder/utils.py:1105 +#, python-format +msgid "Could not remove tmpdir: %s" +msgstr "" + +#: cinder/utils.py:1213 +#, python-format +msgid "Unknown byte multiplier: %s" +msgstr "" + +#: cinder/wsgi.py:121 +#, python-format +msgid "Unable to find cert_file : %s" +msgstr "" + +#: cinder/wsgi.py:124 +#, python-format +msgid "Unable to find ca_file : %s" +msgstr "" + +#: cinder/wsgi.py:127 +#, python-format +msgid "Unable to find key_file : %s" +msgstr "" + +#: cinder/wsgi.py:130 +msgid "" +"When running server in SSL mode, you must specify both a cert_file and " +"key_file option value in your configuration file" +msgstr "" + +#: cinder/wsgi.py:163 +#, python-format +msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" +msgstr "" + +#: cinder/wsgi.py:207 +#, python-format +msgid "Started %(name)s on %(_host)s:%(_port)s" +msgstr "" + +#: cinder/wsgi.py:226 +msgid "Stopping WSGI server." +msgstr "" + +#: cinder/wsgi.py:240 +msgid "WSGI server has stopped." +msgstr "" + +#: cinder/wsgi.py:309 +msgid "You must implement __call__" +msgstr "" + +#: cinder/api/auth.py:26 +msgid "" +"cinder.api.auth:CinderKeystoneContext is deprecated. Please use " +"cinder.api.middleware.auth:CinderKeystoneContext instead." +msgstr "" + +#: cinder/api/auth.py:34 +msgid "" +"cinder.api.auth:pipeline_factory is deprecated. Please use " +"cinder.api.middleware.auth:pipeline_factory instead." 
+msgstr "" + +#: cinder/api/common.py:63 cinder/api/common.py:97 cinder/volume/api.py:359 +msgid "limit param must be an integer" +msgstr "" + +#: cinder/api/common.py:66 cinder/api/common.py:101 cinder/volume/api.py:356 +msgid "limit param must be positive" +msgstr "" + +#: cinder/api/common.py:91 +msgid "offset param must be an integer" +msgstr "" + +#: cinder/api/common.py:105 +msgid "offset param must be positive" +msgstr "" + +#: cinder/api/common.py:133 +#, python-format +msgid "marker [%s] not found" +msgstr "" + +#: cinder/api/common.py:160 +#, python-format +msgid "href %s does not contain version" +msgstr "" + +#: cinder/api/extensions.py:184 +msgid "Initializing extension manager." +msgstr "" + +#: cinder/api/extensions.py:199 +#, python-format +msgid "Loaded extension: %s" +msgstr "" + +#: cinder/api/extensions.py:237 +#, python-format +msgid "Ext name: %s" +msgstr "" + +#: cinder/api/extensions.py:238 +#, python-format +msgid "Ext alias: %s" +msgstr "" + +#: cinder/api/extensions.py:239 +#, python-format +msgid "Ext description: %s" +msgstr "" + +#: cinder/api/extensions.py:241 +#, python-format +msgid "Ext namespace: %s" +msgstr "" + +#: cinder/api/extensions.py:242 +#, python-format +msgid "Ext updated: %s" +msgstr "" + +#: cinder/api/extensions.py:244 +#, python-format +msgid "Exception loading extension: %s" +msgstr "" + +#: cinder/api/extensions.py:258 +#, python-format +msgid "Loading extension %s" +msgstr "" + +#: cinder/api/extensions.py:264 +#, python-format +msgid "Calling extension factory %s" +msgstr "" + +#: cinder/api/extensions.py:278 +#, python-format +msgid "osapi_volume_extension is set to deprecated path: %s" +msgstr "" + +#: cinder/api/extensions.py:280 +#, python-format +msgid "" +"Please set your flag or cinder.conf settings for osapi_volume_extension " +"to: %s" +msgstr "" + +#: cinder/api/extensions.py:289 +#, python-format +msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:357 +#, python-format +msgid "Failed to load extension %(classpath)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:381 +#, python-format +msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgstr "" + +#: cinder/api/sizelimit.py:25 +msgid "" +"cinder.api.sizelimit:RequestBodySizeLimiter is deprecated. Please use " +"cinder.api.middleware.sizelimit:RequestBodySizeLimiter instead" +msgstr "" + +#: cinder/api/xmlutil.py:268 +msgid "element is not a child" +msgstr "" + +#: cinder/api/xmlutil.py:417 +msgid "root element selecting a list" +msgstr "" + +#: cinder/api/xmlutil.py:742 +#, python-format +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgstr "" + +#: cinder/api/xmlutil.py:861 +msgid "subclasses must implement construct()!" 
+msgstr "" + +#: cinder/api/contrib/admin_actions.py:80 +#, python-format +msgid "Updating %(resource)s '%(id)s' with '%(update)r'" +msgstr "" + +#: cinder/api/contrib/backups.py:128 +#, python-format +msgid "show called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:140 +#, python-format +msgid "delete called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:143 +#, python-format +msgid "Delete backup with id: %s" +msgstr "" + +#: cinder/api/contrib/backups.py:185 +#, python-format +msgid "Creating new backup %s" +msgstr "" + +#: cinder/api/contrib/backups.py:195 cinder/api/contrib/backups.py:230 +msgid "Incorrect request body format" +msgstr "" + +#: cinder/api/contrib/backups.py:201 +#, python-format +msgid "Creating backup of volume %(volume_id)s in container %(container)s" +msgstr "" + +#: cinder/api/contrib/backups.py:221 +#, python-format +msgid "Restoring backup %(backup_id)s (%(body)s)" +msgstr "" + +#: cinder/api/contrib/backups.py:234 +#, python-format +msgid "Restoring backup %(backup_id)s to volume %(volume_id)s" +msgstr "" + +#: cinder/api/contrib/extended_snapshot_attributes.py:61 +msgid "Snapshot not found." +msgstr "" + +#: cinder/api/contrib/hosts.py:83 cinder/api/openstack/wsgi.py:158 +msgid "cannot understand XML" +msgstr "" + +#: cinder/api/contrib/hosts.py:133 +#, python-format +msgid "Host '%s' could not be found." +msgstr "" + +#: cinder/api/contrib/hosts.py:162 +#, python-format +msgid "Invalid status: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:165 +#, python-format +msgid "Invalid update setting: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:177 +#, python-format +msgid "Setting host %(host)s to %(state)s." +msgstr "" + +#: cinder/api/contrib/hosts.py:202 +msgid "Describe-resource is admin only functionality" +msgstr "" + +#: cinder/api/contrib/hosts.py:210 +msgid "Host not found" +msgstr "" + +#: cinder/api/contrib/quotas.py:63 +msgid "Quota limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:102 +msgid "Request body empty" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:106 +#: cinder/api/v1/snapshot_metadata.py:77 cinder/api/v1/volume_metadata.py:77 +#: cinder/api/v2/snapshot_metadata.py:77 +msgid "Request body and URI mismatch" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:109 +#: cinder/api/v1/snapshot_metadata.py:81 cinder/api/v1/volume_metadata.py:81 +#: cinder/api/v2/snapshot_metadata.py:81 +msgid "Request body contains too many items" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:159 +msgid "Invalid request body" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:163 +msgid "No image_name was specified in request." +msgstr "" + +#: cinder/api/middleware/fault.py:45 +#, python-format +msgid "Caught error: %s" +msgstr "" + +#: cinder/api/middleware/fault.py:54 cinder/api/openstack/wsgi.py:886 +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "" + +#: cinder/api/middleware/sizelimit.py:54 cinder/api/middleware/sizelimit.py:63 +#: cinder/api/middleware/sizelimit.py:77 +msgid "Request is too large." 
+msgstr "" + +#: cinder/api/openstack/__init__.py:74 +msgid "Must specify an ExtensionManager class" +msgstr "" + +#: cinder/api/openstack/__init__.py:85 +#, python-format +msgid "Extended resource: %s" +msgstr "" + +#: cinder/api/openstack/__init__.py:110 +#, python-format +msgid "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" +msgstr "" + +#: cinder/api/openstack/__init__.py:115 +#, python-format +msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgstr "" + +#: cinder/api/openstack/__init__.py:128 +msgid "" +"cinder.api.openstack:FaultWrapper is deprecated. Please use " +"cinder.api.middleware.fault:FaultWrapper instead." +msgstr "" + +#: cinder/api/openstack/urlmap.py:25 +msgid "" +"cinder.api.openstack.urlmap:urlmap_factory is deprecated. Please use " +"cinder.api.urlmap:urlmap_factory instead." +msgstr "" + +#: cinder/api/openstack/wsgi.py:134 cinder/api/openstack/wsgi.py:537 +msgid "cannot understand JSON" +msgstr "" + +#: cinder/api/openstack/wsgi.py:542 +msgid "too many body keys" +msgstr "" + +#: cinder/api/openstack/wsgi.py:581 +#, python-format +msgid "Exception handling resource: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:586 +#, python-format +msgid "Fault thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:589 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:697 +msgid "Unrecognized Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:701 +msgid "No Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:705 +msgid "Empty body provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:816 +#, python-format +msgid "There is no such action: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:819 cinder/api/openstack/wsgi.py:832 +#: cinder/api/v1/snapshot_metadata.py:55 cinder/api/v1/snapshot_metadata.py:73 +#: cinder/api/v1/snapshot_metadata.py:98 cinder/api/v1/snapshot_metadata.py:123 +#: cinder/api/v1/volume_metadata.py:55 cinder/api/v1/volume_metadata.py:73 +#: cinder/api/v1/volume_metadata.py:98 cinder/api/v1/volume_metadata.py:123 +#: cinder/api/v2/snapshot_metadata.py:55 cinder/api/v2/snapshot_metadata.py:73 +#: cinder/api/v2/snapshot_metadata.py:98 cinder/api/v2/snapshot_metadata.py:123 +msgid "Malformed request body" +msgstr "" + +#: cinder/api/openstack/wsgi.py:829 +msgid "Unsupported Content-Type" +msgstr "" + +#: cinder/api/openstack/wsgi.py:841 +msgid "Malformed request url" +msgstr "" + +#: cinder/api/openstack/wsgi.py:889 +#, python-format +msgid "%(url)s returned a fault: %(e)s" +msgstr "" + +#: cinder/api/openstack/volume/__init__.py:25 +msgid "" +"cinder.api.openstack.volume:APIRouter is deprecated. Please use " +"cinder.api.v1.router:APIRouter instead." +msgstr "" + +#: cinder/api/openstack/volume/versions.py:26 +msgid "" +"cinder.api.openstack.volume.versions.Versions is deprecated. Please use " +"cinder.api.versions.Versions instead." +msgstr "" + +#: cinder/api/v1/limits.py:144 cinder/api/v2/limits.py:144 +#, python-format +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." +msgstr "" + +#: cinder/api/v1/limits.py:270 cinder/api/v2/limits.py:270 +msgid "This request was rate-limited." 
+msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:39 cinder/api/v1/snapshot_metadata.py:119 +#: cinder/api/v1/snapshot_metadata.py:158 cinder/api/v2/snapshot_metadata.py:39 +#: cinder/api/v2/snapshot_metadata.py:119 +#: cinder/api/v2/snapshot_metadata.py:158 +msgid "snapshot does not exist" +msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:141 +#: cinder/api/v1/snapshot_metadata.py:151 cinder/api/v1/volume_metadata.py:141 +#: cinder/api/v1/volume_metadata.py:151 cinder/api/v2/snapshot_metadata.py:141 +#: cinder/api/v2/snapshot_metadata.py:151 +msgid "Metadata item was not found" +msgstr "" + +#: cinder/api/v1/snapshots.py:123 cinder/api/v2/snapshots.py:123 +#, python-format +msgid "Delete snapshot with id: %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:173 cinder/api/v2/snapshots.py:179 +#, python-format +msgid "Create snapshot from volume %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:177 cinder/api/v2/snapshots.py:188 +#, python-format +msgid "Invalid value '%s' for force. " +msgstr "" + +#: cinder/api/v1/volume_metadata.py:39 cinder/api/v1/volume_metadata.py:119 +#: cinder/api/v1/volume_metadata.py:158 +msgid "volume does not exist" +msgstr "" + +#: cinder/api/v1/volumes.py:106 +#, python-format +msgid "vol=%s" +msgstr "" + +#: cinder/api/v1/volumes.py:236 cinder/api/v2/volumes.py:153 +#, python-format +msgid "Delete volume with id: %s" +msgstr "" + +#: cinder/api/v1/volumes.py:278 cinder/api/v1/volumes.py:282 +#: cinder/api/v2/volumes.py:209 cinder/api/v2/volumes.py:213 +msgid "Invalid imageRef provided." +msgstr "" + +#: cinder/api/v1/volumes.py:339 cinder/api/v2/volumes.py:272 +#, python-format +msgid "Create volume of %s GB" +msgstr "" + +#: cinder/api/v1/volumes.py:418 +#, python-format +msgid "Removing options '%(bad_options)s' from query" +msgstr "" + +#: cinder/api/v2/volumes.py:359 +#, python-format +msgid "Removing options '%s' from query" +msgstr "" + +#: cinder/backup/api.py:53 +msgid "Backup status must be available or error" +msgstr "" + +#: cinder/backup/api.py:78 +msgid "Volume to be backed up must be available" +msgstr "" + +#: cinder/backup/api.py:112 +msgid "Backup status must be available" +msgstr "" + +#: cinder/backup/api.py:117 +msgid "Backup to be restored has invalid size" +msgstr "" + +#: cinder/backup/api.py:126 +#, python-format +msgid "Creating volume of %(size)s GB for restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/api.py:140 +#, python-format +msgid "" +"volume size %(volume_size)d is too small to restore backup of size " +"%(size)d." 
+msgstr "" + +#: cinder/backup/api.py:145 +msgid "Volume to be restored to must be available" +msgstr "" + +#: cinder/backup/api.py:151 +msgid "Volume to be restored to is smaller than the backup to be restored" +msgstr "" + +#: cinder/backup/api.py:155 +#, python-format +msgid "Overwriting volume %(volume_id)s with restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/manager.py:79 +msgid "Cleaning up incomplete backup operations" +msgstr "" + +#: cinder/backup/manager.py:83 +#, python-format +msgid "Resetting volume %s to available (was backing-up)" +msgstr "" + +#: cinder/backup/manager.py:87 +#, python-format +msgid "Resetting volume %s to error_restoring (was restoring-backup)" +msgstr "" + +#: cinder/backup/manager.py:98 +#, python-format +msgid "Resetting backup %s to error (was creating)" +msgstr "" + +#: cinder/backup/manager.py:104 +#, python-format +msgid "Resetting backup %s to available (was restoring)" +msgstr "" + +#: cinder/backup/manager.py:109 +#, python-format +msgid "Resuming delete on backup: %s" +msgstr "" + +#: cinder/backup/manager.py:119 +#, python-format +msgid "create_backup started, backup: %(backup_id)s for volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:128 +#, python-format +msgid "" +"create_backup aborted, expected volume status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:137 +#, python-format +msgid "" +"create_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:160 +#, python-format +msgid "create_backup finished. backup: %s" +msgstr "" + +#: cinder/backup/manager.py:166 +#, python-format +msgid "" +"restore_backup started, restoring backup: %(backup_id)s to volume: " +"%(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:175 +#, python-format +msgid "" +"restore_backup aborted, expected volume status %(expected_status)s but " +"got %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:183 +#, python-format +msgid "" +"restore_backup aborted, expected backup status %(expected_status)s but " +"got %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:199 +#, python-format +msgid "" +"restore_backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]" +msgstr "" + +#: cinder/backup/manager.py:220 +#, python-format +msgid "" +"restore_backup finished, backup: %(backup_id)s restored to volume: " +"%(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:228 +#, python-format +msgid "delete_backup started, backup: %s" +msgstr "" + +#: cinder/backup/manager.py:234 +#, python-format +msgid "" +"delete_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:244 +#, python-format +msgid "" +"delete_backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]" +msgstr "" + +#: cinder/backup/manager.py:264 +#, python-format +msgid "delete_backup finished, backup %s deleted" +msgstr "" + +#: cinder/backup/services/swift.py:96 +#, python-format +msgid "unsupported compression algorithm: %s" +msgstr "" + +#: cinder/backup/services/swift.py:117 +#, python-format +msgid "_check_container_exists: container: %s" +msgstr "" + +#: cinder/backup/services/swift.py:122 +#, python-format +msgid "container %s does 
not exist" +msgstr "" + +#: cinder/backup/services/swift.py:127 +#, python-format +msgid "container %s exists" +msgstr "" + +#: cinder/backup/services/swift.py:133 +#, python-format +msgid "_create_container started, container: %(container)s,backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:148 +#, python-format +msgid "_generate_swift_object_name_prefix: %s" +msgstr "" + +#: cinder/backup/services/swift.py:159 +#, python-format +msgid "generated object list: %s" +msgstr "" + +#: cinder/backup/services/swift.py:169 +#, python-format +msgid "" +"_write_metadata started, container name: %(container)s, metadata " +"filename: %(filename)s" +msgstr "" + +#: cinder/backup/services/swift.py:184 +#, python-format +msgid "" +"error writing metadata file to swift, MD5 of metadata file in swift " +"[%(etag)s] is not the same as MD5 of metadata file sent to swift " +"[%(md5)s]" +msgstr "" + +#: cinder/backup/services/swift.py:188 +msgid "_write_metadata finished" +msgstr "" + +#: cinder/backup/services/swift.py:193 +#, python-format +msgid "" +"_read_metadata started, container name: %(container)s, metadata filename:" +" %(filename)s" +msgstr "" + +#: cinder/backup/services/swift.py:197 +#, python-format +msgid "_read_metadata finished (%s)" +msgstr "" + +#: cinder/backup/services/swift.py:207 +#, python-format +msgid "volume size %d is invalid." +msgstr "" + +#: cinder/backup/services/swift.py:221 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to swift, volume size: " +"%(volume_size_bytes)d, swift object names prefix %(object_prefix)s, " +"availability zone: %(availability_zone)s" +msgstr "" + +#: cinder/backup/services/swift.py:237 +msgid "reading chunk of data from volume" +msgstr "" + +#: cinder/backup/services/swift.py:244 +#, python-format +msgid "" +"compressed %(data_size_bytes)d bytes of data to %(comp_size_bytes)d bytes" +" using %(algorithm)s" +msgstr "" + +#: cinder/backup/services/swift.py:248 +msgid "not compressing data" +msgstr "" + +#: cinder/backup/services/swift.py:252 +msgid "About to put_object" +msgstr "" + +#: cinder/backup/services/swift.py:257 +#, python-format +msgid "swift MD5 for %(object_name)s: %(etag)s" +msgstr "" + +#: cinder/backup/services/swift.py:260 +#, python-format +msgid "backup MD5 for %(object_name)s: %(md5)s" +msgstr "" + +#: cinder/backup/services/swift.py:262 +#, python-format +msgid "" +"error writing object to swift, MD5 of object in swift %(etag)s is not the" +" same as MD5 of object sent to swift %(md5)s" +msgstr "" + +#: cinder/backup/services/swift.py:268 +msgid "Calling eventlet.sleep(0)" +msgstr "" + +#: cinder/backup/services/swift.py:276 +#, python-format +msgid "backup %s finished." +msgstr "" + +#: cinder/backup/services/swift.py:281 +#, python-format +msgid "v1 swift volume backup restore of %s started" +msgstr "" + +#: cinder/backup/services/swift.py:287 +#, python-format +msgid "metadata_object_names = %s" +msgstr "" + +#: cinder/backup/services/swift.py:293 +msgid "" +"restore_backup aborted, actual swift object list in swift does not match " +"object list stored in metadata" +msgstr "" + +#: cinder/backup/services/swift.py:299 +#, python-format +msgid "" +"restoring object from swift. 
backup: %(backup_id)s, container: " +"%(container)s, swift object name: %(object_name)s, volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:309 +#, python-format +msgid "decompressing data using %s algorithm" +msgstr "" + +#: cinder/backup/services/swift.py:323 +#, python-format +msgid "v1 swift volume backup restore of %s finished" +msgstr "" + +#: cinder/backup/services/swift.py:331 +#, python-format +msgid "" +"starting restore of backup %(object_prefix)s from swift container: " +"%(container)s, to volume %(volume_id)s, backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:339 +#, python-format +msgid "Restoring swift backup version %s" +msgstr "" + +#: cinder/backup/services/swift.py:344 +#, python-format +msgid "No support to restore swift backup version %s" +msgstr "" + +#: cinder/backup/services/swift.py:348 +#, python-format +msgid "restore %(backup_id)s to %(volume_id)s finished." +msgstr "" + +#: cinder/backup/services/swift.py:362 +msgid "swift error while listing objects, continuing with delete" +msgstr "" + +#: cinder/backup/services/swift.py:371 +#, python-format +msgid "swift error while deleting object %s, continuing with delete" +msgstr "" + +#: cinder/backup/services/swift.py:374 +#, python-format +msgid "deleted swift object: %(swift_object_name)s in container: %(container)s" +msgstr "" + +#: cinder/backup/services/swift.py:380 +#, python-format +msgid "delete %s finished" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:152 +#, python-format +msgid "Creating iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:171 cinder/brick/iscsi/iscsi.py:264 +#: cinder/brick/iscsi/iscsi.py:400 cinder/brick/iscsi/iscsi.py:409 +#, python-format +msgid "Failed to create iscsi target for volume id:%(vol_id)s." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:181 +#, python-format +msgid "" +"Failed to create iscsi target for volume id:%(vol_id)s. Please ensure " +"your tgtd config file contains 'include %(volumes_dir)s/*'" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:192 +#, python-format +msgid "Removing iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:209 cinder/brick/iscsi/iscsi.py:426 +#, python-format +msgid "Failed to remove iscsi target for volume id:%(vol_id)s." 
+msgstr "" + +#: cinder/brick/iscsi/iscsi.py:218 cinder/brick/iscsi/iscsi.py:434 +msgid "valid iqn needed for show_target" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:270 +#, python-format +msgid "Removing iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:356 +msgid "rtstool is not installed correctly" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:376 +#, python-format +msgid "Creating iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:416 +#, python-format +msgid "Removing iscsi_target: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:455 +#, python-format +msgid "Failed to add initiator iqn %s to target" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:35 +#, python-format +msgid "Unable to find Volume Group: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:41 +#, python-format +msgid "Failed to create Volume Group: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:75 +msgid "Error creating Volume Group" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:76 +#, python-format +msgid "Cmd :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:77 +#, python-format +msgid "StdOut :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:78 +#, python-format +msgid "StdErr :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:82 +#, python-format +msgid "Unable to locate Volume Group %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:265 +#, python-format +msgid "Unable to find VG: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:284 +msgid "" +"Requested to setup thin provisioning, however current LVM version does " +"not support it." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:337 +#, python-format +msgid "Unable to find LV: %s" +msgstr "" + +#: cinder/common/sqlalchemyutils.py:68 +msgid "Id not in sort_keys; is sort_keys unique?" +msgstr "" + +#: cinder/common/sqlalchemyutils.py:116 +msgid "Unknown sort direction, must be 'desc' or 'asc'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:51 +msgid "Use of empty request context is deprecated" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:178 +#, python-format +msgid "Unrecognized read_deleted value '%s'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:786 +#, python-format +msgid "Change will make usage less than 0 for the following resources: %(unders)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1797 cinder/db/sqlalchemy/api.py:1823 +#, python-format +msgid "No backend config with id %(sm_backend_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1880 +#, python-format +msgid "No sm_flavor called %(sm_flavor)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1924 +#, python-format +msgid "No sm_volume with id %(volume_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1987 +#, python-format +msgid "No backup with id %(backup_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:74 +msgid "version should be an integer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:101 +msgid "Upgrade DB using Essex release first." +msgstr "" + +#: cinder/db/sqlalchemy/session.py:71 +#, python-format +msgid "Got mysql server has gone away: %s" +msgstr "" + +#: cinder/db/sqlalchemy/session.py:132 +#, python-format +msgid "SQL connection failed. %s attempts left." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:243 +msgid "Exception while creating table." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:272 +msgid "Downgrade from initial Cinder install is unsupported." 
+msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:85 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:123 +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:82 +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:47 +#, python-format +msgid "Table |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:135 +msgid "quota_classes table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:142 +msgid "quota_usages table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:149 +msgid "reservations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:62 +msgid "Exception while creating table 'volume_glance_metedata'" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:77 +msgid "volume_glance_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:94 +msgid "backups table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:60 +msgid "snapshot_metadata table not dropped" +msgstr "" + +#: cinder/image/glance.py:151 +#, python-format +msgid "" +"Error contacting glance server '%(host)s:%(port)s' for '%(method)s', " +"%(extra)s." +msgstr "" + +#: cinder/image/image_utils.py:116 +msgid "Snapshot list encountered but no header found!" +msgstr "" + +#: cinder/image/image_utils.py:224 +msgid "'qemu-img info' parsing failed." +msgstr "" + +#: cinder/image/image_utils.py:231 +#, python-format +msgid "fmt=%(fmt)s backed by:%(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:248 +#, python-format +msgid "Converted to raw, but format is now %s" +msgstr "" + +#: cinder/image/image_utils.py:278 +#, python-format +msgid "Converted to %(f1)s, but format is now %(f2)s" +msgstr "" + +#: cinder/openstack/common/exception.py:104 +msgid "Uncaught exception" +msgstr "" + +#: cinder/openstack/common/excutils.py:48 +#, python-format +msgid "Original exception being dropped: %s" +msgstr "" + +#: cinder/openstack/common/lockutils.py:102 +#, python-format +msgid "Could not release the acquired lock `%s`" +msgstr "" + +#: cinder/openstack/common/lockutils.py:188 +#, python-format +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:199 +#, python-format +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:226 +#, python-format +msgid "Got file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:234 +#, python-format +msgid "Released file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." 
+msgstr "" + +#: cinder/openstack/common/log.py:226 +#, python-format +msgid "Deprecated: %s" +msgstr "" + +#: cinder/openstack/common/log.py:329 +#, python-format +msgid "Error loading logging config %(log_config)s: %(err_msg)s" +msgstr "" + +#: cinder/openstack/common/log.py:379 +#, python-format +msgid "syslog facility must be one of: %s" +msgstr "" + +#: cinder/openstack/common/log.py:537 +#, python-format +msgid "Fatal call to deprecated config: %(msg)s" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:84 +#, python-format +msgid "task run outlasted interval by %s sec" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:91 +msgid "in fixed duration looping call" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:131 +#, python-format +msgid "Dynamic looping call sleeping for %.02f seconds" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:138 +msgid "in dynamic looping call" +msgstr "" + +#: cinder/openstack/common/policy.py:149 +#, python-format +msgid "" +"Inheritance-based rules are deprecated; use the default brain instead of " +"%s." +msgstr "" + +#: cinder/openstack/common/policy.py:163 +#, python-format +msgid "Failed to understand rule %(match)r" +msgstr "" + +#: cinder/openstack/common/policy.py:173 +#, python-format +msgid "Inheritance-based rules are deprecated; update _check_%s" +msgstr "" + +#: cinder/openstack/common/policy.py:180 +#, python-format +msgid "No handler for matches of kind %s" +msgstr "" + +#: cinder/openstack/common/service.py:112 +#: cinder/openstack/common/service.py:275 +msgid "Full set of CONF:" +msgstr "" + +#: cinder/openstack/common/service.py:261 +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "" + +#: cinder/openstack/common/strutils.py:72 +#, python-format +msgid "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:125 +#, python-format +msgid "%s not in valid priorities" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:141 +#, python-format +msgid "" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:171 +#, python-format +msgid "Failed to load notifier %s. These notifications will not be sent." +msgstr "" + +#: cinder/openstack/common/notifier/rabbit_notifier.py:27 +msgid "The rabbit_notifier is now deprecated. Please use rpc_notifier instead." +msgstr "" + +#: cinder/openstack/common/notifier/rpc_notifier.py:45 +#: cinder/openstack/common/notifier/rpc_notifier2.py:51 +#, python-format +msgid "Could not send notification to %(topic)s. Payload=%(message)s" +msgstr "" + +#: cinder/openstack/common/rpc/__init__.py:106 +#, python-format +msgid "" +"A RPC is being made while holding a lock. The locks currently held are " +"%(locks)s. This is probably a bug. Please report it. Include the " +"following: [%(stack)s]." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:75 +msgid "Pool creating new connection" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:200 +#, python-format +msgid "no calling threads waiting for msg_id : %s, message : %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:208 +#, python-format +msgid "" +"Number of call waiters is greater than warning threshhold: %d. There " +"could be a MulticallProxyWaiter leak." 
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:291
+#, python-format
+msgid "unpacked context: %s"
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:337
+#, python-format
+msgid "UNIQUE_ID is %s."
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:406
+#, python-format
+msgid "received %s"
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:414
+#, python-format
+msgid "no method for message: %s"
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:415
+#, python-format
+msgid "No method for message: %s"
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:443
+#: cinder/openstack/common/rpc/impl_zmq.py:286
+#, python-format
+msgid "Expected exception during message handling (%s)"
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:451
+#: cinder/openstack/common/rpc/impl_zmq.py:292
+msgid "Exception during message handling"
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:586
+#, python-format
+msgid "Making synchronous call on %s ..."
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:589
+#, python-format
+msgid "MSG_ID is %s"
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:623
+#, python-format
+msgid "Making asynchronous cast on %s..."
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:632
+msgid "Making asynchronous fanout cast..."
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:660
+#, python-format
+msgid "Sending %(event_type)s on %(topic)s"
+msgstr ""
+
+#: cinder/openstack/common/rpc/common.py:74
+msgid "An unknown RPC related exception occurred."
+msgstr ""
+
+#: cinder/openstack/common/rpc/common.py:104
+#, python-format
+msgid ""
+"Remote error: %(exc_type)s %(value)s\n"
+"%(traceback)s."
+msgstr ""
+
+#: cinder/openstack/common/rpc/common.py:121
+#, python-format
+msgid ""
+"Timeout while waiting on RPC response - topic: \"%(topic)s\", RPC method:"
+" \"%(method)s\" info: \"%(info)s\""
+msgstr ""
+
+#: cinder/openstack/common/rpc/common.py:137
+#: cinder/openstack/common/rpc/common.py:138
+#: cinder/openstack/common/rpc/common.py:139
+msgid "<unknown>"
+msgstr ""
+
+#: cinder/openstack/common/rpc/common.py:143
+#, python-format
+msgid "Found duplicate message(%(msg_id)s). Skipping it."
+msgstr ""
+
+#: cinder/openstack/common/rpc/common.py:147
+msgid "Invalid reuse of an RPC connection."
+msgstr ""
+
+#: cinder/openstack/common/rpc/common.py:151
+#, python-format
+msgid "Specified RPC version, %(version)s, not supported by this endpoint."
+msgstr ""
+
+#: cinder/openstack/common/rpc/common.py:156
+#, python-format
+msgid ""
+"Specified RPC envelope version, %(version)s, not supported by this "
+"endpoint."
+msgstr ""
+
+#: cinder/openstack/common/rpc/common.py:280
+#, python-format
+msgid "Failed to sanitize %(item)s. Key error %(err)s"
+msgstr ""
+
+#: cinder/openstack/common/rpc/common.py:302
+#, python-format
+msgid "Returning exception %s to caller"
+msgstr ""
+
+#: cinder/openstack/common/rpc/impl_kombu.py:169
+#: cinder/openstack/common/rpc/impl_qpid.py:133
+msgid "Failed to process message... skipping it."
+msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:480 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:502 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:539 +#, python-format +msgid "" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:555 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:609 +#: cinder/openstack/common/rpc/impl_qpid.py:403 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:627 +#: cinder/openstack/common/rpc/impl_qpid.py:418 +#, python-format +msgid "Timed out waiting for RPC response: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:631 +#: cinder/openstack/common/rpc/impl_qpid.py:422 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:670 +#: cinder/openstack/common/rpc/impl_qpid.py:457 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:351 +#, python-format +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:357 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:370 +msgid "Re-established AMQP queues" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:430 +msgid "Error processing message. Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:97 +msgid "JSON serialization failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:104 +#, python-format +msgid "Deserializing: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:140 +#, python-format +msgid "Connecting to %(addr)s with %(type)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:141 +#, python-format +msgid "-> Subscribed to %(subscribe)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:142 +#, python-format +msgid "-> bind: %(bind)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:150 +msgid "Could not open socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:162 +#, python-format +msgid "Subscribing to %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:204 +msgid "You cannot recv on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:209 +msgid "You cannot send on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:273 +#, python-format +msgid "Running func with context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:311 +msgid "Sending reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:345 +msgid "RPC message did not include method." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:380 +msgid "Registering reactor" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:392 +msgid "In reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:407 +msgid "Out reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:411 +msgid "Consuming socket" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:453 +#, python-format +msgid "CONSUMER GOT %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:465 +#, python-format +msgid "Creating proxy for topic: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:471 +msgid "Topic contained dangerous characters." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:496 +#, python-format +msgid "ROUTER RELAY-OUT SUCCEEDED %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:505 +msgid "Topic socket file creation failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:510 +#, python-format +msgid "ROUTER RELAY-OUT QUEUED %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:513 +#, python-format +msgid "Local per-topic backlog buffer full for topic %(topic)s. Dropping message." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:532 +#, python-format +msgid "Could not create IPC directory %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:542 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:576 +#, python-format +msgid "CONSUMER RECEIVED DATA: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:578 +#, python-format +msgid "ROUTER RELAY-OUT %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:600 +msgid "ZMQ Envelope version unsupported or unknown." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:628 +msgid "Skipping topic registration. Already registered." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:635 +#, python-format +msgid "Consumer is a zmq.%s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:687 +msgid "Creating payload" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:700 +msgid "Creating queue socket for reply waiter" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:713 +msgid "Sending cast" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:716 +msgid "Cast sent; Waiting reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:719 +#, python-format +msgid "Received message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:720 +msgid "Unpacking response" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:729 +msgid "Unsupported or unknown ZMQ envelope returned." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:736 +msgid "RPC Message Invalid." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:760 +#, python-format +msgid "%(msg)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:763 +#, python-format +msgid "Sending message(s) to: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:767 +msgid "No matchmaker results. Not casting." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:770 +msgid "No match from matchmaker." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:53 +msgid "Match not found by MatchMaker." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:89 +msgid "Matchmaker does not implement registration or heartbeat." 
+msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:239 +#, python-format +msgid "Matchmaker unregistered: %s, %s" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:250 +msgid "Register before starting heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:343 +#: cinder/openstack/common/rpc/matchmaker.py:361 +#, python-format +msgid "No key defining hosts for topic '%s', see ringfile" +msgstr "" + +#: cinder/scheduler/chance.py:51 cinder/scheduler/simple.py:90 +msgid "Is the appropriate service running?" +msgstr "" + +#: cinder/scheduler/chance.py:56 +msgid "Could not find another host" +msgstr "" + +#: cinder/scheduler/driver.py:89 +msgid "Must implement a fallback schedule" +msgstr "" + +#: cinder/scheduler/driver.py:93 +msgid "Must implement schedule_create_volume" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:113 +msgid "Invalid value for 'scheduler_max_attempts', must be >=1" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:131 +#, python-format +msgid "" +"Error scheduling %(volume_id)s from last vol-service: %(last_host)s : " +"%(exc)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:160 +#, python-format +msgid "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:208 +#, python-format +msgid "Filtered %(hosts)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:214 +#, python-format +msgid "Choosing %(best_host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:240 +#, python-format +msgid "Ignoring %(service_name)s service update from %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:244 +#, python-format +msgid "Received %(service_name)s service update from %(host)s." +msgstr "" + +#: cinder/scheduler/host_manager.py:266 +msgid "service is down or disabled." +msgstr "" + +#: cinder/scheduler/manager.py:119 +#, python-format +msgid "Failed to schedule_%(method)s: %(ex)s" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:69 +#, python-format +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:78 +#, python-format +msgid "Could not decode scheduler options: '%(e)s'" +msgstr "" + +#: cinder/scheduler/simple.py:79 +msgid "Not enough allocatable volume gigabytes remaining" +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:37 +msgid "Free capacity not set: volume node info collection broken." +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:51 +#, python-format +msgid "" +"Insufficient free space for volume creation (requested / avail): " +"%(requested)s/%(available)s" +msgstr "" + +#: cinder/scheduler/filters/retry_filter.py:41 +#, python-format +msgid "Host %(host)s %(pass_msg)s. 
Previously tried hosts: %(hosts)s" +msgstr "" + +#: cinder/tests/fake_driver.py:45 cinder/volume/driver.py:506 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "" + +#: cinder/tests/fake_driver.py:93 cinder/tests/fake_driver.py:98 +#, python-format +msgid "LoggingVolumeDriver: %s" +msgstr "" + +#: cinder/tests/fake_utils.py:72 +#, python-format +msgid "Faking execution of cmd (subprocess): %s" +msgstr "" + +#: cinder/tests/fake_utils.py:80 +#, python-format +msgid "Faked command matched %s" +msgstr "" + +#: cinder/tests/fake_utils.py:96 +#, python-format +msgid "Faked command raised an exception %s" +msgstr "" + +#: cinder/tests/fake_utils.py:101 +#, python-format +msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" +msgstr "" + +#: cinder/tests/test_misc.py:59 +#, python-format +msgid "" +"The following migrations are missing a downgrade:\n" +"\t%s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:243 +#, python-format +msgid "unrecognized argument %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1217 +#, python-format +msgid "Run CLI command: %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1220 +#, python-format +msgid "" +"CLI output:\n" +" stdout: %(stdout)s\n" +" stderr: %(stderr)s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1225 +#, python-format +msgid "" +"CLI Exception output:\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/tests/test_volume_types.py:58 +#, python-format +msgid "Given data: %s" +msgstr "" + +#: cinder/tests/test_volume_types.py:59 +#, python-format +msgid "Result data: %s" +msgstr "" + +#: cinder/tests/api/contrib/test_backups.py:638 +msgid "Invalid input" +msgstr "" + +#: cinder/tests/integrated/test_login.py:31 +#, python-format +msgid "volume: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:37 +#, python-format +msgid "" +"%(message)s\n" +"Status Code: %(_status)s\n" +"Body: %(_body)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:46 +msgid "Authentication error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:54 +msgid "Authorization error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:62 +msgid "Item not found" +msgstr "" + +#: cinder/tests/integrated/api/client.py:104 +#, python-format +msgid "Doing %(method)s on %(relative_url)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:106 +#, python-format +msgid "Body: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:124 +#, python-format +msgid "%(auth_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:150 +#, python-format +msgid "%(relative_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:160 +msgid "Unexpected status code" +msgstr "" + +#: cinder/tests/integrated/api/client.py:167 +#, python-format +msgid "Decoding JSON: %s" +msgstr "" + +#: cinder/volume/api.py:96 +msgid "May specify only one of snapshot, imageRef or source volume" +msgstr "" + +#: cinder/volume/api.py:103 cinder/volume/api.py:449 +#: cinder/volume/manager.py:554 +msgid "status must be available" +msgstr "" + +#: cinder/volume/api.py:108 +msgid "Volume size cannot be lesser than the Snapshot size" +msgstr "" + +#: cinder/volume/api.py:117 +msgid "Unable to clone volumes that are in an error state" +msgstr "" + +#: cinder/volume/api.py:123 +msgid "Clones currently must be >= original volume size." 
+msgstr "" + +#: cinder/volume/api.py:140 +#, python-format +msgid "Volume size '%s' must be an integer and greater than 0" +msgstr "" + +#: cinder/volume/api.py:150 +msgid "Size of specified image is larger than volume size." +msgstr "" + +#: cinder/volume/api.py:154 +msgid "Image minDisk size is larger than the volume size." +msgstr "" + +#: cinder/volume/api.py:168 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/api.py:177 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create volume (%(d_consumed)d " +"volumes already consumed)" +msgstr "" + +#: cinder/volume/api.py:308 +msgid "Failed to update quota for deleting volume" +msgstr "" + +#: cinder/volume/api.py:316 +msgid "Volume status must be available or error" +msgstr "" + +#: cinder/volume/api.py:321 +#, python-format +msgid "Volume still has %d dependent snapshots" +msgstr "" + +#: cinder/volume/api.py:374 cinder/volume/api.py:432 +#: cinder/volume/volume_types.py:65 +#, python-format +msgid "Searching by: %s" +msgstr "" + +#: cinder/volume/api.py:452 +msgid "already attached" +msgstr "" + +#: cinder/volume/api.py:459 +msgid "already detached" +msgstr "" + +#: cinder/volume/api.py:470 +msgid "Volume status must be available to reserve" +msgstr "" + +#: cinder/volume/api.py:519 +msgid "must be available" +msgstr "" + +#: cinder/volume/api.py:537 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG snapshot " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/api.py:546 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create snapshot (%(d_consumed)d " +"snapshots already consumed)" +msgstr "" + +#: cinder/volume/api.py:595 +msgid "Volume Snapshot status must be available or error" +msgstr "" + +#: cinder/volume/api.py:623 +msgid "Metadata property key blank" +msgstr "" + +#: cinder/volume/api.py:627 +msgid "Metadata property key greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:631 +msgid "Metadata property value greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:716 +msgid "Volume status must be available/in-use." +msgstr "" + +#: cinder/volume/api.py:719 +msgid "Volume status is in-use." +msgstr "" + +#: cinder/volume/driver.py:94 cinder/volume/drivers/netapp/nfs.py:253 +#, python-format +msgid "Recovering from a failed execute. Try number %s" +msgstr "" + +#: cinder/volume/driver.py:221 cinder/volume/drivers/emc/emc_smis_iscsi.py:119 +msgid "ISCSI provider_location not stored, using discovery" +msgstr "" + +#: cinder/volume/driver.py:269 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/driver.py:273 cinder/volume/drivers/emc/emc_smis_iscsi.py:162 +#, python-format +msgid "ISCSI Discovery: Found %s" +msgstr "" + +#: cinder/volume/driver.py:361 +#, python-format +msgid "copy_image_to_volume %s." +msgstr "" + +#: cinder/volume/driver.py:378 +#, python-format +msgid "copy_volume_to_image %s." +msgstr "" + +#: cinder/volume/driver.py:437 +#, python-format +msgid "iSCSI device not found at %s" +msgstr "" + +#: cinder/volume/driver.py:439 +#, python-format +msgid "" +"ISCSI volume not yet found at: %(host_device)s. Will rescan & retry. 
Try" +" number: %(tries)s" +msgstr "" + +#: cinder/volume/driver.py:451 +#, python-format +msgid "Found iSCSI node %(host_device)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/volume/driver.py:469 cinder/volume/manager.py:689 +#: cinder/volume/drivers/lvm.py:560 cinder/volume/drivers/lvm.py:676 +#: cinder/volume/drivers/storwize_svc.py:1344 +#: cinder/volume/drivers/emc/emc_smis_common.py:855 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:241 +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1503 +#: cinder/volume/drivers/netapp/iscsi.py:1114 +#: cinder/volume/drivers/netapp/iscsi.py:1484 +#: cinder/volume/drivers/netapp/iscsi.py:2236 +#: cinder/volume/drivers/netapp/iscsi.py:2515 +#: cinder/volume/drivers/nexenta/volume.py:317 +msgid "Updating volume status" +msgstr "" + +#: cinder/volume/driver.py:544 +msgid "Driver must implement initialize_connection" +msgstr "" + +#: cinder/volume/manager.py:122 +#, python-format +msgid "Driver path %s is deprecated, update your configuration to the new path." +msgstr "" + +#: cinder/volume/manager.py:144 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "" + +#: cinder/volume/manager.py:149 +#, python-format +msgid "volume %s stuck in a downloading state" +msgstr "" + +#: cinder/volume/manager.py:154 +#, python-format +msgid "volume %s: skipping export" +msgstr "" + +#: cinder/volume/manager.py:156 +msgid "Resuming any in progress delete operations" +msgstr "" + +#: cinder/volume/manager.py:159 +#, python-format +msgid "Resuming delete on volume: %s" +msgstr "" + +#: cinder/volume/manager.py:219 +#, python-format +msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +msgstr "" + +#: cinder/volume/manager.py:228 +#, python-format +msgid "volume %s: creating from snapshot" +msgstr "" + +#: cinder/volume/manager.py:232 +#, python-format +msgid "volume %s: creating from existing volume" +msgstr "" + +#: cinder/volume/manager.py:236 +#, python-format +msgid "volume %s: creating from image" +msgstr "" + +#: cinder/volume/manager.py:245 +#, python-format +msgid "volume %s: creating" +msgstr "" + +#: cinder/volume/manager.py:277 +#, python-format +msgid "volume %s: creating export" +msgstr "" + +#: cinder/volume/manager.py:286 +#, python-format +msgid "volume %s: create failed" +msgstr "" + +#: cinder/volume/manager.py:316 +#, python-format +msgid "volume %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:324 +#, python-format +msgid "Error: %s" +msgstr "" + +#: cinder/volume/manager.py:350 +#, python-format +msgid "volume %s: Error trying to reschedule create" +msgstr "" + +#: cinder/volume/manager.py:368 +msgid "Retry info not present, will not reschedule" +msgstr "" + +#: cinder/volume/manager.py:372 +msgid "No request spec, will not reschedule" +msgstr "" + +#: cinder/volume/manager.py:377 +#, python-format +msgid "volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d" +msgstr "" + +#: cinder/volume/manager.py:406 cinder/volume/manager.py:419 +#, python-format +msgid "volume %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:412 +msgid "volume is not local to this node" +msgstr "" + +#: cinder/volume/manager.py:417 +#, python-format +msgid "volume %s: removing export" +msgstr "" + +#: cinder/volume/manager.py:422 +#, python-format +msgid "volume %s: volume is busy" +msgstr "" + +#: cinder/volume/manager.py:441 +msgid "Failed to update usages deleting volume" +msgstr "" + +#: cinder/volume/manager.py:445 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "" + +#: 
cinder/volume/manager.py:460 +#, python-format +msgid "snapshot %s: creating" +msgstr "" + +#: cinder/volume/manager.py:466 +#, python-format +msgid "snapshot %(snap_name)s: creating" +msgstr "" + +#: cinder/volume/manager.py:484 +#, python-format +msgid "snapshot %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:492 cinder/volume/manager.py:502 +#, python-format +msgid "snapshot %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:505 +#, python-format +msgid "snapshot %s: snapshot is busy" +msgstr "" + +#: cinder/volume/manager.py:530 +msgid "Failed to update usages deleting snapshot" +msgstr "" + +#: cinder/volume/manager.py:533 +#, python-format +msgid "snapshot %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:551 +msgid "being attached by another instance" +msgstr "" + +#: cinder/volume/manager.py:608 +#, python-format +msgid "Downloaded image %(image_id)s to %(volume_id)s successfully" +msgstr "" + +#: cinder/volume/manager.py:626 +#, python-format +msgid "Uploaded volume %(volume_id)s to image (%(image_id)s) successfully" +msgstr "" + +#: cinder/volume/manager.py:702 +msgid "Clear capabilities" +msgstr "" + +#: cinder/volume/manager.py:706 +#, python-format +msgid "Notification {%s} received" +msgstr "" + +#: cinder/volume/volume_types.py:41 +#, python-format +msgid "DB error: %s" +msgstr "" + +#: cinder/volume/volume_types.py:50 cinder/volume/volume_types.py:97 +msgid "id cannot be None" +msgstr "" + +#: cinder/volume/volume_types.py:109 +msgid "name cannot be None" +msgstr "" + +#: cinder/volume/volume_types.py:128 +#, python-format +msgid "" +"Default volume type is not found, please check default_volume_type " +"config: %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:91 +msgid "Running with CoraidDriver for ESM EtherCloud" +msgstr "" + +#: cinder/volume/drivers/coraid.py:102 +#, python-format +msgid "Update session cookie %(session)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:108 cinder/volume/drivers/coraid.py:183 +#, python-format +msgid "Message: %(message)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:125 +#, python-format +msgid "Error while trying to set group: %(message)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:128 +#, python-format +msgid "Unable to find group: %(group)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:161 +msgid "ESM urlOpen error" +msgstr "" + +#: cinder/volume/drivers/coraid.py:166 +msgid "JSON Error" +msgstr "" + +#: cinder/volume/drivers/coraid.py:170 +msgid "Request without URL" +msgstr "" + +#: cinder/volume/drivers/coraid.py:175 +#, python-format +msgid "Configure data: %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:177 +#, python-format +msgid "Configure response: %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:199 +#, python-format +msgid "Unable to retrieve volume info for volume %(volname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:289 +msgid "Cannot log in to Coraid ESM" +msgstr "" + +#: cinder/volume/drivers/coraid.py:308 +#, python-format +msgid "Failed to create volume %(volname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:321 +#, python-format +msgid "Failed to delete volume %(volname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:335 +#, python-format +msgid "Failed to create snapshot %(snapname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:347 +#, python-format +msgid "Failed to delete snapshot %(snapname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:362 +#, python-format +msgid "Failed to create volume from snapshot %(snapname)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:383 +#, python-format +msgid "" +"Failed to initialize connection. Volume Name: %(volname)s Shelf: " +"%(shelf)s, Lun: %(lun)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:65 +#, python-format +msgid "There's no Gluster config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:70 +#, python-format +msgid "Gluster config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:80 +msgid "mount.glusterfs is not installed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:98 cinder/volume/drivers/nfs.py:168 +#, python-format +msgid "cast to %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:108 cinder/volume/drivers/nfs.py:178 +#, python-format +msgid "Volume %s does not have provider_location specified, skipping" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:168 cinder/volume/drivers/nfs.py:237 +#, python-format +msgid "Exception during mounting %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:247 cinder/volume/drivers/nfs.py:321 +#, python-format +msgid "%s is already mounted" +msgstr "" + +#: cinder/volume/drivers/lvm.py:82 +#, python-format +msgid "volume group %s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/lvm.py:197 +#, python-format +msgid "Size for volume: %s not found, skipping secure delete." +msgstr "" + +#: cinder/volume/drivers/lvm.py:204 +#, python-format +msgid "Performing secure delete on volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:218 +#, python-format +msgid "Error: unrecognized volume_clear option: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:238 +#, python-format +msgid "snapshot: %s not found, skipping delete operations" +msgstr "" + +#: cinder/volume/drivers/lvm.py:268 cinder/volume/drivers/lvm.py:656 +#, python-format +msgid "Creating clone of volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:345 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:364 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:378 +msgid "Detected inconsistency in provider_location id" +msgstr "" + +#: cinder/volume/drivers/lvm.py:430 +#, python-format +msgid "Symbolic link %s not found" +msgstr "" + +#: cinder/volume/drivers/lvm.py:510 cinder/volume/drivers/lvm.py:524 +#, python-format +msgid "Skipping remove_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:542 +#, python-format +msgid "" +"Skipping remove_export. 
No iscsi_target is presently exported for volume:" +" %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:583 +msgid "Error retrieving volume status: " +msgstr "" + +#: cinder/volume/drivers/nfs.py:141 +#, python-format +msgid "There's no NFS config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/nfs.py:146 +#, python-format +msgid "NFS config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/rbd.py:64 +#, python-format +msgid "rbd has no pool %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:87 cinder/volume/drivers/sheepdog.py:134 +msgid "error refreshing volume stats" +msgstr "" + +#: cinder/volume/drivers/rbd.py:220 +msgid "Not stored in rbd" +msgstr "" + +#: cinder/volume/drivers/rbd.py:224 +msgid "Blank components" +msgstr "" + +#: cinder/volume/drivers/rbd.py:227 +msgid "Not an rbd snapshot" +msgstr "" + +#: cinder/volume/drivers/rbd.py:242 +#, python-format +msgid "%s is in a different ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:253 +#, python-format +msgid "Unable to read image %s" +msgstr "" + +#: cinder/volume/drivers/scality.py:63 +msgid "Value required for 'scality_sofs_config'" +msgstr "" + +#: cinder/volume/drivers/scality.py:74 +#, python-format +msgid "Cannot access 'scality_sofs_config': %s" +msgstr "" + +#: cinder/volume/drivers/scality.py:80 +msgid "Cannot execute /sbin/mount.sofs" +msgstr "" + +#: cinder/volume/drivers/scality.py:101 +msgid "Cannot mount Scality SOFS, check syslog for errors" +msgstr "" + +#: cinder/volume/drivers/scality.py:135 +#, python-format +msgid "Cannot find volume dir for Scality SOFS at '%s'" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:47 +#, python-format +msgid "Sheepdog is not working: %s" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:52 +msgid "Sheepdog is not working" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:129 +#, python-format +msgid "Payload for SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:146 +#, python-format +msgid "Call to json.loads() raised an exception: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:152 +#, python-format +msgid "Results of SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:156 +#, python-format +msgid "Clone operation encountered: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:158 +#, python-format +msgid "Waiting for outstanding operation before retrying snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:164 +#, python-format +msgid "Detected xDBVersionMismatch, retry %s of 5" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:171 +#, python-format +msgid "API response: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:191 +#, python-format +msgid "Found solidfire account: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:220 +#, python-format +msgid "solidfire account: %s does not exist, create it..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:281 +#, python-format +msgid "Failed to retrieve volume SolidFire-ID: %s in get_by_account!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:338 +msgid "Failed to get model update from clone" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:364 +#, python-format +msgid "More than one valid preset was detected, using %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:397 +#, python-format +msgid "Mapped SolidFire volumeID %(sfid)s to cinder ID %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:406 +#, python-format +msgid "Volume %s, not found on SF Cluster." 
+msgstr "" + +#: cinder/volume/drivers/solidfire.py:409 +#, python-format +msgid "Found %(count)s volumes mapped to id: %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:476 +msgid "Enter SolidFire delete_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:480 +#, python-format +msgid "Account for Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:482 +msgid "This usually means the volume was never succesfully created." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:497 +#, python-format +msgid "Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:500 +msgid "Leaving SolidFire delete_volume" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:504 +msgid "Executing SolidFire ensure_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:509 +msgid "Executing SolidFire create_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:558 +msgid "Updating cluster status info" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:566 +msgid "Failed to get updated stats" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:173 +#, python-format +msgid "WWPN on node %(node)s: %(wwpn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:179 +msgid "enter: do_setup" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:190 +#, python-format +msgid "pool %s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:206 +msgid "Failed to get license information." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:265 +msgid "do_setup: No configured nodes" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:267 +msgid "leave: do_setup" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:289 +msgid "enter: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:295 +#: cinder/volume/drivers/netapp/iscsi.py:169 +#: cinder/volume/drivers/netapp/iscsi.py:1182 +#: cinder/volume/drivers/netapp/iscsi.py:1535 +#: cinder/volume/drivers/netapp/nfs.py:109 +#: cinder/volume/drivers/netapp/nfs.py:328 +#: cinder/volume/drivers/netapp/nfs.py:379 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:118 +#, python-format +msgid "%s is not set" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:301 +msgid "" +"Password or SSH private key is required for authentication: set either " +"san_password or san_private_key option" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:309 +#, python-format +msgid "" +"Illegal value %d specified for storwize_svc_flashcopy_timeout: valid " +"values are between 0 and 600" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:317 +msgid "leave: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:331 +#, python-format +msgid "ensure_export: Volume %s not found on storage" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:356 +#, python-format +msgid "enter: _get_chap_secret_for_host: host name %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:392 +#, python-format +msgid "" +"leave: _get_chap_secret_for_host: host name %(host_name)s with secret " +"%(chap_secret)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:415 +msgid "_create_host: Cannot clean host name. 
Host name is not unicode or string" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:483 +#, python-format +msgid "enter: _get_host_from_connector: prefix %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:511 +#, python-format +msgid "leave: _get_host_from_connector: host %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:523 +#, python-format +msgid "enter: _create_host: host %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:538 +msgid "_create_host: No connector ports" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:551 +#, python-format +msgid "leave: _create_host: host %(host)s - %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:574 +#, python-format +msgid "enter: _map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:609 +msgid "" +"storwize_svc_multihostmap_enabled is set to Flase, Not allow multi host " +"mapping" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:619 +#, python-format +msgid "volume %s mapping to multi host" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:625 +#, python-format +msgid "" +"leave: _map_vol_to_host: LUN %(result_lun)s, volume %(volume_name)s, host" +" %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:635 +#, python-format +msgid "enter: _delete_host: host %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:643 +#, python-format +msgid "leave: _delete_host: host %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:675 +#, python-format +msgid "enter: initialize_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:691 +msgid "_create_host failed to return the host name." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:702 +#, python-format +msgid "initialize_connection: Failed to get attributes for volume %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:709 +#, python-format +msgid "Did not find expected column name in lsvdisk: %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:711 +#, python-format +msgid "initialize_connection: Missing volume attribute for volume %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:728 +#, python-format +msgid "" +"initialize_connection: No node found in I/O group %(gid)s for volume " +"%(vol)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:736 +#, python-format +msgid "initialize_connection: Did not find a preferred node for volume %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:769 +#, python-format +msgid "" +"initialize_connection: Failed to collect return properties for volume " +"%(vol)s and connector %(conn)s.\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:774 +#, python-format +msgid "" +"leave: initialize_connection:\n" +" volume: %(vol)s\n" +" connector %(conn)s\n" +" properties: %(prop)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:792 +#, python-format +msgid "enter: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:802 +msgid "_get_host_from_connector failed to return the host name for connector" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:817 +#, python-format +msgid "" +"terminate_connection: No mapping of volume %(vol_name)s to host " +"%(host_name)s found" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:825 +#, python-format +msgid "leave: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: 
cinder/volume/drivers/storwize_svc.py:883 +msgid "protocol must be specified as ' iSCSI' or ' FC'" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:907 +#, python-format +msgid "enter: _create_vdisk: vdisk %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:945 +#, python-format +msgid "" +"_create_vdisk %(name)s - did not find success message in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:950 +#, python-format +msgid "leave: _create_vdisk: volume %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:962 +#: cinder/volume/drivers/storwize_svc.py:976 +#, python-format +msgid "" +"create FC mapping from %(source)s to %(target)s - did not find success " +"message in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:988 +#: cinder/volume/drivers/storwize_svc.py:998 +#, python-format +msgid "" +"create FC mapping from %(source)s to %(target)s - did not find mapping id" +" in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1012 +#, python-format +msgid "" +"_prepare_fc_map: Failed to prepare FlashCopy from %(source)s to " +"%(target)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1039 +#, python-format +msgid "" +"Unexecpted mapping status %(status)s for mapping %(id)s. Attributes: " +"%(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1050 +#, python-format +msgid "" +"Mapping %(id)s prepare failed to complete within the allotted %(to)d " +"seconds timeout. Terminating." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1055 +#, python-format +msgid "" +"_prepare_fc_map: Failed to start FlashCopy from %(source)s to %(target)s " +"with exception %(ex)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1062 +#, python-format +msgid "_prepare_fc_map: %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1069 +#, python-format +msgid "" +"_start_fc_map: Failed to start FlashCopy from %(source)s to %(target)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1080 +#, python-format +msgid "" +"enter: _run_flashcopy: execute FlashCopy from source %(source)s to target" +" %(target)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1092 +#, python-format +msgid "leave: _run_flashcopy: FlashCopy started from %(source)s to %(target)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1100 +#, python-format +msgid "enter: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1107 +#, python-format +msgid "_create_copy: Source vdisk %s does not exist" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1119 +#, python-format +msgid "" +"_create_copy: cannot get source vdisk %(src)s capacity from vdisk " +"attributes %(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1129 +#, python-format +msgid "leave: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1134 +#, python-format +msgid "enter: _get_flashcopy_mapping_attributes: mapping %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1156 +#, python-format +msgid "" +"leave: _get_flashcopy_mapping_attributes: mapping %(fc_map_id)s, " +"attributes %(attributes)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1164 +#, python-format +msgid 
"enter: _is_vdisk_defined: vdisk %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1166 +#, python-format +msgid "leave: _is_vdisk_defined: vdisk %(vol)s with %(str)s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1194 +#, python-format +msgid "enter: _delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1199 +#, python-format +msgid "warning: Tried to delete vdisk %s but it does not exist." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1225 +#, python-format +msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1258 +#, python-format +msgid "leave: _delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1283 +msgid "create_volume_from_snapshot: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1297 +msgid "create_cloned_volume: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1361 +msgid "_update_volume_status: Could not get system name" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1373 +msgid "Could not get pool data from the storage" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1374 +msgid "_update_volume_status: Could not get storage pool data" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1412 +msgid "" +"Illegal value specified for storwize_svc_vol_rsize: set to either a " +"percentage (0-100) or -1" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1418 +msgid "" +"Illegal value specified for storwize_svc_vol_warning: set to a percentage" +" (0-100)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1425 +msgid "" +"Illegal value specified for storwize_svc_vol_grainsize: set to either 32," +" 64, 128, or 256" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1432 +msgid "System does not support compression" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1437 +msgid "If compression is set to True, rsize must also be set (not equal to -1)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1443 +#, python-format +msgid "" +"Illegal value %(prot)s specified for storwize_svc_connection_protocol: " +"valid values are %(enabled)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1452 +msgid "" +"Multipath is currently only supported for FC connections and not iSCSI. 
" +"(This is a Nova limitation.)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1464 +#, python-format +msgid "enter: _execute_command_and_parse_attributes: command %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1471 +#, python-format +msgid "" +"CLI Exception output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1488 +#, python-format +msgid "" +"leave: _execute_command_and_parse_attributes:\n" +"command: %(cmd)s\n" +"attributes: %(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1507 +#, python-format +msgid "" +"_get_hdr_dic: attribute headers and values do not match.\n" +" Headers: %(header)s\n" +" Values: %(row)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1517 +#, python-format +msgid "" +"%(fun)s: Failed with unexpected CLI output.\n" +" Command: %(cmd)s\n" +"stdout: %(out)s\n" +"stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1531 +#, python-format +msgid "" +"%(fun)s: Failed with unexpected CLI output.\n" +" Command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1539 +#, python-format +msgid "Did not find expected column in %(fun)s: %(hdr)s" +msgstr "" + +#: cinder/volume/drivers/windows.py:142 +#, python-format +msgid "Creating folder %s " +msgstr "" + +#: cinder/volume/drivers/windows.py:207 +#: cinder/volume/drivers/nexenta/volume.py:203 +#, python-format +msgid "Ignored target creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/windows.py:213 +#, python-format +msgid "Disk not found: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:218 +#, python-format +msgid "Sending %(method)s to %(url)s. Body \"%(body)s\"" +msgstr "" + +#: cinder/volume/drivers/zadara.py:240 +#, python-format +msgid "Operation completed. %(data)s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:350 +#, python-format +msgid "Volume %(name)s could not be found. It might be already deleted" +msgstr "" + +#: cinder/volume/drivers/zadara.py:447 +#, python-format +msgid "Attach properties: %(properties)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:42 +msgid "" +"Module PyWBEM not installed. Install PyWBEM using the python-pywbem " +"package." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:78 +msgid "Entering create_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:82 +#, python-format +msgid "Create Volume: %(volume)s Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:90 +#, python-format +msgid "Create Volume: %(volume)s Storage type: %(storage_type)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:97 +#, python-format +msgid "" +"Create Volume: %(volume)s Pool: %(pool)s Storage System: " +"%(storage_system)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:106 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Storage Configuration Service not " +"found for pool %(storage_type)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:114 +#, python-format +msgid "" +"Create Volume: %(name)s Method: CreateOrModifyElementFromStoragePool " +"ConfigServicie: %(service)s ElementName: %(name)s InPool: %(pool)s " +"ElementType: 5 Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:129 +#, python-format +msgid "Create Volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:136 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:143 +#, python-format +msgid "Leaving create_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:151 +msgid "Entering create_volume_from_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:156 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:166 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Snapshot Instance: %(snapshotinstance)s Storage " +"System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:176 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Create Volume from Snapshot is NOT supported on VMAX." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:187 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Cannot find Replication Service to create volume from " +"snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:196 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Method: CreateElementReplica ReplicationService: " +"%(service)s ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:217 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s " +"Snapshot:%(snapshotname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:229 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Successfully clone volume from snapshot. Finding the " +"clone relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:240 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Remove the clone relationship. Method: " +"ModifyReplicaSynchronization ReplicationService: %(service)s Operation: " +"8 Synchronization: %(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:256 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:265 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. 
Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:277 +#, python-format +msgid "" +"Leaving create_volume_from_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:286 +msgid "Entering create_cloned_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:291 +#, python-format +msgid "" +"Create a Clone from Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:301 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Source Instance: %(src_instance)s Storage System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:311 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Cannot find Replication Service to create cloned volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:320 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Method: CreateElementReplica ReplicationService: %(service)s " +"ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:341 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source " +"Volume:%(srcname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:353 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Successfully cloned volume from source volume. Finding the clone " +"relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:364 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Remove the clone relationship. Method: ModifyReplicaSynchronization " +"ReplicationService: %(service)s Operation: 8 Synchronization: " +"%(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:380 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:389 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:401 +#, python-format +msgid "" +"Leaving create_cloned_volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:410 +msgid "Entering delete_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:412 +#, python-format +msgid "Delete Volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:419 +#, python-format +msgid "Volume %(name)s not found on the array. No volume to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:429 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Storage Configuration Service not " +"found." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:437 +#, python-format +msgid "Delete Volume: %(name)s DeviceID: %(deviceid)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:441 +#, python-format +msgid "" +"Delete Volume: %(name)s Method: EMCReturnToStoragePool ConfigServic: " +"%(service)s TheElement: %(vol_instance)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:454 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:463 +#, python-format +msgid "Leaving delete_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:470 +msgid "Entering create_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:474 +#, python-format +msgid "Create snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:486 +#, python-format +msgid "Device ID: %(deviceid)s: Storage System: %(storagesystem)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:493 +#: cinder/volume/drivers/emc/emc_smis_common.py:495 +#: cinder/volume/drivers/emc/emc_smis_common.py:565 +#, python-format +msgid "Cannot find Replication Service to create snapshot for volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:500 +#, python-format +msgid "" +"Create Snapshot: Method: CreateElementReplica: Target: %(snapshot)s " +"Source: %(volume)s Replication Service: %(service)s ElementName: " +"%(elementname)s Sync Type: 7 SourceElement: %(sourceelement)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:516 +#, python-format +msgid "" +"Create Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:525 +#, python-format +msgid "Error Create Snapshot: (snapshot)s Volume: %(volume)s Error: %(errordesc)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:533 +#, python-format +msgid "" +"Leaving create_snapshot: Snapshot: %(snapshot)s Volume: %(volume)s " +"Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:539 +msgid "Entering delete_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:543 +#, python-format +msgid "Delete Snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:549 +#, python-format +msgid "" +"Delete Snapshot: %(snapshot)s: volume: %(volume)s. Finding " +"StorageSychronization_SV_SV." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:557 +#, python-format +msgid "" +"Snapshot: %(snapshot)s: volume: %(volume)s not found on the array. No " +"snapshot to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:572 +#, python-format +msgid "" +"Delete Snapshot: Target: %(snapshot)s Source: %(volume)s. Method: " +"ModifyReplicaSynchronization: Replication Service: %(service)s " +"Operation: 19 Synchronization: %(sync_name)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:588 +#, python-format +msgid "" +"Delete Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:597 +#, python-format +msgid "" +"Error Delete Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. 
Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:609 +#, python-format +msgid "" +"Leaving delete_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:619 +#, python-format +msgid "Create export: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:624 +#, python-format +msgid "create_export: Volume: %(volume)s Device ID: %(device_id)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:646 +#, python-format +msgid "" +"ExposePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(lun_name)s" +" InitiatorPortIDs: %(initiator)s DeviceAccesses: 2" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:661 +#, python-format +msgid "ExposePaths parameter LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:672 +#, python-format +msgid "Error mapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:676 +#, python-format +msgid "ExposePaths for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:692 +#, python-format +msgid "" +"HidePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(device_id)s " +"LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:705 +#, python-format +msgid "Error unmapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:709 +#, python-format +msgid "HidePaths for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:722 +#, python-format +msgid "" +"AddMembers: ConfigServicie: %(service)s MaskingGroup: %(masking_group)s" +" Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:735 +#, python-format +msgid "Error mapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:740 +#, python-format +msgid "AddMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:753 +#, python-format +msgid "" +"RemoveMembers: ConfigServicie: %(service)s MaskingGroup: " +"%(masking_group)s Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:766 +#, python-format +msgid "Error unmapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:771 +#, python-format +msgid "RemoveMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:777 +#, python-format +msgid "Map volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:786 +#: cinder/volume/drivers/emc/emc_smis_common.py:816 +#, python-format +msgid "Cannot find Controller Configuration Service for storage system %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:800 +#, python-format +msgid "Unmap volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:806 +#, python-format +msgid "Volume %s is not mapped. No volume to unmap." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:830 +#, python-format +msgid "Initialize connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:836 +#, python-format +msgid "Volume %s is already mapped." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:848 +#, python-format +msgid "Terminate connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:880 +#, python-format +msgid "Found Storage Type: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:883 +msgid "Storage type not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:899 +#, python-format +msgid "Found Masking View: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:902 +msgid "Masking View not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:924 +msgid "Ecom user not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:944 +#, python-format +msgid "Ecom IP: %(ecomIp)s Port: %(ecomPort)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:947 +msgid "Ecom server not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:954 +msgid "Cannot connect to ECOM server" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:966 +#, python-format +msgid "Found Replication Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:979 +#, python-format +msgid "Found Storage Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:992 +#, python-format +msgid "Found Controller Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1005 +#, python-format +msgid "Found Storage Hardware ID Management Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1049 +#, python-format +msgid "Pool %(storage_type)s is not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1055 +#, python-format +msgid "Storage system not found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1061 +#, python-format +msgid "Pool: %(pool)s SystemName: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1077 +#, python-format +msgid "Pool name: %(poolname)s System name: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1109 +#, python-format +msgid "Volume %(volumename)s not found on the array." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1112 +#, python-format +msgid "Volume name: %(volumename)s Volume instance: %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1125 +#, python-format +msgid "Source: %(volumename)s Target: %(snapshotname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1148 +#, python-format +msgid "" +"Source: %(volumename)s Target: %(snapshotname)s. Storage Synchronized " +"not found. " +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1153 +#, python-format +msgid "" +"Storage system: %(storage_system)s Storage Synchronized instance: " +"%(sync)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1179 +#, python-format +msgid "Error finding %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1183 +#, python-format +msgid "Found %(name)s: %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1243 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage_system)s " +"and initiator %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1282 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage volume %(vol)s and initiator" +" %(initiator)s is %(ctrl)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1295 +#, python-format +msgid "" +"Volume %(name)s not found on the array. Cannot determine if there are " +"volumes mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1307 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage)s and " +"%(connector)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1319 +#, python-format +msgid "" +"Found %(numVolumesMapped)d volumes on storage system %(storage)s mapped " +"to %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1354 +#, python-format +msgid "Available device number on %(storage)s: %(device)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1397 +#, python-format +msgid "Device number not found for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1402 +#, python-format +msgid "Found device number %(device)d for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1412 +#, python-format +msgid "Device info: %(data)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1434 +#, python-format +msgid "Masking view: %(view)s DeviceMaskingGroup: %(masking)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1456 +#, python-format +msgid "Found Storage Processor System: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1484 +#, python-format +msgid "" +"iSCSIProtocolEndpoint for storage system %(storage_system)s and SP %(sp)s" +" is %(endpoint)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1513 +msgid "Error finding Storage Hardware ID Service." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1519 +#, python-format +msgid "" +"EMCGetTargetEndpoints: Service: %(service)s Storage HardwareIDs: " +"%(hardwareids)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1531 +msgid "Error finding Target WWNs." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1541 +#, python-format +msgid "Add target WWN: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1543 +#, python-format +msgid "Target WWNs: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1559 +#, python-format +msgid "Storage Hardware IDs for %(wwpns)s is %(foundInstances)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:158 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:167 +#, python-format +msgid "Cannot find device number for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:197 +#, python-format +msgid "Found iSCSI endpoint: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:204 +#, python-format +msgid "ISCSI endpoint not found for SP %(sp)s on storage system %(storage)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:221 +#, python-format +msgid "ISCSI properties: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:113 +msgid "read timed out" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:138 +msgid "do_setup." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:144 +msgid "check_for_setup_error." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:149 +msgid "check_for_setup_error: Can not get device type." 
+msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:153 +#, python-format +msgid "check_for_setup_error: Device type is:%(type)s, version is:%(version)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:160 +msgid "" +"check_for_setup_error: Product version not right. Please make sure the " +"product version is V1." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:171 +msgid "_get_device_type: Storage Pool must be configured." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:180 +#, python-format +msgid "create_volume:volume name: %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:194 +#, python-format +msgid "delete_volume: volume name: %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:201 +#, python-format +msgid "delete_volume:No need to delete volume. Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:210 +#, python-format +msgid "create_export: volume name:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:214 +#, python-format +msgid "create_export:Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:234 +#, python-format +msgid "initialize_connection: volume name: %(volume)s. initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:249 +#, python-format +msgid "" +"initialize_connection:Failed to find target ip for " +"initiator:%(initiatorname)s, please check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:259 +#, python-format +msgid "initialize_connection:Failed to find target iSCSI iqn. Target IP:%(ip)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:297 +#, python-format +msgid "" +"initialize_connection:host name: %(host)s, initiator name: %(ini)s, " +"hostport name: %(port)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:307 +#, python-format +msgid "" +"initialize_connection:Failed to find the given volume. volume " +"name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:359 +#, python-format +msgid "terminate_connection:volume name: %(volume)s, initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:368 +#, python-format +msgid "terminate_connection:Host does not exist. Host name:%(host)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:377 +#, python-format +msgid "terminate_connection:volume does not exist. volume name:%(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:396 +#, python-format +msgid "" +"terminate_connection:No map between host and volume. Host " +"name:%(hostname)s, volume name:%(volumename)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:414 +#, python-format +msgid "" +"terminate_connection:No initiator is added to the host. Host " +"name:%(hostname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:427 +#, python-format +msgid "create_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:434 +msgid "create_snapshot:Device does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:440 +msgid "create_snapshot:Resource pool needs 1GB valid size at least." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:447 +#, python-format +msgid "create_snapshot:Volume does not exist. 
Volume name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:456 +#, python-format +msgid "create_snapshot:Snapshot does not exist. Snapshot name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:468 +#, python-format +msgid "delete_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:475 +msgid "delete_snapshot:Device does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:484 +#, python-format +msgid "delete_snapshot:Snapshot does not exist. snapshot name:%(snap)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:497 +#, python-format +msgid "" +"create_volume_from_snapshot:snapshot name:%(snapshot)s, volume " +"name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:505 +#, python-format +msgid "" +"create_volume_from_snapshot:Device does not support create volume from " +"snapshot. Volume name:%(volume)s, snapshot name:%(snapshot)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:516 +#, python-format +msgid "" +"create_volume_from_snapshot:Snapshot does not exist. Snapshot " +"name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:567 +msgid "Config file is wrong. Controler IP, UserName and UserPassword must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:573 +#, python-format +msgid "_check_conf_file: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:584 +#, python-format +msgid "_read_xml:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:612 +#, python-format +msgid "Write login information to xml error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:616 +#, python-format +msgid "_get_login_info error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:638 +#, python-format +msgid "" +"Config file is wrong. LUNType must be \"Thin\" or \"Thick\". " +"LUNType:%(type)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:665 +msgid "_get_lun_set_info:Use default prefetch type. Prefetch type:Intelligent." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:676 +msgid "" +"_get_lun_set_info:No available pools! Please check whether storage pool " +"is created." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:686 +#, python-format +msgid "_get_lun_set_info:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:736 +msgid "" +"_get_maximum_pool:maxpoolid is None. Please check config file and make " +"sure the \"Name\" in \"StoragePool\" is right." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:754 +#, python-format +msgid "_get_iscsi_info:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:764 +#, python-format +msgid "CLI command:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:831 +#, python-format +msgid "_execute_cli:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:838 +#, python-format +msgid "" +"_name_translate:Name in cinder: %(old)s, new name in storage system: " +"%(new)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:872 +#, python-format +msgid "" +"_create_hostgroup:Failed to Create hostgroup. Hostgroup name: %(name)s. " +"out:%(out)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:901 +#, python-format +msgid "" +"_add_host:Failed to add host to hostgroup. 
+"id:%(hostgroup)s out:%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:927
+#, python-format
+msgid ""
+"_add_initiator:Failed to add initiator. initiator name:%(name)s "
+"out:%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:941
+#, python-format
+msgid ""
+"_delete_initiator:ERROR:Failed to delete initiator. initiator "
+"name:%(name)s out:%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:991
+#, python-format
+msgid ""
+"_add_hostport:Failed to add hostport. port name:%(port)s port "
+"information:%(info)s host id:%(host)s out:%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1009
+#, python-format
+msgid "_delete_hostport:Failed to delete host port. port id:%(portid)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1016
+#, python-format
+msgid "_get_tgt_iqn:iSCSI IP is %s."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1049
+#, python-format
+msgid "_get_tgt_iqn:iSCSI target iqn is:%s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1086
+#, python-format
+msgid ""
+"_map_lun:Failed to add hostmap. hostid:%(host)s lunid:%(lun)s "
+"hostlunid:%(hostlunid)s out:%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1122
+#, python-format
+msgid ""
+"_delete_map:There are IOs accessing the system. Retrying to delete host map."
+" map id:%(mapid)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1128
+#, python-format
+msgid "_delete_map:Failed to delete host map. mapid:%(mapid)s out:%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1142
+#, python-format
+msgid "_delete_host: Failed to delete host. host id:%(hostid)s out:%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1223
+#, python-format
+msgid ""
+"_active_snapshot:Failed to activate snapshot. snapshot id:%(name)s. "
+"out:%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1237
+#, python-format
+msgid ""
+"_disable_snapshot:Failed to disable snapshot. snapshot id:%(id)s. "
+"out:%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1251
+#, python-format
+msgid ""
+"_delete_snapshot:Failed to delete snapshot. snapshot id:%(id)s. "
+"out:%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1308
+#, python-format
+msgid "_create_volume:Failed to create volume. volume name:%(name)s. out:%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1321
+#, python-format
+msgid "_delete_volume:Failed to delete volume. Volume name:%(name)s out:%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1338
+#, python-format
+msgid ""
+"_create_luncopy:Failed to create LUNcopy. LUNcopy name:%(name)s "
+"out:%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1352
+#, python-format
+msgid ""
+"_start_luncopy:Failed to start LUNcopy. LUNcopy id:%(luncopyid)s "
+"out:%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1382
+#, python-format
+msgid ""
+"_wait_for_luncopy:LUNcopy status is not normal. LUNcopy "
+"name:%(luncopyname)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1415
+#, python-format
+msgid ""
+"_delete_luncopy:Failed to delete LUNcopy. LUNcopy id:%(luncopyid)s "
+"out:%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1430
+#, python-format
+msgid ""
+"_create_snapshot:Failed to create snapshot. Snapshot name:%(name)s "
+"out:%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1468
+#, python-format
+msgid ""
+"_change_lun_controller:Failed to change lun owning controller. lun "
+"id:%(lunid)s. new controller:%(controller)s. out:%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1484
+msgid "_is_resource_pool_enough:Resource pool for snapshot has not been added."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:123
+#, python-format
+msgid "API %(name)s failed: %(reason)s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:134
+#: cinder/volume/drivers/netapp/iscsi.py:1167
+#, python-format
+msgid "Using WSDL: %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:144
+#, python-format
+msgid "Using DFM server: %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:149
+#, python-format
+msgid "Using storage service: %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:154
+#, python-format
+msgid "Using storage service prefix: %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:160
+#, python-format
+msgid "Using vfiler: %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:173
+msgid "Either netapp_storage_service or netapp_storage_service_prefix must be set"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:204
+msgid "Connected to DFM server"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:282
+#, python-format
+msgid "Discovered %(dataset_count)s datasets and %(lun_count)s LUNs"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:317
+#, python-format
+msgid "Job failed: %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:346
+msgid ""
+"Attempt to use volume_type without specifying "
+"netapp_storage_service_prefix flag."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:350
+msgid ""
+"You must set the netapp_storage_service flag in order to create volumes "
+"with no volume_type."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:420
+msgid "Failed to provision dataset member"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:435
+msgid "No LUN was created by the provision job"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:463
+#: cinder/volume/drivers/netapp/iscsi.py:1250
+#, python-format
+msgid "No entry in LUN table for volume %(name)s."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:481
+msgid "Failed to remove and delete dataset LUN member"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:497
+msgid "Failed to remove and delete dataset Qtree member"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:538
+#, python-format
+msgid "No entry in LUN table for volume %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:558
+#, python-format
+msgid "Failed to get LUN details for LUN ID %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:575
+#, python-format
+msgid "Failed to get host details for host ID %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:812
+#: cinder/volume/drivers/netapp/iscsi.py:859
+#, python-format
+msgid "No LUN ID for volume %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:821
+#, python-format
+msgid "Failed to get target portal for filer: %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:826
+#, python-format
+msgid "Failed to get target IQN for filer: %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:930
+msgid ""
+"Failed to get LUN list. Is the DFM host time-synchronized with Cinder "
+"host?"
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1042 +#: cinder/volume/drivers/netapp/iscsi.py:1381 +#: cinder/volume/drivers/netapp/iscsi.py:1699 +#: cinder/volume/drivers/netapp/nfs.py:74 +#, python-format +msgid "" +"Cannot create volume of size %(vol_size)s from snapshot of size " +"%(snap_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1054 +#, python-format +msgid "" +"Cannot create volume of type %(new_type)s from snapshot of type " +"%(old_type)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1075 +#: cinder/volume/drivers/netapp/nfs.py:273 +#, python-format +msgid "" +"Cannot create clone of size %(vol_size)s from volume of size " +"%(src_vol_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1086 +#, python-format +msgid "Cannot create clone of type %(new_type)s from volume of type %(old_type)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1143 +#, python-format +msgid "No metadata property %(prop)s defined for the LUN %(name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1216 +#: cinder/volume/drivers/netapp/iscsi.py:1561 +msgid "Success getting LUN list from server" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1238 +#: cinder/volume/drivers/netapp/iscsi.py:1576 +#, python-format +msgid "Created LUN with name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1254 +#: cinder/volume/drivers/netapp/iscsi.py:1369 +#: cinder/volume/drivers/netapp/iscsi.py:1593 +#, python-format +msgid "Destroyed LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1291 +#, python-format +msgid "Mapped LUN %(handle)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1298 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(handle)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1303 +#: cinder/volume/drivers/netapp/iscsi.py:1637 +#, python-format +msgid "Failed to get LUN target details for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1307 +#: cinder/volume/drivers/netapp/iscsi.py:1648 +#, python-format +msgid "Failed to get target portal for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1311 +#: cinder/volume/drivers/netapp/iscsi.py:1651 +#, python-format +msgid "Failed to get target IQN for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1344 +#, python-format +msgid "Unmapped LUN %(handle)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1365 +#, python-format +msgid "No entry in LUN table for snapshot %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1409 +#: cinder/volume/drivers/netapp/iscsi.py:1907 +msgid "Object is not a NetApp LUN." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1419 +#: cinder/volume/drivers/netapp/iscsi.py:2188 +#, python-format +msgid "Cloned LUN with new name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1439 +#, python-format +msgid "Could not find handle for LUN named %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1458 +#: cinder/volume/drivers/netapp/iscsi.py:1936 +#, python-format +msgid "" +"Cannot clone volume of size %(vol_size)s from src volume of size " +"%(src_vol_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1518 +#, python-format +msgid "Using NetApp filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1585 +#, python-format +msgid "No entry in LUN table for volume/snapshot %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1628 +#, python-format +msgid "Mapped LUN %(name)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1632 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(name)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1688 +#, python-format +msgid "Snapshot %s deletion successful" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1717 +#, python-format +msgid "Unmapped LUN %(name)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1735 +#, python-format +msgid "Failed to get vol with required size for volume: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1811 +#, python-format +msgid "Error mapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1829 +#, python-format +msgid "Error unmapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1923 +#, python-format +msgid "Could not find attribute for LUN named %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2057 +#, python-format +msgid "No iscsi service found for vserver %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2192 +#, python-format +msgid "No clonned lun named %s found on the filer" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2377 +#, python-format +msgid "Error finding luns for volume %(vol)s. Verify volume exists." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2481 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s completed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2484 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s failed" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:312 +#, python-format +msgid "" +"Cloning with params ip %(host_ip)s, exp_path\n" +" %(export_path)s, vol %(volume_name)s,\n" +" clone_name %(clone_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:461 +#, python-format +msgid "No interface found on cluster for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:487 +#, python-format +msgid "" +"No volume on cluster with vserver\n" +" %(vserver)s and junction path " +"%(junction)s\n" +" " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:493 +#, python-format +msgid "" +"Cloning with params volume %(volume)s,src %(src_path)s,\n" +" dest %(dest_path)s, vserver %(vserver)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:547 +#, python-format +msgid "No storage path found for export path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:555 +#, python-format +msgid "Cloning with src %(src_path)s, dest %(dest_path)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/__init__.py:27 +msgid "Nexenta SA returned the error" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:64 +#, python-format +msgid "Sending JSON data: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:69 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:75 +msgid "No headers in server response" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:76 +msgid "Bad response from server" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:79 +#, python-format +msgid "Got response: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:99 +#, python-format +msgid "Volume %s does not exist in Nexenta SA" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:211 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:220 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:228 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:238 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:274 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:282 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:74 +#, python-format +msgid "CLIQ command returned %s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:80 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:88 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. 
Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:118 +#, python-format +msgid "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. " +"Result=%(_xml)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:171 +#, python-format +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:235 +msgid "local_path not supported" +msgstr "" + +#: cinder/volume/drivers/san/san.py:148 +#, python-format +msgid "Error running SSH command: %s" +msgstr "" + +#: cinder/volume/drivers/san/san.py:169 +msgid "Specify san_password or san_private_key" +msgstr "" + +#: cinder/volume/drivers/san/san.py:173 +msgid "san_ip must be set" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:80 +#, python-format +msgid "Cannot parse list-view output: %s" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:174 +#, python-format +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:257 +#, python-format +msgid "SSH Command failed after '%(total_attempts)r' attempts: '%(command)s'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:261 +#, python-format +msgid "Error running ssh command: %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:446 +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:104 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:103 +#, python-format +msgid "CPG (%s) doesn't exist on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:493 +#, python-format +msgid "Must specify a valid persona %(valid)s, value '%(persona)s' is invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:532 +#, python-format +msgid "" +"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " +"invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:569 +#, python-format +msgid "Volume (%s) already exists on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:622 +#, python-format +msgid "Unexpected state while cloning %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:97 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:96 +msgid "Login to 3PAR array invalid" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:218 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:237 +#, python-format +msgid "The hostname must be called '%s'" +msgstr "" + diff --git a/cinder/locale/zh_CN/LC_MESSAGES/cinder.po b/cinder/locale/zh_CN/LC_MESSAGES/cinder.po new file mode 100644 index 0000000000..5af90c372b --- /dev/null +++ b/cinder/locale/zh_CN/LC_MESSAGES/cinder.po @@ -0,0 +1,5657 @@ +# Chinese (Simplified) translation for cinder +# Copyright (c) 2011 Rosetta Contributors and Canonical Ltd 2011 +# This file is distributed under the same license as the cinder package. +# FIRST AUTHOR , 2011. 
+# +msgid "" +msgstr "" +"Project-Id-Version: cinder\n" +"Report-Msgid-Bugs-To: FULL NAME \n" +"POT-Creation-Date: 2013-05-21 18:10+0000\n" +"PO-Revision-Date: 2012-04-03 23:36+0000\n" +"Last-Translator: cheesecake \n" +"Language-Team: Chinese (Simplified) \n" +"Plural-Forms: nplurals=1; plural=0\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 0.9.6\n" + +#: cinder/context.py:61 +#, python-format +msgid "Arguments dropped when creating context: %s" +msgstr "" + +#: cinder/context.py:92 +#, python-format +msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" +msgstr "" + +#: cinder/exception.py:63 +msgid "Unexpected error while running command." +msgstr "运行命令时出现意外错误" + +#: cinder/exception.py:66 +#, python-format +msgid "" +"%(description)s\n" +"Command: %(cmd)s\n" +"Exit code: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" +msgstr "" +"%(description)s\n" +"命令:%(cmd)s\n" +"退出代码:%(exit_code)s\n" +"标准输出:%(stdout)r\n" +"标准错误输出:%(stderr)r" + +#: cinder/exception.py:90 +msgid "DB exception wrapped." +msgstr "数据库异常被包裹。" + +#: cinder/exception.py:104 +msgid "An unknown exception occurred." +msgstr "发生未知异常。" + +#: cinder/exception.py:125 cinder/openstack/common/rpc/common.py:86 +msgid "Exception in string format operation" +msgstr "" + +#: cinder/exception.py:138 +msgid "Connection to glance failed" +msgstr "连接到glance失败" + +#: cinder/exception.py:142 +msgid "Not authorized." +msgstr "未授权。" + +#: cinder/exception.py:147 +msgid "User does not have admin privileges" +msgstr "用户没有管理员权限" + +#: cinder/exception.py:151 +#, python-format +msgid "Policy doesn't allow %(action)s to be performed." +msgstr "政策不允许 %(action)s 被执行。" + +#: cinder/exception.py:155 +#, fuzzy, python-format +msgid "Not authorized for image %(image_id)s." +msgstr "没有为镜像 %(image_id)s 找到内核。" + +#: cinder/exception.py:159 +msgid "Unacceptable parameters." +msgstr "无法接受的参数。" + +#: cinder/exception.py:164 +msgid "Invalid snapshot" +msgstr "无效的快照" + +#: cinder/exception.py:168 +#, fuzzy, python-format +msgid "Volume %(volume_id)s is still attached, detach volume first." +msgstr "卷 %(volume_id)s 没有附加任何东西" + +#: cinder/exception.py:172 +msgid "Failed to load data into json format" +msgstr "把数据加载为json格式失败" + +#: cinder/exception.py:176 +msgid "The request is invalid." +msgstr "请求无效。" + +#: cinder/exception.py:180 +#, fuzzy +msgid "The results are invalid." +msgstr "请求无效。" + +#: cinder/exception.py:184 +msgid "Invalid input received" +msgstr "收到无效的输入" + +#: cinder/exception.py:188 +msgid "Invalid volume type" +msgstr "无效的卷类型" + +#: cinder/exception.py:192 +msgid "Invalid volume" +msgstr "无效的卷" + +#: cinder/exception.py:196 +#, python-format +msgid "Invalid content type %(content_type)s." +msgstr "无效的内容类型 %(content_type)s。" + +#: cinder/exception.py:200 +msgid "Invalid Parameter: Unicode is not supported by the current database." +msgstr "" + +#: cinder/exception.py:207 +#, python-format +msgid "%(err)s" +msgstr "%(err)s" + +#: cinder/exception.py:211 +msgid "Service is unavailable at this time." +msgstr "该时刻服务无法使用。" + +#: cinder/exception.py:215 +#, python-format +msgid "Image %(image_id)s is unacceptable: %(reason)s" +msgstr "镜像 %(image_id)s 无法接受,原因是: %(reason)s" + +#: cinder/exception.py:219 +msgid "Expected a uuid but received %(uuid)." +msgstr "" + +#: cinder/exception.py:223 +msgid "Resource could not be found." 
+msgstr "资源没有找到。" + +#: cinder/exception.py:229 +#, fuzzy, python-format +msgid "Volume %(volume_id)s persistence file could not be found." +msgstr "卷 %(volume_id)s 没有找到。" + +#: cinder/exception.py:233 +#, python-format +msgid "Volume %(volume_id)s could not be found." +msgstr "卷 %(volume_id)s 没有找到。" + +#: cinder/exception.py:237 +#, fuzzy, python-format +msgid "Unable to locate account %(account_name)s on Solidfire device" +msgstr "无法找到帐户 %(account_name) on Solidfire 设备" + +#: cinder/exception.py:242 +#, python-format +msgid "Volume not found for instance %(instance_id)s." +msgstr "没有为实例 %(instance_id)s 找到卷。" + +#: cinder/exception.py:246 +#, python-format +msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." +msgstr "卷 %(volume_id)s 没有含键 %(metadata_key)s 的元数据。" + +#: cinder/exception.py:251 cinder/exception.py:264 +msgid "Invalid metadata" +msgstr "无效的元数据" + +#: cinder/exception.py:255 cinder/exception.py:268 +#, fuzzy +msgid "Invalid metadata size" +msgstr "无效的元数据键" + +#: cinder/exception.py:259 +#, fuzzy, python-format +msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." +msgstr "实例 %(instance_id)s 没有键为 %(metadata_key)s 的元数据。" + +#: cinder/exception.py:272 +#, python-format +msgid "Volume type %(volume_type_id)s could not be found." +msgstr "卷类型 %(volume_type_id)s 没有找到。" + +#: cinder/exception.py:276 +#, python-format +msgid "Volume type with name %(volume_type_name)s could not be found." +msgstr "名为 %(volume_type_name)s 的卷类型没有找到。" + +#: cinder/exception.py:281 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s has no extra specs with key " +"%(extra_specs_key)s." +msgstr "卷类型 %(volume_type_id)s 没有额外说明键 %(extra_specs_key)s 。" + +#: cinder/exception.py:286 +#, python-format +msgid "Snapshot %(snapshot_id)s could not be found." +msgstr "快照 %(snapshot_id)s 没有找到。" + +#: cinder/exception.py:290 +#, python-format +msgid "deleting volume %(volume_name)s that has snapshot" +msgstr "正在删除有快照的卷 %(volume_name)s" + +#: cinder/exception.py:294 +#, python-format +msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" +msgstr "" + +#: cinder/exception.py:299 +#, python-format +msgid "No target id found for volume %(volume_id)s." +msgstr "没有为卷 %(volume_id)s 找到目标id。" + +#: cinder/exception.py:303 +#, fuzzy, python-format +msgid "Failed to create iscsi target for volume %(volume_id)s." +msgstr "没有为卷 %(volume_id)s 找到目标id。" + +#: cinder/exception.py:307 +#, fuzzy, python-format +msgid "Failed to attach iSCSI target for volume %(volume_id)s." +msgstr "没有为卷 %(volume_id)s 找到目标id。" + +#: cinder/exception.py:311 +#, fuzzy, python-format +msgid "Failed to remove iscsi target for volume %(volume_id)s." +msgstr "没有为卷 %(volume_id)s 找到目标id。" + +#: cinder/exception.py:315 +#, python-format +msgid "No disk at %(location)s" +msgstr "在 %(location)s 没有磁盘" + +#: cinder/exception.py:319 +#, python-format +msgid "Invalid image href %(image_href)s." +msgstr "无效的镜像href %(image_href)s。" + +#: cinder/exception.py:323 +#, python-format +msgid "Image %(image_id)s could not be found." +msgstr "镜像 %(image_id)s 没有找到。" + +#: cinder/exception.py:327 +#, python-format +msgid "Service %(service_id)s could not be found." +msgstr "服务 %(service_id)s 没有找到。" + +#: cinder/exception.py:331 +#, python-format +msgid "Host %(host)s could not be found." +msgstr "主机 %(host)s 没有找到。" + +#: cinder/exception.py:335 +#, python-format +msgid "Scheduler Host Filter %(filter_name)s could not be found." 
+msgstr "调度器主机过滤器 %(filter_name)s 没有找到。" + +#: cinder/exception.py:339 +#, fuzzy, python-format +msgid "Scheduler Host Weigher %(weigher_name)s could not be found." +msgstr "调度器主机过滤器 %(filter_name)s 没有找到。" + +#: cinder/exception.py:343 +#, python-format +msgid "Could not find binary %(binary)s on host %(host)s." +msgstr "没有找到二进制 %(binary)s 在主机 %(host)s 上。" + +#: cinder/exception.py:347 +#, python-format +msgid "Invalid reservation expiration %(expire)s." +msgstr "" + +#: cinder/exception.py:351 +#, python-format +msgid "" +"Change would make usage less than 0 for the following resources: " +"%(unders)s" +msgstr "" + +#: cinder/exception.py:356 +msgid "Quota could not be found" +msgstr "配额没有找到。" + +#: cinder/exception.py:360 +#, python-format +msgid "Unknown quota resources %(unknown)s." +msgstr "" + +#: cinder/exception.py:364 +#, python-format +msgid "Quota for project %(project_id)s could not be found." +msgstr "没有为项目 %(project_id)s 找到配额。" + +#: cinder/exception.py:368 +#, fuzzy, python-format +msgid "Quota class %(class_name)s could not be found." +msgstr "找不到类 %(class_name)s :异常 %(exception)s" + +#: cinder/exception.py:372 +#, fuzzy, python-format +msgid "Quota usage for project %(project_id)s could not be found." +msgstr "没有为项目 %(project_id)s 找到配额。" + +#: cinder/exception.py:376 +#, fuzzy, python-format +msgid "Quota reservation %(uuid)s could not be found." +msgstr "用户 %(user_id)s 没有找到。" + +#: cinder/exception.py:380 +#, fuzzy, python-format +msgid "Quota exceeded for resources: %(overs)s" +msgstr "超出配额" + +#: cinder/exception.py:384 +#, python-format +msgid "Migration %(migration_id)s could not be found." +msgstr "迁移 %(migration_id)s 没有找到。" + +#: cinder/exception.py:388 +#, python-format +msgid "Migration not found for instance %(instance_id)s with status %(status)s." +msgstr "没有为实例 %(instance_id)s 找到迁移其状态为 %(status)s 。" + +#: cinder/exception.py:393 +#, python-format +msgid "File %(file_path)s could not be found." +msgstr "找不到文件 %(file_path)s。" + +#: cinder/exception.py:397 +#, python-format +msgid "Class %(class_name)s could not be found: %(exception)s" +msgstr "找不到类 %(class_name)s :异常 %(exception)s" + +#: cinder/exception.py:401 +msgid "Action not allowed." +msgstr "操作不允许。" + +#: cinder/exception.py:410 +#, python-format +msgid "Key pair %(key_name)s already exists." +msgstr "密钥对 %(key_name)s 已经存在。" + +#: cinder/exception.py:414 +#, fuzzy, python-format +msgid "Volume Type %(id)s already exists." +msgstr "卷类型 %(name)s 已经存在。" + +#: cinder/exception.py:418 +msgid "Migration error" +msgstr "迁移错误" + +#: cinder/exception.py:422 +#, python-format +msgid "Malformed message body: %(reason)s" +msgstr "错误格式的消息体: %(reason)s" + +#: cinder/exception.py:426 +#, python-format +msgid "Could not find config at %(path)s" +msgstr "在 %(path)s 找不到配置文件。" + +#: cinder/exception.py:430 +#, python-format +msgid "Could not load paste app '%(name)s' from %(path)s" +msgstr "无法从路径 %(path)s 中加载应用 '%(name)s'" + +#: cinder/exception.py:434 +#, python-format +msgid "No valid host was found. %(reason)s" +msgstr "找不到有效主机,原因是 %(reason)s。" + +#: cinder/exception.py:438 +#, python-format +msgid "Host %(host)s is not up or doesn't exist." 
+msgstr "主机 %(host)s 没有启动或者不存在。" + +#: cinder/exception.py:442 +msgid "Quota exceeded" +msgstr "超出配额" + +#: cinder/exception.py:449 +msgid "Requested volume or snapshot exceeds allowed Gigabytes quota" +msgstr "" + +#: cinder/exception.py:454 +msgid "Maximum volume/snapshot size exceeded" +msgstr "" + +#: cinder/exception.py:458 +#, python-format +msgid "Maximum number of volumes allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:462 +#, python-format +msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:466 +#, fuzzy, python-format +msgid "Detected more than one volume with name %(vol_name)s" +msgstr "检测到不止一个名称为 %(vol_name) 的卷。" + +#: cinder/exception.py:470 +#, python-format +msgid "3PAR Host already exists: %(err)s. %(info)s" +msgstr "" + +#: cinder/exception.py:474 +#, python-format +msgid "Invalid 3PAR Domain: %(err)s" +msgstr "" + +#: cinder/exception.py:478 +#, python-format +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +msgstr "无法创建名称为 %(name)s 规格为 %(extra_specs)s 的卷类型。" + +#: cinder/exception.py:483 +msgid "Bad response from SolidFire API" +msgstr "来自SolidFire API的错误响应" + +#: cinder/exception.py:487 +#, python-format +msgid "Error in SolidFire API response: data=%(data)s" +msgstr "SolidFire API响应里发生错误:data=%(data)s" + +#: cinder/exception.py:491 +#, python-format +msgid "Unknown or unsupported command %(cmd)s" +msgstr "" + +#: cinder/exception.py:495 +#, fuzzy, python-format +msgid "Malformed response to command %(cmd)s: %(reason)s" +msgstr "错误格式的消息体: %(reason)s" + +#: cinder/exception.py:499 +#, fuzzy, python-format +msgid "Bad HTTP response status %(status)s" +msgstr "无效的服务器状态:%(status)s" + +#: cinder/exception.py:503 +#, python-format +msgid "Operation failed with status=%(status)s. Full dump: %(data)s" +msgstr "" + +#: cinder/exception.py:507 +#, fuzzy, python-format +msgid "Unable to create server object for initiator %(name)s" +msgstr "无法在存储库 %(sr_ref)s 上为实例 %(instance_name)s 创建 VDI" + +#: cinder/exception.py:511 +#, fuzzy, python-format +msgid "Unable to find server object for initiator %(name)s" +msgstr "无法找到实例 %s 的宿主机" + +#: cinder/exception.py:515 +msgid "Unable to find any active VPSA controller" +msgstr "" + +#: cinder/exception.py:519 +#, python-format +msgid "Failed to retrieve attachments for volume %(name)s" +msgstr "" + +#: cinder/exception.py:523 +#, python-format +msgid "Invalid attachment info for volume %(name)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:527 +#, python-format +msgid "Instance %(instance_id)s could not be found." 
+msgstr "实例 %(instance_id)s 没有找到。" + +#: cinder/exception.py:531 +#, python-format +msgid "Bad or unexpected response from the storage volume backend API: %(data)s" +msgstr "" + +#: cinder/exception.py:536 +#, fuzzy +msgid "Unknown NFS exception" +msgstr "发生未知异常。" + +#: cinder/exception.py:540 +msgid "No mounted NFS shares found" +msgstr "" + +#: cinder/exception.py:544 cinder/exception.py:556 +#, python-format +msgid "There is no share which can host %(volume_size)sG" +msgstr "" + +#: cinder/exception.py:548 +#, fuzzy +msgid "Unknown Gluster exception" +msgstr "发生未知异常。" + +#: cinder/exception.py:552 +msgid "No mounted Gluster shares found" +msgstr "" + +#: cinder/exception.py:560 +#, python-format +msgid "" +"Glance metadata cannot be updated, key %(key)s exists for volume id " +"%(volume_id)s" +msgstr "" + +#: cinder/exception.py:565 +msgid "Failed to copy image to volume" +msgstr "" + +#: cinder/exception.py:569 +#, fuzzy, python-format +msgid "Backup %(backup_id)s could not be found." +msgstr "没有找到LDAP用户组 %(group_id)s。" + +#: cinder/exception.py:573 +#, fuzzy, python-format +msgid "Invalid backup: %(reason)s" +msgstr "无效的后台:%s" + +#: cinder/exception.py:577 +#, fuzzy +msgid "Connection to swift failed" +msgstr "连接到glance失败" + +#: cinder/flags.py:145 +msgid "Deploy v1 of the Cinder API. " +msgstr "" + +#: cinder/flags.py:148 +msgid "Deploy v2 of the Cinder API. " +msgstr "" + +#: cinder/manager.py:158 +#, python-format +msgid "Skipping %(full_task_name)s, %(ticks_to_skip)s ticks left until next run" +msgstr "跳过 %(full_task_name)s,到下次运行还剩下%(ticks_to_skip)s 跳。" + +#: cinder/manager.py:164 +#, python-format +msgid "Running periodic task %(full_task_name)s" +msgstr "正在运行周期性任务 %(full_task_name)s" + +#: cinder/manager.py:171 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "在 %(full_task_name)s 期间发生的错误:%(e)s" + +#: cinder/manager.py:216 +msgid "Notifying Schedulers of capabilities ..." 
+msgstr "向调度器通报能力。" + +#: cinder/policy.py:30 +msgid "JSON file representing policy" +msgstr "JSON文件表示策略。" + +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" +msgstr "请求的规则找不到时的检查缺省规则。" + +#: cinder/quota.py:704 +#, python-format +msgid "Created reservations %(reservations)s" +msgstr "" + +#: cinder/quota.py:726 +#, python-format +msgid "Failed to commit reservations %(reservations)s" +msgstr "" + +#: cinder/quota.py:747 +#, python-format +msgid "Failed to roll back reservations %(reservations)s" +msgstr "" + +#: cinder/service.py:124 +msgid "SIGTERM received" +msgstr "" + +#: cinder/service.py:174 cinder/openstack/common/service.py:164 +msgid "Parent process has died unexpectedly, exiting" +msgstr "" + +#: cinder/service.py:213 cinder/openstack/common/service.py:200 +msgid "Forking too fast, sleeping" +msgstr "" + +#: cinder/service.py:231 cinder/openstack/common/service.py:121 +#: cinder/openstack/common/service.py:218 +#, fuzzy, python-format +msgid "Caught %s, exiting" +msgstr "快照 %s:正在删除" + +#: cinder/service.py:236 cinder/openstack/common/service.py:223 +#, fuzzy +msgid "Unhandled exception" +msgstr "内层异常:%s" + +#: cinder/service.py:243 cinder/openstack/common/service.py:230 +#, python-format +msgid "Started child %d" +msgstr "" + +#: cinder/service.py:253 cinder/openstack/common/service.py:240 +#, fuzzy, python-format +msgid "Starting %d workers" +msgstr "起始地址" + +#: cinder/service.py:272 cinder/openstack/common/service.py:257 +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "" + +#: cinder/service.py:275 +#, python-format +msgid "Child %(pid)d exited with status %(code)d" +msgstr "" + +#: cinder/service.py:278 cinder/openstack/common/service.py:265 +#, python-format +msgid "pid %d not in child list" +msgstr "" + +#: cinder/service.py:286 +#, python-format +msgid "_wait_child %d" +msgstr "" + +#: cinder/service.py:302 +#, fuzzy, python-format +msgid "wait wrap.failed %s" +msgstr "未知的基文件:%s" + +#: cinder/service.py:310 cinder/openstack/common/service.py:293 +#, python-format +msgid "Caught %s, stopping children" +msgstr "" + +#: cinder/service.py:321 cinder/openstack/common/service.py:304 +#, python-format +msgid "Waiting on %d children to exit" +msgstr "" + +#: cinder/service.py:353 +#, fuzzy, python-format +msgid "Starting %(topic)s node (version %(version_string)s)" +msgstr "启动 %(topic)s 节点 (版本 %(vcs_string)s)" + +#: cinder/service.py:367 cinder/openstack/common/rpc/service.py:47 +#, python-format +msgid "Creating Consumer connection for Service %s" +msgstr "为服务 %s 创建消费者" + +#: cinder/service.py:458 +msgid "Service killed that has no database entry" +msgstr "因无数据库记录,服务已被中止" + +#: cinder/service.py:495 +msgid "The service database object disappeared, Recreating it." +msgstr "服务数据库对象消失,正在重新创建。" + +#: cinder/service.py:510 +msgid "Recovered model server connection!" +msgstr "与模型服务器(model server)的连接已恢复!" 
+ +#: cinder/service.py:516 +msgid "model server went away" +msgstr "失去与模型服务器的连接" + +#: cinder/service.py:608 +msgid "Full set of FLAGS:" +msgstr "标记全集:" + +#: cinder/service.py:615 +#, python-format +msgid "%(flag)s : FLAG SET " +msgstr "%(flag)s:标记集合 " + +#: cinder/utils.py:96 +#, python-format +msgid "Fetching %s" +msgstr "正在抓取 %s" + +#: cinder/utils.py:147 cinder/openstack/common/processutils.py:122 +#, python-format +msgid "Got unknown keyword args to utils.execute: %r" +msgstr "发现未知的 utils.execute 关键字参数:%r" + +#: cinder/utils.py:153 +msgid "" +"The root_helper option (which lets you specify a root wrapper different " +"from cinder-rootwrap, and defaults to using sudo) is now deprecated. You " +"should use the rootwrap_config option instead." +msgstr "" + +#: cinder/utils.py:169 cinder/openstack/common/processutils.py:137 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "正在运行cmd (subprocess):%s" + +#: cinder/utils.py:186 cinder/utils.py:267 +#: cinder/openstack/common/processutils.py:162 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:224 +#, python-format +msgid "Result was %s" +msgstr "运行结果为 %s" + +#: cinder/utils.py:199 cinder/openstack/common/processutils.py:174 +#, python-format +msgid "%r failed. Retrying." +msgstr "%r 失败,重试。" + +#: cinder/utils.py:241 cinder/volume/drivers/san/hp/hp_3par_common.py:197 +#, python-format +msgid "Running cmd (SSH): %s" +msgstr "运行cmd (SSH):%s" + +#: cinder/utils.py:243 +msgid "Environment not supported over SSH" +msgstr "SSH上不支持环境变量" + +#: cinder/utils.py:247 +msgid "process_input not supported over SSH" +msgstr "SSH上不支持的进程输入参数。" + +#: cinder/utils.py:316 cinder/volume/drivers/huawei/huawei_iscsi.py:91 +#, fuzzy +msgid "Specify a password or private_key" +msgstr "指定san_password或者san_private_key" + +#: cinder/utils.py:332 cinder/volume/drivers/huawei/huawei_iscsi.py:99 +#, fuzzy, python-format +msgid "Error connecting via ssh: %s" +msgstr "正在连接 libvirt:%s" + +#: cinder/utils.py:373 +#, python-format +msgid "debug in callback: %s" +msgstr "回调中debug:%s" + +#: cinder/utils.py:532 +#, python-format +msgid "Link Local address is not found.:%s" +msgstr "本地IP地址没有找到:%s" + +#: cinder/utils.py:535 +#, python-format +msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +msgstr "无法连接到 %(interface)s 的本地IP:%(ex)s" + +#: cinder/utils.py:569 +#, python-format +msgid "Invalid backend: %s" +msgstr "无效的后台:%s" + +#: cinder/utils.py:580 +#, python-format +msgid "backend %s" +msgstr "后台 %s" + +#: cinder/utils.py:630 +msgid "in looping call" +msgstr "循环调用中。" + +#: cinder/utils.py:813 +#, python-format +msgid "Expected object of type: %s" +msgstr "期望的对象类型:%s" + +#: cinder/utils.py:903 +#, python-format +msgid "timefunc: '%(name)s' took %(total_time).2f secs" +msgstr "timefunc:'%(name)s' 用了%(total_time).2f 秒" + +#: cinder/utils.py:1105 +#, fuzzy, python-format +msgid "Could not remove tmpdir: %s" +msgstr "移除容器失败:%s" + +#: cinder/utils.py:1213 +#, fuzzy, python-format +msgid "Unknown byte multiplier: %s" +msgstr "未知的基文件:%s" + +#: cinder/wsgi.py:121 +#, fuzzy, python-format +msgid "Unable to find cert_file : %s" +msgstr "无法找到地址 %r" + +#: cinder/wsgi.py:124 +#, fuzzy, python-format +msgid "Unable to find ca_file : %s" +msgstr "无法找到地址 %r" + +#: cinder/wsgi.py:127 +#, fuzzy, python-format +msgid "Unable to find key_file : %s" +msgstr "无法找到地址 %r" + +#: cinder/wsgi.py:130 +msgid "" +"When running server in SSL mode, you must specify both a cert_file and " +"key_file option value in your configuration file" +msgstr "" + +#: cinder/wsgi.py:163 +#, 
python-format +msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" +msgstr "" + +#: cinder/wsgi.py:207 +#, fuzzy, python-format +msgid "Started %(name)s on %(_host)s:%(_port)s" +msgstr "启动%(name)s 位置在 %(host)s:%(port)s" + +#: cinder/wsgi.py:226 +msgid "Stopping WSGI server." +msgstr "关闭WSGI服务器" + +#: cinder/wsgi.py:240 +msgid "WSGI server has stopped." +msgstr "WSGI服务器已经停止。" + +#: cinder/wsgi.py:309 +msgid "You must implement __call__" +msgstr "你必须执行 __call__" + +#: cinder/api/auth.py:26 +msgid "" +"cinder.api.auth:CinderKeystoneContext is deprecated. Please use " +"cinder.api.middleware.auth:CinderKeystoneContext instead." +msgstr "" + +#: cinder/api/auth.py:34 +msgid "" +"cinder.api.auth:pipeline_factory is deprecated. Please use " +"cinder.api.middleware.auth:pipeline_factory instead." +msgstr "" + +#: cinder/api/common.py:63 cinder/api/common.py:97 cinder/volume/api.py:359 +msgid "limit param must be an integer" +msgstr "limit 参数必须是整数" + +#: cinder/api/common.py:66 cinder/api/common.py:101 cinder/volume/api.py:356 +msgid "limit param must be positive" +msgstr "limit参数必须是正数" + +#: cinder/api/common.py:91 +msgid "offset param must be an integer" +msgstr "offset 参数必须是整数" + +#: cinder/api/common.py:105 +msgid "offset param must be positive" +msgstr "offset 参数必须是正数" + +#: cinder/api/common.py:133 +#, python-format +msgid "marker [%s] not found" +msgstr "没有找到标记 [%s]" + +#: cinder/api/common.py:160 +#, python-format +msgid "href %s does not contain version" +msgstr "href %s 不包含版本" + +#: cinder/api/extensions.py:184 +msgid "Initializing extension manager." +msgstr "正在初始化扩展管理员。" + +#: cinder/api/extensions.py:199 +#, python-format +msgid "Loaded extension: %s" +msgstr "加载的扩展:%s" + +#: cinder/api/extensions.py:237 +#, python-format +msgid "Ext name: %s" +msgstr "Ext name: %s" + +#: cinder/api/extensions.py:238 +#, python-format +msgid "Ext alias: %s" +msgstr "Ext alias: %s" + +#: cinder/api/extensions.py:239 +#, python-format +msgid "Ext description: %s" +msgstr "Ext 描述: %s" + +#: cinder/api/extensions.py:241 +#, python-format +msgid "Ext namespace: %s" +msgstr "Ext 命名空间: %s" + +#: cinder/api/extensions.py:242 +#, python-format +msgid "Ext updated: %s" +msgstr "Ext updated: %s" + +#: cinder/api/extensions.py:244 +#, python-format +msgid "Exception loading extension: %s" +msgstr "加载扩展发生异常:%s" + +#: cinder/api/extensions.py:258 +#, python-format +msgid "Loading extension %s" +msgstr "正在加载扩展 %s" + +#: cinder/api/extensions.py:264 +#, python-format +msgid "Calling extension factory %s" +msgstr "调用扩展工厂 %s" + +#: cinder/api/extensions.py:278 +#, python-format +msgid "osapi_volume_extension is set to deprecated path: %s" +msgstr "" + +#: cinder/api/extensions.py:280 +#, python-format +msgid "" +"Please set your flag or cinder.conf settings for osapi_volume_extension " +"to: %s" +msgstr "" + +#: cinder/api/extensions.py:289 +#, python-format +msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgstr "加载扩展 %(ext_factory)s 失败:%(exc)s" + +#: cinder/api/extensions.py:357 +#, python-format +msgid "Failed to load extension %(classpath)s: %(exc)s" +msgstr "加载扩展 %(classpath)s 失败:%(exc)s" + +#: cinder/api/extensions.py:381 +#, python-format +msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgstr "加载扩展 %(ext_name)s 失败:%(exc)s" + +#: cinder/api/sizelimit.py:25 +msgid "" +"cinder.api.sizelimit:RequestBodySizeLimiter is deprecated. 
Please use " +"cinder.api.middleware.sizelimit:RequestBodySizeLimiter instead" +msgstr "" + +#: cinder/api/xmlutil.py:268 +msgid "element is not a child" +msgstr "元素不是子节点" + +#: cinder/api/xmlutil.py:417 +msgid "root element selecting a list" +msgstr "根元素选择列表" + +#: cinder/api/xmlutil.py:742 +#, python-format +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgstr "模板数不匹配;把slave %(slavetag)s 添加到master %(mastertag)s" + +#: cinder/api/xmlutil.py:861 +msgid "subclasses must implement construct()!" +msgstr "subclasses必须执行construct()!" + +#: cinder/api/contrib/admin_actions.py:80 +#, python-format +msgid "Updating %(resource)s '%(id)s' with '%(update)r'" +msgstr "" + +#: cinder/api/contrib/backups.py:128 +#, python-format +msgid "show called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:140 +#, fuzzy, python-format +msgid "delete called for member %s" +msgstr "修改用户 %s 的私钥" + +#: cinder/api/contrib/backups.py:143 +#, fuzzy, python-format +msgid "Delete backup with id: %s" +msgstr "删除id为 %s 的快照" + +#: cinder/api/contrib/backups.py:185 +#, fuzzy, python-format +msgid "Creating new backup %s" +msgstr "轮换出%d个备份" + +#: cinder/api/contrib/backups.py:195 cinder/api/contrib/backups.py:230 +msgid "Incorrect request body format" +msgstr "不正确的请求主体格式" + +#: cinder/api/contrib/backups.py:201 +#, fuzzy, python-format +msgid "Creating backup of volume %(volume_id)s in container %(container)s" +msgstr "卷 %(volume_id)s 正在 %(mountpoint)s 上启动" + +#: cinder/api/contrib/backups.py:221 +#, python-format +msgid "Restoring backup %(backup_id)s (%(body)s)" +msgstr "" + +#: cinder/api/contrib/backups.py:234 +#, python-format +msgid "Restoring backup %(backup_id)s to volume %(volume_id)s" +msgstr "" + +#: cinder/api/contrib/extended_snapshot_attributes.py:61 +#, fuzzy +msgid "Snapshot not found." +msgstr "没有找到主机" + +#: cinder/api/contrib/hosts.py:83 cinder/api/openstack/wsgi.py:158 +msgid "cannot understand XML" +msgstr "无法理解XML" + +#: cinder/api/contrib/hosts.py:133 +#, fuzzy, python-format +msgid "Host '%s' could not be found." +msgstr "主机 %(host)s 没有找到。" + +#: cinder/api/contrib/hosts.py:162 +#, python-format +msgid "Invalid status: '%s'" +msgstr "无效的状态:'%s'" + +#: cinder/api/contrib/hosts.py:165 +#, python-format +msgid "Invalid update setting: '%s'" +msgstr "无效的更新设置:'%s'" + +#: cinder/api/contrib/hosts.py:177 +#, python-format +msgid "Setting host %(host)s to %(state)s." +msgstr "把主机 %(host)s 设置为 %(state)s。" + +#: cinder/api/contrib/hosts.py:202 +msgid "Describe-resource is admin only functionality" +msgstr "Describe-resource是只有管理员才能执行的功能。" + +#: cinder/api/contrib/hosts.py:210 +msgid "Host not found" +msgstr "没有找到主机" + +#: cinder/api/contrib/quotas.py:63 +msgid "Quota limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:102 +#, fuzzy +msgid "Request body empty" +msgstr "不正确的请求主体格式" + +#: cinder/api/contrib/types_extra_specs.py:106 +#: cinder/api/v1/snapshot_metadata.py:77 cinder/api/v1/volume_metadata.py:77 +#: cinder/api/v2/snapshot_metadata.py:77 +msgid "Request body and URI mismatch" +msgstr "请求主体和URI不匹配" + +#: cinder/api/contrib/types_extra_specs.py:109 +#: cinder/api/v1/snapshot_metadata.py:81 cinder/api/v1/volume_metadata.py:81 +#: cinder/api/v2/snapshot_metadata.py:81 +msgid "Request body contains too many items" +msgstr "请求主体包含太多items" + +#: cinder/api/contrib/volume_actions.py:159 +msgid "Invalid request body" +msgstr "无效的请求主体" + +#: cinder/api/contrib/volume_actions.py:163 +msgid "No image_name was specified in request." 
+msgstr "" + +#: cinder/api/middleware/fault.py:45 +#, python-format +msgid "Caught error: %s" +msgstr "抓到错误:%s" + +#: cinder/api/middleware/fault.py:54 cinder/api/openstack/wsgi.py:886 +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "%(url)s 随HTTP %(status)d返回" + +#: cinder/api/middleware/sizelimit.py:54 cinder/api/middleware/sizelimit.py:63 +#: cinder/api/middleware/sizelimit.py:77 +msgid "Request is too large." +msgstr "" + +#: cinder/api/openstack/__init__.py:74 +msgid "Must specify an ExtensionManager class" +msgstr "必须明确一个ExtensionManager类" + +#: cinder/api/openstack/__init__.py:85 +#, python-format +msgid "Extended resource: %s" +msgstr "扩展资源:%s" + +#: cinder/api/openstack/__init__.py:110 +#, python-format +msgid "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" +msgstr "扩展%(ext_name)s:无法扩展资源 %(collection)s:没有那种资源" + +#: cinder/api/openstack/__init__.py:115 +#, python-format +msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgstr "扩展资源的扩展 %(ext_name)s:%(collection)s" + +#: cinder/api/openstack/__init__.py:128 +msgid "" +"cinder.api.openstack:FaultWrapper is deprecated. Please use " +"cinder.api.middleware.fault:FaultWrapper instead." +msgstr "" + +#: cinder/api/openstack/urlmap.py:25 +msgid "" +"cinder.api.openstack.urlmap:urlmap_factory is deprecated. Please use " +"cinder.api.urlmap:urlmap_factory instead." +msgstr "" + +#: cinder/api/openstack/wsgi.py:134 cinder/api/openstack/wsgi.py:537 +msgid "cannot understand JSON" +msgstr "无法理解JSON" + +#: cinder/api/openstack/wsgi.py:542 +msgid "too many body keys" +msgstr "过多主体密钥" + +#: cinder/api/openstack/wsgi.py:581 +#, fuzzy, python-format +msgid "Exception handling resource: %s" +msgstr "扩展资源:%s" + +#: cinder/api/openstack/wsgi.py:586 +#, python-format +msgid "Fault thrown: %s" +msgstr "错误抛出: %s" + +#: cinder/api/openstack/wsgi.py:589 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "HTTP 异常抛出:%s" + +#: cinder/api/openstack/wsgi.py:697 +msgid "Unrecognized Content-Type provided in request" +msgstr "请求中提供了无法识别的 Content-Type" + +#: cinder/api/openstack/wsgi.py:701 +msgid "No Content-Type provided in request" +msgstr "请求中没有提供 Content-Type" + +#: cinder/api/openstack/wsgi.py:705 +msgid "Empty body provided in request" +msgstr "请求中没有提供主体" + +#: cinder/api/openstack/wsgi.py:816 +#, python-format +msgid "There is no such action: %s" +msgstr "没有该动作:%s" + +#: cinder/api/openstack/wsgi.py:819 cinder/api/openstack/wsgi.py:832 +#: cinder/api/v1/snapshot_metadata.py:55 cinder/api/v1/snapshot_metadata.py:73 +#: cinder/api/v1/snapshot_metadata.py:98 cinder/api/v1/snapshot_metadata.py:123 +#: cinder/api/v1/volume_metadata.py:55 cinder/api/v1/volume_metadata.py:73 +#: cinder/api/v1/volume_metadata.py:98 cinder/api/v1/volume_metadata.py:123 +#: cinder/api/v2/snapshot_metadata.py:55 cinder/api/v2/snapshot_metadata.py:73 +#: cinder/api/v2/snapshot_metadata.py:98 cinder/api/v2/snapshot_metadata.py:123 +msgid "Malformed request body" +msgstr "错误格式的请求主体" + +#: cinder/api/openstack/wsgi.py:829 +msgid "Unsupported Content-Type" +msgstr "不支持的Content-Type" + +#: cinder/api/openstack/wsgi.py:841 +msgid "Malformed request url" +msgstr "错误格式的请求url" + +#: cinder/api/openstack/wsgi.py:889 +#, python-format +msgid "%(url)s returned a fault: %(e)s" +msgstr "%(url)s返回错误:%(e)s" + +#: cinder/api/openstack/volume/__init__.py:25 +msgid "" +"cinder.api.openstack.volume:APIRouter is deprecated. Please use " +"cinder.api.v1.router:APIRouter instead." 
+msgstr "" + +#: cinder/api/openstack/volume/versions.py:26 +msgid "" +"cinder.api.openstack.volume.versions.Versions is deprecated. Please use " +"cinder.api.versions.Versions instead." +msgstr "" + +#: cinder/api/v1/limits.py:144 cinder/api/v2/limits.py:144 +#, python-format +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." +msgstr "只能有 %(value)s 个 %(verb)s 请求发送给 %(uri)s 限定是每一个 %(unit_string)s。" + +#: cinder/api/v1/limits.py:270 cinder/api/v2/limits.py:270 +msgid "This request was rate-limited." +msgstr "这个请求受到频率限制。" + +#: cinder/api/v1/snapshot_metadata.py:39 cinder/api/v1/snapshot_metadata.py:119 +#: cinder/api/v1/snapshot_metadata.py:158 cinder/api/v2/snapshot_metadata.py:39 +#: cinder/api/v2/snapshot_metadata.py:119 +#: cinder/api/v2/snapshot_metadata.py:158 +#, fuzzy +msgid "snapshot does not exist" +msgstr "实例不存在" + +#: cinder/api/v1/snapshot_metadata.py:141 +#: cinder/api/v1/snapshot_metadata.py:151 cinder/api/v1/volume_metadata.py:141 +#: cinder/api/v1/volume_metadata.py:151 cinder/api/v2/snapshot_metadata.py:141 +#: cinder/api/v2/snapshot_metadata.py:151 +msgid "Metadata item was not found" +msgstr "元数据项目未找到" + +#: cinder/api/v1/snapshots.py:123 cinder/api/v2/snapshots.py:123 +#, python-format +msgid "Delete snapshot with id: %s" +msgstr "删除id为 %s 的快照" + +#: cinder/api/v1/snapshots.py:173 cinder/api/v2/snapshots.py:179 +#, python-format +msgid "Create snapshot from volume %s" +msgstr "为卷 %s 创建快照" + +#: cinder/api/v1/snapshots.py:177 cinder/api/v2/snapshots.py:188 +#, python-format +msgid "Invalid value '%s' for force. " +msgstr "" + +#: cinder/api/v1/volume_metadata.py:39 cinder/api/v1/volume_metadata.py:119 +#: cinder/api/v1/volume_metadata.py:158 +#, fuzzy +msgid "volume does not exist" +msgstr "域不存在" + +#: cinder/api/v1/volumes.py:106 +#, python-format +msgid "vol=%s" +msgstr "vol=%s" + +#: cinder/api/v1/volumes.py:236 cinder/api/v2/volumes.py:153 +#, python-format +msgid "Delete volume with id: %s" +msgstr "删除id为 %s 的卷" + +#: cinder/api/v1/volumes.py:278 cinder/api/v1/volumes.py:282 +#: cinder/api/v2/volumes.py:209 cinder/api/v2/volumes.py:213 +msgid "Invalid imageRef provided." +msgstr "提供了无效的imageRef。" + +#: cinder/api/v1/volumes.py:339 cinder/api/v2/volumes.py:272 +#, python-format +msgid "Create volume of %s GB" +msgstr "创建 %s GB的卷" + +#: cinder/api/v1/volumes.py:418 +#, fuzzy, python-format +msgid "Removing options '%(bad_options)s' from query" +msgstr "正在从查询语句中移除选项 '%(unk_opt_str)s'" + +#: cinder/api/v2/volumes.py:359 +#, fuzzy, python-format +msgid "Removing options '%s' from query" +msgstr "正在从查询语句中移除选项 '%(unk_opt_str)s'" + +#: cinder/backup/api.py:53 +#, fuzzy +msgid "Backup status must be available or error" +msgstr "卷组状态必须可获取" + +#: cinder/backup/api.py:78 +#, fuzzy +msgid "Volume to be backed up must be available" +msgstr "卷组状态必须可获取" + +#: cinder/backup/api.py:112 +#, fuzzy +msgid "Backup status must be available" +msgstr "状态必须可用" + +#: cinder/backup/api.py:117 +msgid "Backup to be restored has invalid size" +msgstr "" + +#: cinder/backup/api.py:126 +#, python-format +msgid "Creating volume of %(size)s GB for restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/api.py:140 +#, python-format +msgid "" +"volume size %(volume_size)d is too small to restore backup of size " +"%(size)d." 
+msgstr "" + +#: cinder/backup/api.py:145 +#, fuzzy +msgid "Volume to be restored to must be available" +msgstr "卷组状态必须可获取" + +#: cinder/backup/api.py:151 +msgid "Volume to be restored to is smaller than the backup to be restored" +msgstr "" + +#: cinder/backup/api.py:155 +#, fuzzy, python-format +msgid "Overwriting volume %(volume_id)s with restore of backup %(backup_id)s" +msgstr "正在把卷 %(volume_id)s 附加到 %(mountpoint)s" + +#: cinder/backup/manager.py:79 +msgid "Cleaning up incomplete backup operations" +msgstr "" + +#: cinder/backup/manager.py:83 +#, python-format +msgid "Resetting volume %s to available (was backing-up)" +msgstr "" + +#: cinder/backup/manager.py:87 +#, python-format +msgid "Resetting volume %s to error_restoring (was restoring-backup)" +msgstr "" + +#: cinder/backup/manager.py:98 +#, python-format +msgid "Resetting backup %s to error (was creating)" +msgstr "" + +#: cinder/backup/manager.py:104 +#, python-format +msgid "Resetting backup %s to available (was restoring)" +msgstr "" + +#: cinder/backup/manager.py:109 +#, python-format +msgid "Resuming delete on backup: %s" +msgstr "" + +#: cinder/backup/manager.py:119 +#, python-format +msgid "create_backup started, backup: %(backup_id)s for volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:128 +#, python-format +msgid "" +"create_backup aborted, expected volume status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:137 +#, python-format +msgid "" +"create_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:160 +#, python-format +msgid "create_backup finished. backup: %s" +msgstr "" + +#: cinder/backup/manager.py:166 +#, python-format +msgid "" +"restore_backup started, restoring backup: %(backup_id)s to volume: " +"%(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:175 +#, python-format +msgid "" +"restore_backup aborted, expected volume status %(expected_status)s but " +"got %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:183 +#, python-format +msgid "" +"restore_backup aborted, expected backup status %(expected_status)s but " +"got %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:199 +#, python-format +msgid "" +"restore_backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]" +msgstr "" + +#: cinder/backup/manager.py:220 +#, python-format +msgid "" +"restore_backup finished, backup: %(backup_id)s restored to volume: " +"%(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:228 +#, python-format +msgid "delete_backup started, backup: %s" +msgstr "" + +#: cinder/backup/manager.py:234 +#, python-format +msgid "" +"delete_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:244 +#, python-format +msgid "" +"delete_backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]" +msgstr "" + +#: cinder/backup/manager.py:264 +#, python-format +msgid "delete_backup finished, backup %s deleted" +msgstr "" + +#: cinder/backup/services/swift.py:96 +#, fuzzy, python-format +msgid "unsupported compression algorithm: %s" +msgstr "不支持的分区:%s" + +#: cinder/backup/services/swift.py:117 +#, python-format +msgid "_check_container_exists: container: %s" +msgstr "" + +#: 
cinder/backup/services/swift.py:122 +#, fuzzy, python-format +msgid "container %s does not exist" +msgstr "实例不存在" + +#: cinder/backup/services/swift.py:127 +#, fuzzy, python-format +msgid "container %s exists" +msgstr "实例不存在" + +#: cinder/backup/services/swift.py:133 +#, python-format +msgid "_create_container started, container: %(container)s,backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:148 +#, python-format +msgid "_generate_swift_object_name_prefix: %s" +msgstr "" + +#: cinder/backup/services/swift.py:159 +#, fuzzy, python-format +msgid "generated object list: %s" +msgstr "期望的对象类型:%s" + +#: cinder/backup/services/swift.py:169 +#, python-format +msgid "" +"_write_metadata started, container name: %(container)s, metadata " +"filename: %(filename)s" +msgstr "" + +#: cinder/backup/services/swift.py:184 +#, python-format +msgid "" +"error writing metadata file to swift, MD5 of metadata file in swift " +"[%(etag)s] is not the same as MD5 of metadata file sent to swift " +"[%(md5)s]" +msgstr "" + +#: cinder/backup/services/swift.py:188 +msgid "_write_metadata finished" +msgstr "" + +#: cinder/backup/services/swift.py:193 +#, python-format +msgid "" +"_read_metadata started, container name: %(container)s, metadata filename:" +" %(filename)s" +msgstr "" + +#: cinder/backup/services/swift.py:197 +#, python-format +msgid "_read_metadata finished (%s)" +msgstr "" + +#: cinder/backup/services/swift.py:207 +#, fuzzy, python-format +msgid "volume size %d is invalid." +msgstr "请求无效。" + +#: cinder/backup/services/swift.py:221 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to swift, volume size: " +"%(volume_size_bytes)d, swift object names prefix %(object_prefix)s, " +"availability zone: %(availability_zone)s" +msgstr "" + +#: cinder/backup/services/swift.py:237 +msgid "reading chunk of data from volume" +msgstr "" + +#: cinder/backup/services/swift.py:244 +#, python-format +msgid "" +"compressed %(data_size_bytes)d bytes of data to %(comp_size_bytes)d bytes" +" using %(algorithm)s" +msgstr "" + +#: cinder/backup/services/swift.py:248 +msgid "not compressing data" +msgstr "" + +#: cinder/backup/services/swift.py:252 +msgid "About to put_object" +msgstr "" + +#: cinder/backup/services/swift.py:257 +#, python-format +msgid "swift MD5 for %(object_name)s: %(etag)s" +msgstr "" + +#: cinder/backup/services/swift.py:260 +#, python-format +msgid "backup MD5 for %(object_name)s: %(md5)s" +msgstr "" + +#: cinder/backup/services/swift.py:262 +#, python-format +msgid "" +"error writing object to swift, MD5 of object in swift %(etag)s is not the" +" same as MD5 of object sent to swift %(md5)s" +msgstr "" + +#: cinder/backup/services/swift.py:268 +msgid "Calling eventlet.sleep(0)" +msgstr "" + +#: cinder/backup/services/swift.py:276 +#, python-format +msgid "backup %s finished." +msgstr "" + +#: cinder/backup/services/swift.py:281 +#, python-format +msgid "v1 swift volume backup restore of %s started" +msgstr "" + +#: cinder/backup/services/swift.py:287 +#, python-format +msgid "metadata_object_names = %s" +msgstr "" + +#: cinder/backup/services/swift.py:293 +msgid "" +"restore_backup aborted, actual swift object list in swift does not match " +"object list stored in metadata" +msgstr "" + +#: cinder/backup/services/swift.py:299 +#, python-format +msgid "" +"restoring object from swift. 
backup: %(backup_id)s, container: " +"%(container)s, swift object name: %(object_name)s, volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:309 +#, python-format +msgid "decompressing data using %s algorithm" +msgstr "" + +#: cinder/backup/services/swift.py:323 +#, python-format +msgid "v1 swift volume backup restore of %s finished" +msgstr "" + +#: cinder/backup/services/swift.py:331 +#, python-format +msgid "" +"starting restore of backup %(object_prefix)s from swift container: " +"%(container)s, to volume %(volume_id)s, backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:339 +#, python-format +msgid "Restoring swift backup version %s" +msgstr "" + +#: cinder/backup/services/swift.py:344 +#, python-format +msgid "No support to restore swift backup version %s" +msgstr "" + +#: cinder/backup/services/swift.py:348 +#, python-format +msgid "restore %(backup_id)s to %(volume_id)s finished." +msgstr "" + +#: cinder/backup/services/swift.py:362 +msgid "swift error while listing objects, continuing with delete" +msgstr "" + +#: cinder/backup/services/swift.py:371 +#, python-format +msgid "swift error while deleting object %s, continuing with delete" +msgstr "" + +#: cinder/backup/services/swift.py:374 +#, python-format +msgid "deleted swift object: %(swift_object_name)s in container: %(container)s" +msgstr "" + +#: cinder/backup/services/swift.py:380 +#, python-format +msgid "delete %s finished" +msgstr "删除 %s 完成" + +#: cinder/brick/iscsi/iscsi.py:152 +#, python-format +msgid "Creating iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:171 cinder/brick/iscsi/iscsi.py:264 +#: cinder/brick/iscsi/iscsi.py:400 cinder/brick/iscsi/iscsi.py:409 +#, python-format +msgid "Failed to create iscsi target for volume id:%(vol_id)s." +msgstr "为卷 id:%(vol_id)s 创建 iscsi target 失败。" + +#: cinder/brick/iscsi/iscsi.py:181 +#, python-format +msgid "" +"Failed to create iscsi target for volume id:%(vol_id)s. Please ensure " +"your tgtd config file contains 'include %(volumes_dir)s/*'" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:192 +#, python-format +msgid "Removing iscsi_target for: %s" +msgstr "正在为 %s 删除 iscsi_target" + +#: cinder/brick/iscsi/iscsi.py:209 cinder/brick/iscsi/iscsi.py:426 +#, python-format +msgid "Failed to remove iscsi target for volume id:%(vol_id)s." 
+msgstr "尝试删除不存在的控制台%(console_id)s。" + +#: cinder/brick/iscsi/iscsi.py:218 cinder/brick/iscsi/iscsi.py:434 +msgid "valid iqn needed for show_target" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:270 +#, fuzzy, python-format +msgid "Removing iscsi_target for volume: %s" +msgstr "跳过remove_export。没有为卷提供iscsi_target:%d" + +#: cinder/brick/iscsi/iscsi.py:356 +msgid "rtstool is not installed correctly" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:376 +#, python-format +msgid "Creating iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:416 +#, fuzzy, python-format +msgid "Removing iscsi_target: %s" +msgstr "正在重启虚拟机 %s" + +#: cinder/brick/iscsi/iscsi.py:455 +#, python-format +msgid "Failed to add initiator iqn %s to target" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:35 +#, fuzzy, python-format +msgid "Unable to find Volume Group: %s" +msgstr "无法分离 %s 卷" + +#: cinder/brick/local_dev/lvm.py:41 +#, fuzzy, python-format +msgid "Failed to create Volume Group: %s" +msgstr "无法找到 %s 卷" + +#: cinder/brick/local_dev/lvm.py:75 +msgid "Error creating Volume Group" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:76 +#, python-format +msgid "Cmd :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:77 +#, python-format +msgid "StdOut :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:78 +#, python-format +msgid "StdErr :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:82 +#, fuzzy, python-format +msgid "Unable to locate Volume Group %s" +msgstr "无法找到 %s 卷" + +#: cinder/brick/local_dev/lvm.py:265 +#, fuzzy, python-format +msgid "Unable to find VG: %s" +msgstr "无法为VDI %s 找到VBD" + +#: cinder/brick/local_dev/lvm.py:284 +msgid "" +"Requested to setup thin provisioning, however current LVM version does " +"not support it." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:337 +#, fuzzy, python-format +msgid "Unable to find LV: %s" +msgstr "无法为VDI %s 找到VBD" + +#: cinder/common/sqlalchemyutils.py:68 +msgid "Id not in sort_keys; is sort_keys unique?" +msgstr "" + +#: cinder/common/sqlalchemyutils.py:116 +msgid "Unknown sort direction, must be 'desc' or 'asc'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:51 +msgid "Use of empty request context is deprecated" +msgstr "使用空的请求上下文是不推荐的" + +#: cinder/db/sqlalchemy/api.py:178 +#, python-format +msgid "Unrecognized read_deleted value '%s'" +msgstr "无法识别的 read_deleted 取值”%s“" + +#: cinder/db/sqlalchemy/api.py:786 +#, python-format +msgid "Change will make usage less than 0 for the following resources: %(unders)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1797 cinder/db/sqlalchemy/api.py:1823 +#, python-format +msgid "No backend config with id %(sm_backend_id)s" +msgstr "没有id为%(sm_backend_id)s的后台配置" + +#: cinder/db/sqlalchemy/api.py:1880 +#, python-format +msgid "No sm_flavor called %(sm_flavor)s" +msgstr "没有 sm_flavor 调用 %(sm_flavor)s" + +#: cinder/db/sqlalchemy/api.py:1924 +#, python-format +msgid "No sm_volume with id %(volume_id)s" +msgstr "没有id为 %(volume_id)s 的 sm_volume" + +#: cinder/db/sqlalchemy/api.py:1987 +#, fuzzy, python-format +msgid "No backup with id %(backup_id)s" +msgstr "没有id为%(sm_backend_id)s的后台配置" + +#: cinder/db/sqlalchemy/migration.py:74 +msgid "version should be an integer" +msgstr "version应该是整数" + +#: cinder/db/sqlalchemy/migration.py:101 +msgid "Upgrade DB using Essex release first." +msgstr "" + +#: cinder/db/sqlalchemy/session.py:71 +#, python-format +msgid "Got mysql server has gone away: %s" +msgstr "" + +#: cinder/db/sqlalchemy/session.py:132 +#, fuzzy, python-format +msgid "SQL connection failed. %s attempts left." 
+msgstr "SQL连接失败 (%(connstring)s)。还剩 %(attempts)d 次。" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:243 +msgid "Exception while creating table." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:272 +msgid "Downgrade from initial Cinder install is unsupported." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:85 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:123 +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:82 +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:47 +#, python-format +msgid "Table |%s| not created!" +msgstr "表 |%s| 没有创建" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:135 +#, fuzzy +msgid "quota_classes table not dropped" +msgstr "instance_info_caches 没有删除掉" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:142 +#, fuzzy +msgid "quota_usages table not dropped" +msgstr "instance_info_caches 没有删除掉" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:149 +#, fuzzy +msgid "reservations table not dropped" +msgstr "dns_domains 表没有删除" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:62 +msgid "Exception while creating table 'volume_glance_metedata'" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:77 +#, fuzzy +msgid "volume_glance_metadata table not dropped" +msgstr "instance_info_caches 没有删除掉" + +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:94 +#, fuzzy +msgid "backups table not dropped" +msgstr "dns_domains 表没有删除" + +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:60 +#, fuzzy +msgid "snapshot_metadata table not dropped" +msgstr "instance_info_caches 没有删除掉" + +#: cinder/image/glance.py:151 +#, python-format +msgid "" +"Error contacting glance server '%(host)s:%(port)s' for '%(method)s', " +"%(extra)s." +msgstr "" + +#: cinder/image/image_utils.py:116 +msgid "Snapshot list encountered but no header found!" +msgstr "" + +#: cinder/image/image_utils.py:224 +msgid "'qemu-img info' parsing failed." +msgstr "'qemu-img info'解析失败" + +#: cinder/image/image_utils.py:231 +#, fuzzy, python-format +msgid "fmt=%(fmt)s backed by:%(backing_file)s" +msgstr "fmt=%(fmt)s 由 %(backing_file)s 支持" + +#: cinder/image/image_utils.py:248 +#, python-format +msgid "Converted to raw, but format is now %s" +msgstr "转化为裸格式,但目前格式是 %s" + +#: cinder/image/image_utils.py:278 +#, fuzzy, python-format +msgid "Converted to %(f1)s, but format is now %(f2)s" +msgstr "转化为裸格式,但目前格式是 %s" + +#: cinder/openstack/common/exception.py:104 +#, fuzzy +msgid "Uncaught exception" +msgstr "得到异常:%s" + +#: cinder/openstack/common/excutils.py:48 +#, fuzzy, python-format +msgid "Original exception being dropped: %s" +msgstr "正在丢弃原来的异常。" + +#: cinder/openstack/common/lockutils.py:102 +#, python-format +msgid "Could not release the acquired lock `%s`" +msgstr "" + +#: cinder/openstack/common/lockutils.py:188 +#, fuzzy, python-format +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "获得信号量 \"%(lock)s\" 为方法 \"%(method)s\" ...锁" + +#: cinder/openstack/common/lockutils.py:199 +#, fuzzy, python-format +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." 
+msgstr "正在 试图获取锁 \"%(lock)s\" 为方法 \"%(method)s\"...锁" + +#: cinder/openstack/common/lockutils.py:226 +#, fuzzy, python-format +msgid "Got file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "获得文件锁 \"%(lock)s\" 为方法 \"%(method)s\"...锁" + +#: cinder/openstack/common/lockutils.py:234 +#, fuzzy, python-format +msgid "Released file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "获得文件锁 \"%(lock)s\" 为方法 \"%(method)s\"...锁" + +#: cinder/openstack/common/log.py:226 +#, fuzzy, python-format +msgid "Deprecated: %s" +msgstr "_delete: %s" + +#: cinder/openstack/common/log.py:329 +#, python-format +msgid "Error loading logging config %(log_config)s: %(err_msg)s" +msgstr "" + +#: cinder/openstack/common/log.py:379 +#, python-format +msgid "syslog facility must be one of: %s" +msgstr "syslog设备必须作为一个 %s 。" + +#: cinder/openstack/common/log.py:537 +#, fuzzy, python-format +msgid "Fatal call to deprecated config: %(msg)s" +msgstr "类 %(fullname)s 是不推荐的:%(msg)s" + +#: cinder/openstack/common/loopingcall.py:84 +#, python-format +msgid "task run outlasted interval by %s sec" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:91 +#, fuzzy +msgid "in fixed duration looping call" +msgstr "循环调用中。" + +#: cinder/openstack/common/loopingcall.py:131 +#, python-format +msgid "Dynamic looping call sleeping for %.02f seconds" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:138 +#, fuzzy +msgid "in dynamic looping call" +msgstr "循环调用中。" + +#: cinder/openstack/common/policy.py:149 +#, python-format +msgid "" +"Inheritance-based rules are deprecated; use the default brain instead of " +"%s." +msgstr "" + +#: cinder/openstack/common/policy.py:163 +#, fuzzy, python-format +msgid "Failed to understand rule %(match)r" +msgstr "注入文件失败:%(resp)r" + +#: cinder/openstack/common/policy.py:173 +#, python-format +msgid "Inheritance-based rules are deprecated; update _check_%s" +msgstr "" + +#: cinder/openstack/common/policy.py:180 +#, python-format +msgid "No handler for matches of kind %s" +msgstr "" + +#: cinder/openstack/common/service.py:112 +#: cinder/openstack/common/service.py:275 +#, fuzzy +msgid "Full set of CONF:" +msgstr "标记全集:" + +#: cinder/openstack/common/service.py:261 +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "" + +#: cinder/openstack/common/strutils.py:72 +#, python-format +msgid "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:125 +#, python-format +msgid "%s not in valid priorities" +msgstr "%s 不在有效的优先级" + +#: cinder/openstack/common/notifier/api.py:141 +#, python-format +msgid "" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" +msgstr "Problem '%(e)s' 试图发送到通知系统。Payload=%(payload)s" + +#: cinder/openstack/common/notifier/api.py:171 +#, python-format +msgid "Failed to load notifier %s. These notifications will not be sent." +msgstr "" + +#: cinder/openstack/common/notifier/rabbit_notifier.py:27 +msgid "The rabbit_notifier is now deprecated. Please use rpc_notifier instead." +msgstr "" + +#: cinder/openstack/common/notifier/rpc_notifier.py:45 +#: cinder/openstack/common/notifier/rpc_notifier2.py:51 +#, python-format +msgid "Could not send notification to %(topic)s. Payload=%(message)s" +msgstr "" + +#: cinder/openstack/common/rpc/__init__.py:106 +#, python-format +msgid "" +"A RPC is being made while holding a lock. The locks currently held are " +"%(locks)s. This is probably a bug. Please report it. 
Include the " +"following: [%(stack)s]." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:75 +msgid "Pool creating new connection" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:200 +#, python-format +msgid "no calling threads waiting for msg_id : %s, message : %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:208 +#, python-format +msgid "" +"Number of call waiters is greater than warning threshhold: %d. There " +"could be a MulticallProxyWaiter leak." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:291 +#, python-format +msgid "unpacked context: %s" +msgstr "未打包的上下文:%s" + +#: cinder/openstack/common/rpc/amqp.py:337 +#, python-format +msgid "UNIQUE_ID is %s." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:406 +#, python-format +msgid "received %s" +msgstr "已接收 %s" + +#: cinder/openstack/common/rpc/amqp.py:414 +#, python-format +msgid "no method for message: %s" +msgstr "没有适用于消息的方法:%s" + +#: cinder/openstack/common/rpc/amqp.py:415 +#, python-format +msgid "No method for message: %s" +msgstr "没有适用于消息的方法:%s" + +#: cinder/openstack/common/rpc/amqp.py:443 +#: cinder/openstack/common/rpc/impl_zmq.py:286 +#, python-format +msgid "Expected exception during message handling (%s)" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:451 +#: cinder/openstack/common/rpc/impl_zmq.py:292 +msgid "Exception during message handling" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:586 +#, fuzzy, python-format +msgid "Making synchronous call on %s ..." +msgstr "在 %s 做异步call" + +#: cinder/openstack/common/rpc/amqp.py:589 +#, python-format +msgid "MSG_ID is %s" +msgstr "消息ID(MSG_ID)是 %s" + +#: cinder/openstack/common/rpc/amqp.py:623 +#, python-format +msgid "Making asynchronous cast on %s..." +msgstr "在 %s 做异步cast" + +#: cinder/openstack/common/rpc/amqp.py:632 +msgid "Making asynchronous fanout cast..." +msgstr "做异步fanout cast" + +#: cinder/openstack/common/rpc/amqp.py:660 +#, python-format +msgid "Sending %(event_type)s on %(topic)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:74 +#, fuzzy +msgid "An unknown RPC related exception occurred." +msgstr "发生未知异常。" + +#: cinder/openstack/common/rpc/common.py:104 +#, python-format +msgid "" +"Remote error: %(exc_type)s %(value)s\n" +"%(traceback)s." +msgstr "" +"远程错误:%(exc_type)s %(value)s\n" +"%(traceback)s。" + +#: cinder/openstack/common/rpc/common.py:121 +#, python-format +msgid "" +"Timeout while waiting on RPC response - topic: \"%(topic)s\", RPC method:" +" \"%(method)s\" info: \"%(info)s\"" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:137 +#: cinder/openstack/common/rpc/common.py:138 +#: cinder/openstack/common/rpc/common.py:139 +msgid "" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:143 +#, python-format +msgid "Found duplicate message(%(msg_id)s). Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:147 +msgid "Invalid reuse of an RPC connection." +msgstr "无效的RPC连接重用。" + +#: cinder/openstack/common/rpc/common.py:151 +#, python-format +msgid "Specified RPC version, %(version)s, not supported by this endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:156 +#, python-format +msgid "" +"Specified RPC envelope version, %(version)s, not supported by this " +"endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:280 +#, fuzzy, python-format +msgid "Failed to sanitize %(item)s. 
Key error %(err)s" +msgstr "删除 %(base_file)s 失败,错误是 %(error)s" + +#: cinder/openstack/common/rpc/common.py:302 +#, python-format +msgid "Returning exception %s to caller" +msgstr "返回 %s 异常给调用者" + +#: cinder/openstack/common/rpc/impl_kombu.py:169 +#: cinder/openstack/common/rpc/impl_qpid.py:133 +msgid "Failed to process message... skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:480 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "正在重新连接位于 %(hostname)s:%(port)d 的AMQP服务器" + +#: cinder/openstack/common/rpc/impl_kombu.py:502 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "连接到位于 %(hostname)s:%(port)d 的AMQP服务器" + +#: cinder/openstack/common/rpc/impl_kombu.py:539 +#, python-format +msgid "" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" +msgstr "" +"无法连接到位于%(hostname)s:%(port)d的AMQP server,尝试已经 %(max_retries)d " +"次:%(err_str)s" + +#: cinder/openstack/common/rpc/impl_kombu.py:555 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "位于%(hostname)s:%(port)d的AMQP服务器不可达:%(err_str)s。%(sleep_time)d 秒钟后请再尝试。" + +#: cinder/openstack/common/rpc/impl_kombu.py:609 +#: cinder/openstack/common/rpc/impl_qpid.py:403 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "为topic '%(topic)s'声明消费者失败:%(err_str)s" + +#: cinder/openstack/common/rpc/impl_kombu.py:627 +#: cinder/openstack/common/rpc/impl_qpid.py:418 +#, python-format +msgid "Timed out waiting for RPC response: %s" +msgstr "等待RPC响应超时:%s" + +#: cinder/openstack/common/rpc/impl_kombu.py:631 +#: cinder/openstack/common/rpc/impl_qpid.py:422 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "从队列中消费消息失败:%s" + +#: cinder/openstack/common/rpc/impl_kombu.py:670 +#: cinder/openstack/common/rpc/impl_qpid.py:457 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "给topic '%(topic)s'发布消息失败:%(err_str)s" + +#: cinder/openstack/common/rpc/impl_qpid.py:351 +#, fuzzy, python-format +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" +msgstr "无法连接到AMQP服务器:%s " + +#: cinder/openstack/common/rpc/impl_qpid.py:357 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "连接到 %s 的AMQP服务器" + +#: cinder/openstack/common/rpc/impl_qpid.py:370 +msgid "Re-established AMQP queues" +msgstr "重建AMQP队列" + +#: cinder/openstack/common/rpc/impl_qpid.py:430 +msgid "Error processing message. Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:97 +msgid "JSON serialization failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:104 +#, fuzzy, python-format +msgid "Deserializing: %s" +msgstr "Ext 描述: %s" + +#: cinder/openstack/common/rpc/impl_zmq.py:140 +#, python-format +msgid "Connecting to %(addr)s with %(type)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:141 +#, python-format +msgid "-> Subscribed to %(subscribe)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:142 +#, python-format +msgid "-> bind: %(bind)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:150 +msgid "Could not open socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:162 +#, python-format +msgid "Subscribing to %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:204 +msgid "You cannot recv on this socket." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:209 +msgid "You cannot send on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:273 +#, fuzzy, python-format +msgid "Running func with context: %s" +msgstr "未打包的上下文:%s" + +#: cinder/openstack/common/rpc/impl_zmq.py:311 +msgid "Sending reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:345 +msgid "RPC message did not include method." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:380 +#, fuzzy +msgid "Registering reactor" +msgstr "正在注销虚拟机 %s" + +#: cinder/openstack/common/rpc/impl_zmq.py:392 +#, fuzzy +msgid "In reactor registered" +msgstr "没有虚拟机注册" + +#: cinder/openstack/common/rpc/impl_zmq.py:407 +msgid "Out reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:411 +msgid "Consuming socket" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:453 +#, python-format +msgid "CONSUMER GOT %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:465 +#, python-format +msgid "Creating proxy for topic: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:471 +msgid "Topic contained dangerous characters." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:496 +#, python-format +msgid "ROUTER RELAY-OUT SUCCEEDED %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:505 +msgid "Topic socket file creation failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:510 +#, python-format +msgid "ROUTER RELAY-OUT QUEUED %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:513 +#, python-format +msgid "Local per-topic backlog buffer full for topic %(topic)s. Dropping message." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:532 +#, fuzzy, python-format +msgid "Could not create IPC directory %s" +msgstr "移除容器失败:%s" + +#: cinder/openstack/common/rpc/impl_zmq.py:542 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:576 +#, fuzzy, python-format +msgid "CONSUMER RECEIVED DATA: %s" +msgstr "给定数据:%s" + +#: cinder/openstack/common/rpc/impl_zmq.py:578 +#, python-format +msgid "ROUTER RELAY-OUT %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:600 +msgid "ZMQ Envelope version unsupported or unknown." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:628 +msgid "Skipping topic registration. Already registered." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:635 +#, python-format +msgid "Consumer is a zmq.%s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:687 +#, fuzzy +msgid "Creating payload" +msgstr "正在创建镜像" + +#: cinder/openstack/common/rpc/impl_zmq.py:700 +msgid "Creating queue socket for reply waiter" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:713 +msgid "Sending cast" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:716 +msgid "Cast sent; Waiting reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:719 +#, fuzzy, python-format +msgid "Received message: %s" +msgstr "已接收 %s" + +#: cinder/openstack/common/rpc/impl_zmq.py:720 +msgid "Unpacking response" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:729 +msgid "Unsupported or unknown ZMQ envelope returned." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:736 +#, fuzzy +msgid "RPC Message Invalid." 
+msgstr "请求无效。" + +#: cinder/openstack/common/rpc/impl_zmq.py:760 +#, python-format +msgid "%(msg)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:763 +#, python-format +msgid "Sending message(s) to: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:767 +msgid "No matchmaker results. Not casting." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:770 +msgid "No match from matchmaker." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:53 +msgid "Match not found by MatchMaker." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:89 +#, fuzzy +msgid "Matchmaker does not implement registration or heartbeat." +msgstr "xenapi.fake 没有 %s 的实现" + +#: cinder/openstack/common/rpc/matchmaker.py:239 +#, python-format +msgid "Matchmaker unregistered: %s, %s" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:250 +msgid "Register before starting heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:343 +#: cinder/openstack/common/rpc/matchmaker.py:361 +#, python-format +msgid "No key defining hosts for topic '%s', see ringfile" +msgstr "" + +#: cinder/scheduler/chance.py:51 cinder/scheduler/simple.py:90 +msgid "Is the appropriate service running?" +msgstr "正确的服务在运行吗?" + +#: cinder/scheduler/chance.py:56 +#, fuzzy +msgid "Could not find another host" +msgstr "无法找到另一个计算节点" + +#: cinder/scheduler/driver.py:89 +msgid "Must implement a fallback schedule" +msgstr "必须实现一个回滚 schedule" + +#: cinder/scheduler/driver.py:93 +#, fuzzy +msgid "Must implement schedule_create_volume" +msgstr "必须实现一个回滚 schedule" + +#: cinder/scheduler/filter_scheduler.py:113 +msgid "Invalid value for 'scheduler_max_attempts', must be >=1" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:131 +#, python-format +msgid "" +"Error scheduling %(volume_id)s from last vol-service: %(last_host)s : " +"%(exc)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:160 +#, python-format +msgid "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:208 +#, python-format +msgid "Filtered %(hosts)s" +msgstr "过滤掉的主机 %(hosts)s" + +#: cinder/scheduler/filter_scheduler.py:214 +#, python-format +msgid "Choosing %(best_host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:240 +#, fuzzy, python-format +msgid "Ignoring %(service_name)s service update from %(host)s" +msgstr "接收到 %(service_name)s 服务更新,来自 %(host)s。" + +#: cinder/scheduler/host_manager.py:244 +#, python-format +msgid "Received %(service_name)s service update from %(host)s." +msgstr "接收到 %(service_name)s 服务更新,来自 %(host)s。" + +#: cinder/scheduler/host_manager.py:266 +msgid "service is down or disabled." +msgstr "" + +#: cinder/scheduler/manager.py:119 +#, python-format +msgid "Failed to schedule_%(method)s: %(ex)s" +msgstr "schedule_%(method)s 失败:%(ex)s" + +#: cinder/scheduler/scheduler_options.py:69 +#, python-format +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgstr "无法统计调度器的选项文件 %(filename)s:“%(e)s”" + +#: cinder/scheduler/scheduler_options.py:78 +#, python-format +msgid "Could not decode scheduler options: '%(e)s'" +msgstr "无法解码调度器的选项:“%(e)s”" + +#: cinder/scheduler/simple.py:79 +msgid "Not enough allocatable volume gigabytes remaining" +msgstr "卷没有足够可分配的空间" + +#: cinder/scheduler/filters/capacity_filter.py:37 +#, fuzzy +msgid "Free capacity not set: volume node info collection broken." 
+msgstr "未设置 VCPUs;假设 CPU 集合损坏了" + +#: cinder/scheduler/filters/capacity_filter.py:51 +#, python-format +msgid "" +"Insufficient free space for volume creation (requested / avail): " +"%(requested)s/%(available)s" +msgstr "" + +#: cinder/scheduler/filters/retry_filter.py:41 +#, python-format +msgid "Host %(host)s %(pass_msg)s. Previously tried hosts: %(hosts)s" +msgstr "" + +#: cinder/tests/fake_driver.py:45 cinder/volume/driver.py:506 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "FAKE ISCSI: %s" + +#: cinder/tests/fake_driver.py:93 cinder/tests/fake_driver.py:98 +#, python-format +msgid "LoggingVolumeDriver: %s" +msgstr "LoggingVolumeDriver: %s" + +#: cinder/tests/fake_utils.py:72 +#, python-format +msgid "Faking execution of cmd (subprocess): %s" +msgstr "伪执行命令(子进程):%s" + +#: cinder/tests/fake_utils.py:80 +#, python-format +msgid "Faked command matched %s" +msgstr "伪命令匹配 %s" + +#: cinder/tests/fake_utils.py:96 +#, python-format +msgid "Faked command raised an exception %s" +msgstr "伪命令引起异常 %s" + +#: cinder/tests/fake_utils.py:101 +#, python-format +msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" +msgstr "伪命令的标准输出stdout='%(stdout)s' 标准错误输出 stderr='%(stderr)s'" + +#: cinder/tests/test_misc.py:59 +#, python-format +msgid "" +"The following migrations are missing a downgrade:\n" +"\t%s" +msgstr "" +"下面的迁移缺少了降级:\n" +"\t%s" + +#: cinder/tests/test_storwize_svc.py:243 +#, fuzzy, python-format +msgid "unrecognized argument %s" +msgstr "无法识别的 read_deleted 取值”%s“" + +#: cinder/tests/test_storwize_svc.py:1217 +#, python-format +msgid "Run CLI command: %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1220 +#, fuzzy, python-format +msgid "" +"CLI output:\n" +" stdout: %(stdout)s\n" +" stderr: %(stderr)s" +msgstr "伪命令的标准输出stdout='%(stdout)s' 标准错误输出 stderr='%(stderr)s'" + +#: cinder/tests/test_storwize_svc.py:1225 +#, python-format +msgid "" +"CLI Exception output:\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/tests/test_volume_types.py:58 +#, python-format +msgid "Given data: %s" +msgstr "给定数据:%s" + +#: cinder/tests/test_volume_types.py:59 +#, python-format +msgid "Result data: %s" +msgstr "结果数据:%s" + +#: cinder/tests/api/contrib/test_backups.py:638 +#, fuzzy +msgid "Invalid input" +msgstr "无效的快照" + +#: cinder/tests/integrated/test_login.py:31 +#, fuzzy, python-format +msgid "volume: %s" +msgstr "分离卷 %s" + +#: cinder/tests/integrated/api/client.py:37 +#, python-format +msgid "" +"%(message)s\n" +"Status Code: %(_status)s\n" +"Body: %(_body)s" +msgstr "" +"%(message)s\n" +"状态码: %(_status)s\n" +"主体: %(_body)s" + +#: cinder/tests/integrated/api/client.py:46 +msgid "Authentication error" +msgstr "认证错误" + +#: cinder/tests/integrated/api/client.py:54 +msgid "Authorization error" +msgstr "授权错误" + +#: cinder/tests/integrated/api/client.py:62 +msgid "Item not found" +msgstr "条目没有找到" + +#: cinder/tests/integrated/api/client.py:104 +#, python-format +msgid "Doing %(method)s on %(relative_url)s" +msgstr "正在 %(relative_url)s 执行 %(method)s" + +#: cinder/tests/integrated/api/client.py:106 +#, python-format +msgid "Body: %s" +msgstr "主体:%s" + +#: cinder/tests/integrated/api/client.py:124 +#, python-format +msgid "%(auth_uri)s => code %(http_status)s" +msgstr "%(auth_uri)s => code %(http_status)s" + +#: cinder/tests/integrated/api/client.py:150 +#, python-format +msgid "%(relative_uri)s => code %(http_status)s" +msgstr "%(relative_uri)s => code %(http_status)s" + +#: cinder/tests/integrated/api/client.py:160 +msgid "Unexpected status code" +msgstr "意外的状态码" + 
+#: cinder/tests/integrated/api/client.py:167 +#, python-format +msgid "Decoding JSON: %s" +msgstr "解码JSON:%s" + +#: cinder/volume/api.py:96 +msgid "May specify only one of snapshot, imageRef or source volume" +msgstr "" + +#: cinder/volume/api.py:103 cinder/volume/api.py:449 +#: cinder/volume/manager.py:554 +msgid "status must be available" +msgstr "状态必须可用" + +#: cinder/volume/api.py:108 +msgid "Volume size cannot be lesser than the Snapshot size" +msgstr "" + +#: cinder/volume/api.py:117 +msgid "Unable to clone volumes that are in an error state" +msgstr "" + +#: cinder/volume/api.py:123 +msgid "Clones currently must be >= original volume size." +msgstr "" + +#: cinder/volume/api.py:140 +#, python-format +msgid "Volume size '%s' must be an integer and greater than 0" +msgstr "" + +#: cinder/volume/api.py:150 +msgid "Size of specified image is larger than volume size." +msgstr "" + +#: cinder/volume/api.py:154 +msgid "Image minDisk size is larger than the volume size." +msgstr "" + +#: cinder/volume/api.py:168 +#, fuzzy, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "%(pid)s 的配额超出,尝试创建 %(size)sG 的卷" + +#: cinder/volume/api.py:177 +#, fuzzy, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create volume (%(d_consumed)d " +"volumes already consumed)" +msgstr "%(pid)s 的配额超出,尝试创建 %(size)sG 的卷" + +#: cinder/volume/api.py:308 +msgid "Failed to update quota for deleting volume" +msgstr "" + +#: cinder/volume/api.py:316 +#, fuzzy +msgid "Volume status must be available or error" +msgstr "卷组状态必须可获取" + +#: cinder/volume/api.py:321 +#, python-format +msgid "Volume still has %d dependent snapshots" +msgstr "" + +#: cinder/volume/api.py:374 cinder/volume/api.py:432 +#: cinder/volume/volume_types.py:65 +#, python-format +msgid "Searching by: %s" +msgstr "搜索条件: %s" + +#: cinder/volume/api.py:452 +msgid "already attached" +msgstr "已经附加" + +#: cinder/volume/api.py:459 +msgid "already detached" +msgstr "已经分离" + +#: cinder/volume/api.py:470 +#, fuzzy +msgid "Volume status must be available to reserve" +msgstr "卷组状态必须可获取" + +#: cinder/volume/api.py:519 +msgid "must be available" +msgstr "必须可用" + +#: cinder/volume/api.py:537 +#, fuzzy, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG snapshot " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "%(pid)s 的配额超出,尝试创建 %(size)sG 的卷" + +#: cinder/volume/api.py:546 +#, fuzzy, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create snapshot (%(d_consumed)d " +"snapshots already consumed)" +msgstr "%(pid)s 已经超过配额,试图运行 %(min_count)s 个实例" + +#: cinder/volume/api.py:595 +#, fuzzy +msgid "Volume Snapshot status must be available or error" +msgstr "卷组状态必须可获取" + +#: cinder/volume/api.py:623 +msgid "Metadata property key blank" +msgstr "" + +#: cinder/volume/api.py:627 +#, fuzzy +msgid "Metadata property key greater than 255 characters" +msgstr "安全组 %s 不能比255个字符更长。" + +#: cinder/volume/api.py:631 +#, fuzzy +msgid "Metadata property value greater than 255 characters" +msgstr "安全组 %s 不能比255个字符更长。" + +#: cinder/volume/api.py:716 +#, fuzzy +msgid "Volume status must be available/in-use." +msgstr "卷组状态必须可获取" + +#: cinder/volume/api.py:719 +#, fuzzy +msgid "Volume status is in-use." +msgstr "卷 %s:卷繁忙" + +#: cinder/volume/driver.py:94 cinder/volume/drivers/netapp/nfs.py:253 +#, python-format +msgid "Recovering from a failed execute. 
Try number %s" +msgstr "从失败的执行中恢复。尝试编号 %s" + +#: cinder/volume/driver.py:221 cinder/volume/drivers/emc/emc_smis_iscsi.py:119 +msgid "ISCSI provider_location not stored, using discovery" +msgstr "采用discovery,ISCSI provider_location 没有存储" + +#: cinder/volume/driver.py:269 +#, fuzzy, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "无法为卷 %s 找到 iSCSI 导出" + +#: cinder/volume/driver.py:273 cinder/volume/drivers/emc/emc_smis_iscsi.py:162 +#, python-format +msgid "ISCSI Discovery: Found %s" +msgstr "ISCSI Discovery:找到 %s" + +#: cinder/volume/driver.py:361 +#, python-format +msgid "copy_image_to_volume %s." +msgstr "" + +#: cinder/volume/driver.py:378 +#, python-format +msgid "copy_volume_to_image %s." +msgstr "" + +#: cinder/volume/driver.py:437 +#, python-format +msgid "iSCSI device not found at %s" +msgstr "在 %s 未找到iSCSI设备" + +#: cinder/volume/driver.py:439 +#, fuzzy, python-format +msgid "" +"ISCSI volume not yet found at: %(host_device)s. Will rescan & retry. Try" +" number: %(tries)s" +msgstr "在 %(mount_device)s 上还没有找到iSCSI卷。将再次扫描并重试。尝试次数:%(tries)s" + +#: cinder/volume/driver.py:451 +#, fuzzy, python-format +msgid "Found iSCSI node %(host_device)s (after %(tries)s rescans)" +msgstr "找到iSCSI节点 %(mount_device)s (经过%(tries)s 次再扫描)" + +#: cinder/volume/driver.py:469 cinder/volume/manager.py:689 +#: cinder/volume/drivers/lvm.py:560 cinder/volume/drivers/lvm.py:676 +#: cinder/volume/drivers/storwize_svc.py:1344 +#: cinder/volume/drivers/emc/emc_smis_common.py:855 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:241 +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1503 +#: cinder/volume/drivers/netapp/iscsi.py:1114 +#: cinder/volume/drivers/netapp/iscsi.py:1484 +#: cinder/volume/drivers/netapp/iscsi.py:2236 +#: cinder/volume/drivers/netapp/iscsi.py:2515 +#: cinder/volume/drivers/nexenta/volume.py:317 +#, fuzzy +msgid "Updating volume status" +msgstr "更新主机状态" + +#: cinder/volume/driver.py:544 +msgid "Driver must implement initialize_connection" +msgstr "" + +#: cinder/volume/manager.py:122 +#, python-format +msgid "Driver path %s is deprecated, update your configuration to the new path." 
+msgstr "" + +#: cinder/volume/manager.py:144 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "重新导出卷%s" + +#: cinder/volume/manager.py:149 +#, python-format +msgid "volume %s stuck in a downloading state" +msgstr "" + +#: cinder/volume/manager.py:154 +#, python-format +msgid "volume %s: skipping export" +msgstr "卷 %s:跳过导出" + +#: cinder/volume/manager.py:156 +msgid "Resuming any in progress delete operations" +msgstr "" + +#: cinder/volume/manager.py:159 +#, fuzzy, python-format +msgid "Resuming delete on volume: %s" +msgstr "正在删除volumeID:%s " + +#: cinder/volume/manager.py:219 +#, python-format +msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +msgstr "卷%(vol_name)s:创建大小为%(vol_size)s的逻辑卷" + +#: cinder/volume/manager.py:228 +#, fuzzy, python-format +msgid "volume %s: creating from snapshot" +msgstr "卷%s:正在创建导出" + +#: cinder/volume/manager.py:232 +#, fuzzy, python-format +msgid "volume %s: creating from existing volume" +msgstr "卷%s:正在创建导出" + +#: cinder/volume/manager.py:236 +#, fuzzy, python-format +msgid "volume %s: creating from image" +msgstr "卷 %s: 创建中" + +#: cinder/volume/manager.py:245 +#, python-format +msgid "volume %s: creating" +msgstr "卷 %s: 创建中" + +#: cinder/volume/manager.py:277 +#, python-format +msgid "volume %s: creating export" +msgstr "卷%s:正在创建导出" + +#: cinder/volume/manager.py:286 +#, fuzzy, python-format +msgid "volume %s: create failed" +msgstr "卷 %s: 创建中" + +#: cinder/volume/manager.py:316 +#, python-format +msgid "volume %s: created successfully" +msgstr "卷%s:创建成功" + +#: cinder/volume/manager.py:324 +#, fuzzy, python-format +msgid "Error: %s" +msgstr "数据库错误:%s" + +#: cinder/volume/manager.py:350 +#, python-format +msgid "volume %s: Error trying to reschedule create" +msgstr "" + +#: cinder/volume/manager.py:368 +msgid "Retry info not present, will not reschedule" +msgstr "" + +#: cinder/volume/manager.py:372 +msgid "No request spec, will not reschedule" +msgstr "" + +#: cinder/volume/manager.py:377 +#, python-format +msgid "volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d" +msgstr "" + +#: cinder/volume/manager.py:406 cinder/volume/manager.py:419 +#, python-format +msgid "volume %s: deleting" +msgstr "卷%s:删除中" + +#: cinder/volume/manager.py:412 +#, fuzzy +msgid "volume is not local to this node" +msgstr "卷不属于这个节点" + +#: cinder/volume/manager.py:417 +#, python-format +msgid "volume %s: removing export" +msgstr "卷%s:正在移除导出" + +#: cinder/volume/manager.py:422 +#, python-format +msgid "volume %s: volume is busy" +msgstr "卷 %s:卷繁忙" + +#: cinder/volume/manager.py:441 +#, fuzzy +msgid "Failed to update usages deleting volume" +msgstr "更新代理失败:%(resp)r" + +#: cinder/volume/manager.py:445 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "卷%s:删除成功" + +#: cinder/volume/manager.py:460 +#, python-format +msgid "snapshot %s: creating" +msgstr "快照 %s:正在创建" + +#: cinder/volume/manager.py:466 +#, python-format +msgid "snapshot %(snap_name)s: creating" +msgstr "快照 %(snap_name)s:正在创建" + +#: cinder/volume/manager.py:484 +#, python-format +msgid "snapshot %s: created successfully" +msgstr "快照 %s:创建成功" + +#: cinder/volume/manager.py:492 cinder/volume/manager.py:502 +#, python-format +msgid "snapshot %s: deleting" +msgstr "快照 %s:正在删除" + +#: cinder/volume/manager.py:505 +#, fuzzy, python-format +msgid "snapshot %s: snapshot is busy" +msgstr "快照 %s:创建成功" + +#: cinder/volume/manager.py:530 +msgid "Failed to update usages deleting snapshot" +msgstr "" + +#: cinder/volume/manager.py:533 +#, python-format +msgid "snapshot %s: deleted 
successfully" +msgstr "快照 %s:删除成功" + +#: cinder/volume/manager.py:551 +msgid "being attached by another instance" +msgstr "" + +#: cinder/volume/manager.py:608 +#, python-format +msgid "Downloaded image %(image_id)s to %(volume_id)s successfully" +msgstr "" + +#: cinder/volume/manager.py:626 +#, fuzzy, python-format +msgid "Uploaded volume %(volume_id)s to image (%(image_id)s) successfully" +msgstr "把卷 %(volume_id)s 附加到实例 %(instance_id)s 上位置在 %(device)s" + +#: cinder/volume/manager.py:702 +msgid "Clear capabilities" +msgstr "清理能力" + +#: cinder/volume/manager.py:706 +#, python-format +msgid "Notification {%s} received" +msgstr "收到通知 {%s}" + +#: cinder/volume/volume_types.py:41 +#, python-format +msgid "DB error: %s" +msgstr "数据库错误:%s" + +#: cinder/volume/volume_types.py:50 cinder/volume/volume_types.py:97 +msgid "id cannot be None" +msgstr "id不能是None" + +#: cinder/volume/volume_types.py:109 +msgid "name cannot be None" +msgstr "name不能是None" + +#: cinder/volume/volume_types.py:128 +#, python-format +msgid "" +"Default volume type is not found, please check default_volume_type " +"config: %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:91 +msgid "Running with CoraidDriver for ESM EtherCLoud" +msgstr "" + +#: cinder/volume/drivers/coraid.py:102 +#, python-format +msgid "Update session cookie %(session)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:108 cinder/volume/drivers/coraid.py:183 +#, fuzzy, python-format +msgid "Message : %(message)s" +msgstr "%(code)s: %(message)s" + +#: cinder/volume/drivers/coraid.py:125 +#, python-format +msgid "Error while trying to set group: %(message)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:128 +#, fuzzy, python-format +msgid "Unable to find group: %(group)s" +msgstr "无法找到地址 %r" + +#: cinder/volume/drivers/coraid.py:161 +msgid "ESM urlOpen error" +msgstr "" + +#: cinder/volume/drivers/coraid.py:166 +#, fuzzy +msgid "JSON Error" +msgstr "迁移错误" + +#: cinder/volume/drivers/coraid.py:170 +msgid "Request without URL" +msgstr "" + +#: cinder/volume/drivers/coraid.py:175 +#, fuzzy, python-format +msgid "Configure data : %s" +msgstr "给定数据:%s" + +#: cinder/volume/drivers/coraid.py:177 +#, fuzzy, python-format +msgid "Configure response : %s" +msgstr "响应 %s" + +#: cinder/volume/drivers/coraid.py:199 +#, fuzzy, python-format +msgid "Unable to retrive volume infos for volume %(volname)s" +msgstr "没有为卷 %(volume_id)s 找到目标id。" + +#: cinder/volume/drivers/coraid.py:289 +msgid "Cannot login on Coraid ESM" +msgstr "" + +#: cinder/volume/drivers/coraid.py:308 +#, fuzzy, python-format +msgid "Fail to create volume %(volname)s" +msgstr "无法找到 %s 卷" + +#: cinder/volume/drivers/coraid.py:321 +#, fuzzy, python-format +msgid "Failed to delete volume %(volname)s" +msgstr "无法找到 %s 卷" + +#: cinder/volume/drivers/coraid.py:335 +#, fuzzy, python-format +msgid "Failed to Create Snapshot %(snapname)s" +msgstr "重新启动实例失败" + +#: cinder/volume/drivers/coraid.py:347 +#, fuzzy, python-format +msgid "Failed to Delete Snapshot %(snapname)s" +msgstr "快照 %(snap_name)s:正在创建" + +#: cinder/volume/drivers/coraid.py:362 +#, fuzzy, python-format +msgid "Failed to Create Volume from Snapshot %(snapname)s" +msgstr "从快照 %s 创建卷" + +#: cinder/volume/drivers/coraid.py:383 +#, python-format +msgid "" +"Failed to Initialize Connection. 
Volume Name: %(volname)s Shelf: " +"%(shelf)s, Lun: %(lun)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:65 +#, python-format +msgid "There's no Gluster config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:70 +#, python-format +msgid "Gluster config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:80 +msgid "mount.glusterfs is not installed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:98 cinder/volume/drivers/nfs.py:168 +#, fuzzy, python-format +msgid "casted to %s" +msgstr "嵌套(调用)返回 %s" + +#: cinder/volume/drivers/glusterfs.py:108 cinder/volume/drivers/nfs.py:178 +#, python-format +msgid "Volume %s does not have provider_location specified, skipping" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:168 cinder/volume/drivers/nfs.py:237 +#, fuzzy, python-format +msgid "Exception during mounting %s" +msgstr "加载扩展发生异常:%s" + +#: cinder/volume/drivers/glusterfs.py:247 cinder/volume/drivers/nfs.py:321 +#, fuzzy, python-format +msgid "%s is already mounted" +msgstr "镜像已经挂载" + +#: cinder/volume/drivers/lvm.py:82 +#, python-format +msgid "volume group %s doesn't exist" +msgstr "卷组 %s 不存在" + +#: cinder/volume/drivers/lvm.py:197 +#, python-format +msgid "Size for volume: %s not found, skipping secure delete." +msgstr "" + +#: cinder/volume/drivers/lvm.py:204 +#, python-format +msgid "Performing secure delete on volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:218 +#, python-format +msgid "Error unrecognized volume_clear option: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:238 +#, python-format +msgid "snapshot: %s not found, skipping delete operations" +msgstr "" + +#: cinder/volume/drivers/lvm.py:268 cinder/volume/drivers/lvm.py:656 +#, fuzzy, python-format +msgid "Creating clone of volume: %s" +msgstr "创建卷 %s 的快照" + +#: cinder/volume/drivers/lvm.py:345 +#, fuzzy, python-format +msgid "Skipping ensure_export. No iscsi_target provision for volume: %s" +msgstr "跳过ensure_export。没有为卷提供iscsi_target:%d" + +#: cinder/volume/drivers/lvm.py:364 +#, fuzzy, python-format +msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %s" +msgstr "跳过ensure_export。没有为卷提供iscsi_target:%d" + +#: cinder/volume/drivers/lvm.py:378 +msgid "Detected inconsistency in provider_location id" +msgstr "" + +#: cinder/volume/drivers/lvm.py:430 +#, fuzzy, python-format +msgid "Symbolic link %s not found" +msgstr "没有找到标记 [%s]" + +#: cinder/volume/drivers/lvm.py:510 cinder/volume/drivers/lvm.py:524 +#, fuzzy, python-format +msgid "Skipping remove_export. No iscsi_target provisioned for volume: %s" +msgstr "跳过remove_export。没有为卷提供iscsi_target:%d" + +#: cinder/volume/drivers/lvm.py:542 +#, fuzzy, python-format +msgid "" +"Skipping remove_export. 
No iscsi_target is presently exported for volume:" +" %s" +msgstr "跳过remove_export。没有为卷导出iscsi_target:%d" + +#: cinder/volume/drivers/lvm.py:583 +msgid "Error retrieving volume status: " +msgstr "" + +#: cinder/volume/drivers/nfs.py:141 +#, python-format +msgid "There's no NFS config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/nfs.py:146 +#, python-format +msgid "NFS config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/rbd.py:64 +#, python-format +msgid "rbd has no pool %s" +msgstr "RBD没有池 %s" + +#: cinder/volume/drivers/rbd.py:87 cinder/volume/drivers/sheepdog.py:134 +msgid "error refreshing volume stats" +msgstr "" + +#: cinder/volume/drivers/rbd.py:220 +msgid "Not stored in rbd" +msgstr "" + +#: cinder/volume/drivers/rbd.py:224 +msgid "Blank components" +msgstr "" + +#: cinder/volume/drivers/rbd.py:227 +#, fuzzy +msgid "Not an rbd snapshot" +msgstr "无效的快照" + +#: cinder/volume/drivers/rbd.py:242 +#, python-format +msgid "%s is in a different ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:253 +#, fuzzy, python-format +msgid "Unable to read image %s" +msgstr "无法找到 %s 卷" + +#: cinder/volume/drivers/scality.py:63 +msgid "Value required for 'scality_sofs_config'" +msgstr "" + +#: cinder/volume/drivers/scality.py:74 +#, python-format +msgid "Cannot access 'scality_sofs_config': %s" +msgstr "" + +#: cinder/volume/drivers/scality.py:80 +msgid "Cannot execute /sbin/mount.sofs" +msgstr "" + +#: cinder/volume/drivers/scality.py:101 +msgid "Cannot mount Scality SOFS, check syslog for errors" +msgstr "" + +#: cinder/volume/drivers/scality.py:135 +#, python-format +msgid "Cannot find volume dir for Scality SOFS at '%s'" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:47 +#, python-format +msgid "Sheepdog is not working: %s" +msgstr "Sheepdog 没有工作:%s" + +#: cinder/volume/drivers/sheepdog.py:52 +msgid "Sheepdog is not working" +msgstr "Sheepdog 没有工作" + +#: cinder/volume/drivers/solidfire.py:129 +#, python-format +msgid "Payload for SolidFire API call: %s" +msgstr "SolidFire API 调用的参数:%s" + +#: cinder/volume/drivers/solidfire.py:146 +#, python-format +msgid "Call to json.loads() raised an exception: %s" +msgstr "调用 json.loads() 引起异常:%s" + +#: cinder/volume/drivers/solidfire.py:152 +#, python-format +msgid "Results of SolidFire API call: %s" +msgstr "SolidFire API调用结果:%s" + +#: cinder/volume/drivers/solidfire.py:156 +#, python-format +msgid "Clone operation encountered: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:158 +#, python-format +msgid "Waiting for outstanding operation before retrying snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:164 +#, python-format +msgid "Detected xDBVersionMismatch, retry %s of 5" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:171 +#, fuzzy, python-format +msgid "API response: %s" +msgstr "响应 %s" + +#: cinder/volume/drivers/solidfire.py:191 +#, python-format +msgid "Found solidfire account: %s" +msgstr "找到solidfire帐户:%s" + +#: cinder/volume/drivers/solidfire.py:220 +#, python-format +msgid "solidfire account: %s does not exist, create it..." +msgstr "solidfire帐户:%s 不存在,正在创建..." + +#: cinder/volume/drivers/solidfire.py:281 +#, python-format +msgid "Failed to retrieve volume SolidFire-ID: %s in get_by_account!" 
+msgstr "" + +#: cinder/volume/drivers/solidfire.py:338 +#, fuzzy +msgid "Failed to get model update from clone" +msgstr "为ip: %s获取元数据失败" + +#: cinder/volume/drivers/solidfire.py:364 +#, python-format +msgid "More than one valid preset was detected, using %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:397 +#, python-format +msgid "Mapped SolidFire volumeID %(sfid)s to cinder ID %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:406 +#, python-format +msgid "Volume %s, not found on SF Cluster." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:409 +#, python-format +msgid "Found %(count)s volumes mapped to id: %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:476 +msgid "Enter SolidFire delete_volume..." +msgstr "进入SolidFire delete_volume..." + +#: cinder/volume/drivers/solidfire.py:480 +#, python-format +msgid "Account for Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:482 +msgid "This usually means the volume was never succesfully created." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:497 +#, python-format +msgid "Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:500 +msgid "Leaving SolidFire delete_volume" +msgstr "离开SolidFire delete_volume" + +#: cinder/volume/drivers/solidfire.py:504 +msgid "Executing SolidFire ensure_export..." +msgstr "正在执行SolidFire ensure_export..." + +#: cinder/volume/drivers/solidfire.py:509 +msgid "Executing SolidFire create_export..." +msgstr "正在执行SolidFire create_export..." + +#: cinder/volume/drivers/solidfire.py:558 +#, fuzzy +msgid "Updating cluster status info" +msgstr "更新主机状态" + +#: cinder/volume/drivers/solidfire.py:566 +#, fuzzy +msgid "Failed to get updated stats" +msgstr "无法得到最新的状态:%s" + +#: cinder/volume/drivers/storwize_svc.py:173 +#, python-format +msgid "WWPN on node %(node)s: %(wwpn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:179 +msgid "enter: do_setup" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:190 +#, fuzzy, python-format +msgid "pool %s doesn't exist" +msgstr "卷组 %s 不存在" + +#: cinder/volume/drivers/storwize_svc.py:206 +msgid "Failed to get license information." 
+msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:265 +msgid "do_setup: No configured nodes" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:267 +msgid "leave: do_setup" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:289 +msgid "enter: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:295 +#: cinder/volume/drivers/netapp/iscsi.py:169 +#: cinder/volume/drivers/netapp/iscsi.py:1182 +#: cinder/volume/drivers/netapp/iscsi.py:1535 +#: cinder/volume/drivers/netapp/nfs.py:109 +#: cinder/volume/drivers/netapp/nfs.py:328 +#: cinder/volume/drivers/netapp/nfs.py:379 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:118 +#, fuzzy, python-format +msgid "%s is not set" +msgstr "租户ID没有设" + +#: cinder/volume/drivers/storwize_svc.py:301 +msgid "" +"Password or SSH private key is required for authentication: set either " +"san_password or san_private_key option" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:309 +#, python-format +msgid "" +"Illegal value %d specified for storwize_svc_flashcopy_timeout: valid " +"values are between 0 and 600" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:317 +msgid "leave: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:331 +#, python-format +msgid "ensure_export: Volume %s not found on storage" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:356 +#, python-format +msgid "enter: _get_chap_secret_for_host: host name %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:392 +#, python-format +msgid "" +"leave: _get_chap_secret_for_host: host name %(host_name)s with secret " +"%(chap_secret)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:415 +msgid "_create_host: Cannot clean host name. Host name is not unicode or string" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:483 +#, python-format +msgid "enter: _get_host_from_connector: prefix %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:511 +#, python-format +msgid "leave: _get_host_from_connector: host %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:523 +#, python-format +msgid "enter: _create_host: host %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:538 +msgid "_create_host: No connector ports" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:551 +#, fuzzy, python-format +msgid "leave: _create_host: host %(host)s - %(host_name)s" +msgstr "把主机 %(host)s 设置为 %(state)s。" + +#: cinder/volume/drivers/storwize_svc.py:574 +#, fuzzy, python-format +msgid "enter: _map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +msgstr "正在删除有快照的卷 %(volume_name)s" + +#: cinder/volume/drivers/storwize_svc.py:609 +msgid "" +"storwize_svc_multihostmap_enabled is set to Flase, Not allow multi host " +"mapping" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:619 +#, fuzzy, python-format +msgid "volume %s mapping to multi host" +msgstr "卷 %s:跳过导出" + +#: cinder/volume/drivers/storwize_svc.py:625 +#, python-format +msgid "" +"leave: _map_vol_to_host: LUN %(result_lun)s, volume %(volume_name)s, host" +" %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:635 +#, python-format +msgid "enter: _delete_host: host %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:643 +#, python-format +msgid "leave: _delete_host: host %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:675 +#, python-format +msgid "enter: initialize_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:691 +msgid "_create_host 
failed to return the host name." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:702 +#, python-format +msgid "initialize_connection: Failed to get attributes for volume %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:709 +#, python-format +msgid "Did not find expected column name in lsvdisk: %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:711 +#, python-format +msgid "initialize_connection: Missing volume attribute for volume %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:728 +#, python-format +msgid "" +"initialize_connection: No node found in I/O group %(gid)s for volume " +"%(vol)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:736 +#, python-format +msgid "initialize_connection: Did not find a preferred node for volume %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:769 +#, python-format +msgid "" +"initialize_connection: Failed to collect return properties for volume " +"%(vol)s and connector %(conn)s.\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:774 +#, python-format +msgid "" +"leave: initialize_connection:\n" +" volume: %(vol)s\n" +" connector %(conn)s\n" +" properties: %(prop)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:792 +#, python-format +msgid "enter: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:802 +msgid "_get_host_from_connector failed to return the host name for connector" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:817 +#, python-format +msgid "" +"terminate_connection: No mapping of volume %(vol_name)s to host " +"%(host_name)s found" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:825 +#, python-format +msgid "leave: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:883 +msgid "protocol must be specified as ' iSCSI' or ' FC'" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:907 +#, python-format +msgid "enter: _create_vdisk: vdisk %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:945 +#, python-format +msgid "" +"_create_vdisk %(name)s - did not find success message in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:950 +#, python-format +msgid "leave: _create_vdisk: volume %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:962 +#: cinder/volume/drivers/storwize_svc.py:976 +#, python-format +msgid "" +"create FC mapping from %(source)s to %(target)s - did not find success " +"message in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:988 +#: cinder/volume/drivers/storwize_svc.py:998 +#, python-format +msgid "" +"create FC mapping from %(source)s to %(target)s - did not find mapping id" +" in CLI output.\n" +" stdout: %(out)s\n" +" stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1012 +#, python-format +msgid "" +"_prepare_fc_map: Failed to prepare FlashCopy from %(source)s to " +"%(target)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1039 +#, python-format +msgid "" +"Unexecpted mapping status %(status)s for mapping %(id)s. Attributes: " +"%(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1050 +#, python-format +msgid "" +"Mapping %(id)s prepare failed to complete within the allotted %(to)d " +"seconds timeout. Terminating." 
+msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1055 +#, python-format +msgid "" +"_prepare_fc_map: Failed to start FlashCopy from %(source)s to %(target)s " +"with exception %(ex)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1062 +#, fuzzy, python-format +msgid "_prepare_fc_map: %s" +msgstr "_create: %s" + +#: cinder/volume/drivers/storwize_svc.py:1069 +#, python-format +msgid "" +"_start_fc_map: Failed to start FlashCopy from %(source)s to %(target)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1080 +#, python-format +msgid "" +"enter: _run_flashcopy: execute FlashCopy from source %(source)s to target" +" %(target)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1092 +#, python-format +msgid "leave: _run_flashcopy: FlashCopy started from %(source)s to %(target)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1100 +#, python-format +msgid "enter: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1107 +#, python-format +msgid "_create_copy: Source vdisk %s does not exist" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1119 +#, python-format +msgid "" +"_create_copy: cannot get source vdisk %(src)s capacity from vdisk " +"attributes %(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1129 +#, python-format +msgid "leave: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1134 +#, python-format +msgid "enter: _get_flashcopy_mapping_attributes: mapping %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1156 +#, python-format +msgid "" +"leave: _get_flashcopy_mapping_attributes: mapping %(fc_map_id)s, " +"attributes %(attributes)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1164 +#, python-format +msgid "enter: _is_vdisk_defined: vdisk %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1166 +#, python-format +msgid "leave: _is_vdisk_defined: vdisk %(vol)s with %(str)s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1194 +#, python-format +msgid "enter: _delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1199 +#, python-format +msgid "warning: Tried to delete vdisk %s but it does not exist." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1225 +#, python-format +msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1258 +#, python-format +msgid "leave: _delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1283 +msgid "create_volume_from_snapshot: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1297 +msgid "create_cloned_volume: Source and destination size differ." 
+msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1361 +msgid "_update_volume_status: Could not get system name" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1373 +msgid "Could not get pool data from the storage" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1374 +msgid "_update_volume_status: Could not get storage pool data" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1412 +msgid "" +"Illegal value specified for storwize_svc_vol_rsize: set to either a " +"percentage (0-100) or -1" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1418 +msgid "" +"Illegal value specified for storwize_svc_vol_warning: set to a percentage" +" (0-100)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1425 +msgid "" +"Illegal value specified for storwize_svc_vol_grainsize: set to either 32," +" 64, 128, or 256" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1432 +msgid "System does not support compression" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1437 +msgid "If compression is set to True, rsize must also be set (not equal to -1)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1443 +#, python-format +msgid "" +"Illegal value %(prot)s specified for storwize_svc_connection_protocol: " +"valid values are %(enabled)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1452 +msgid "" +"Multipath is currently only supported for FC connections and not iSCSI. " +"(This is a Nova limitation.)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1464 +#, python-format +msgid "enter: _execute_command_and_parse_attributes: command %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1471 +#, fuzzy, python-format +msgid "" +"CLI Exception output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" +"%(description)s\n" +"命令:%(cmd)s\n" +"退出代码:%(exit_code)s\n" +"标准输出:%(stdout)r\n" +"标准错误输出:%(stderr)r" + +#: cinder/volume/drivers/storwize_svc.py:1488 +#, python-format +msgid "" +"leave: _execute_command_and_parse_attributes:\n" +"command: %(cmd)s\n" +"attributes: %(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1507 +#, python-format +msgid "" +"_get_hdr_dic: attribute headers and values do not match.\n" +" Headers: %(header)s\n" +" Values: %(row)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1517 +#, python-format +msgid "" +"%(fun)s: Failed with unexpected CLI output.\n" +" Command: %(cmd)s\n" +"stdout: %(out)s\n" +"stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1531 +#, python-format +msgid "" +"%(fun)s: Failed with unexpected CLI output.\n" +" Command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1539 +#, python-format +msgid "Did not find expected column in %(fun)s: %(hdr)s" +msgstr "" + +#: cinder/volume/drivers/windows.py:142 +#, fuzzy, python-format +msgid "Creating folder %s " +msgstr "正在创建存储库 %s" + +#: cinder/volume/drivers/windows.py:207 +#: cinder/volume/drivers/nexenta/volume.py:203 +#, python-format +msgid "Ignored target creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/windows.py:213 +#, fuzzy, python-format +msgid "Disk not found: %s" +msgstr "没有找到主机" + +#: cinder/volume/drivers/zadara.py:218 +#, fuzzy, python-format +msgid "Sending %(method)s to %(url)s. Body \"%(body)s\"" +msgstr "把“%(method)s”投放在 %(topic)s \"%(host)s\"" + +#: cinder/volume/drivers/zadara.py:240 +#, fuzzy, python-format +msgid "Operation completed. 
%(data)s" +msgstr "确认完成" + +#: cinder/volume/drivers/zadara.py:350 +#, fuzzy, python-format +msgid "Volume %(name)s could not be found. It might be already deleted" +msgstr "卷 %(volume_id)s 没有找到。" + +#: cinder/volume/drivers/zadara.py:447 +#, python-format +msgid "Attach properties: %(properties)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:42 +msgid "" +"Module PyWBEM not installed. Install PyWBEM using the python-pywbem " +"package." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:78 +#, fuzzy +msgid "Entering create_volume." +msgstr "进入SolidFire create_volume..." + +#: cinder/volume/drivers/emc/emc_smis_common.py:82 +#, fuzzy, python-format +msgid "Create Volume: %(volume)s Size: %(size)lu" +msgstr "卷%(vol_name)s:创建大小为%(vol_size)s的逻辑卷" + +#: cinder/volume/drivers/emc/emc_smis_common.py:90 +#, python-format +msgid "Create Volume: %(volume)s Storage type: %(storage_type)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:97 +#, python-format +msgid "" +"Create Volume: %(volume)s Pool: %(pool)s Storage System: " +"%(storage_system)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:106 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Storage Configuration Service not " +"found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:114 +#, python-format +msgid "" +"Create Volume: %(name)s Method: CreateOrModifyElementFromStoragePool " +"ConfigServicie: %(service)s ElementName: %(name)s InPool: %(pool)s " +"ElementType: 5 Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:129 +#, python-format +msgid "Create Volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:136 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:143 +#, python-format +msgid "Leaving create_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:151 +#, fuzzy +msgid "Entering create_volume_from_snapshot." +msgstr "从快照 %s 创建卷" + +#: cinder/volume/drivers/emc/emc_smis_common.py:156 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:166 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Snapshot Instance: %(snapshotinstance)s Storage " +"System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:176 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Create Volume from Snapshot is NOT supported on VMAX." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:187 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Cannot find Replication Service to create volume from " +"snapshot." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:196 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Method: CreateElementReplica ReplicationService: " +"%(service)s ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:217 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s " +"Snapshot:%(snapshotname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:229 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Successfully clone volume from snapshot. Finding the " +"clone relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:240 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Remove the clone relationship. Method: " +"ModifyReplicaSynchronization ReplicationService: %(service)s Operation: " +"8 Synchronization: %(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:256 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:265 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:277 +#, python-format +msgid "" +"Leaving create_volume_from_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:286 +#, fuzzy +msgid "Entering create_cloned_volume." +msgstr "进入SolidFire create_volume..." + +#: cinder/volume/drivers/emc/emc_smis_common.py:291 +#, python-format +msgid "" +"Create a Clone from Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:301 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Source Instance: %(src_instance)s Storage System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:311 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Cannot find Replication Service to create cloned volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:320 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Method: CreateElementReplica ReplicationService: %(service)s " +"ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:341 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source " +"Volume:%(srcname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:353 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Successfully cloned volume from source volume. Finding the clone " +"relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:364 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." 
+" Remove the clone relationship. Method: ModifyReplicaSynchronization " +"ReplicationService: %(service)s Operation: 8 Synchronization: " +"%(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:380 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:389 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:401 +#, python-format +msgid "" +"Leaving create_cloned_volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:410 +#, fuzzy +msgid "Entering delete_volume." +msgstr "进入SolidFire delete_volume..." + +#: cinder/volume/drivers/emc/emc_smis_common.py:412 +#, fuzzy, python-format +msgid "Delete Volume: %(volume)s" +msgstr "删除id为 %s 的卷" + +#: cinder/volume/drivers/emc/emc_smis_common.py:419 +#, python-format +msgid "Volume %(name)s not found on the array. No volume to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:429 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Storage Configuration Service not " +"found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:437 +#, python-format +msgid "Delete Volume: %(name)s DeviceID: %(deviceid)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:441 +#, python-format +msgid "" +"Delete Volume: %(name)s Method: EMCReturnToStoragePool ConfigServic: " +"%(service)s TheElement: %(vol_instance)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:454 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:463 +#, python-format +msgid "Leaving delete_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:470 +msgid "Entering create_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:474 +#, fuzzy, python-format +msgid "Create snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "创建卷 %s 的快照" + +#: cinder/volume/drivers/emc/emc_smis_common.py:486 +#, python-format +msgid "Device ID: %(deviceid)s: Storage System: %(storagesystem)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:493 +#: cinder/volume/drivers/emc/emc_smis_common.py:495 +#: cinder/volume/drivers/emc/emc_smis_common.py:565 +#, python-format +msgid "Cannot find Replication Service to create snapshot for volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:500 +#, python-format +msgid "" +"Create Snapshot: Method: CreateElementReplica: Target: %(snapshot)s " +"Source: %(volume)s Replication Service: %(service)s ElementName: " +"%(elementname)s Sync Type: 7 SourceElement: %(sourceelement)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:516 +#, python-format +msgid "" +"Create Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:525 +#, python-format +msgid "Error Create Snapshot: (snapshot)s Volume: %(volume)s Error: %(errordesc)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:533 +#, python-format +msgid "" +"Leaving create_snapshot: Snapshot: %(snapshot)s Volume: %(volume)s " +"Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:539 +msgid "Entering delete_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:543 +#, fuzzy, python-format +msgid "Delete Snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "创建卷 %s 的快照" + +#: cinder/volume/drivers/emc/emc_smis_common.py:549 +#, python-format +msgid "" +"Delete Snapshot: %(snapshot)s: volume: %(volume)s. Finding " +"StorageSychronization_SV_SV." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:557 +#, python-format +msgid "" +"Snapshot: %(snapshot)s: volume: %(volume)s not found on the array. No " +"snapshot to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:572 +#, python-format +msgid "" +"Delete Snapshot: Target: %(snapshot)s Source: %(volume)s. Method: " +"ModifyReplicaSynchronization: Replication Service: %(service)s " +"Operation: 19 Synchronization: %(sync_name)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:588 +#, python-format +msgid "" +"Delete Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:597 +#, python-format +msgid "" +"Error Delete Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:609 +#, python-format +msgid "" +"Leaving delete_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:619 +#, fuzzy, python-format +msgid "Create export: %(volume)s" +msgstr "重新导出卷%s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:624 +#, fuzzy, python-format +msgid "create_export: Volume: %(volume)s Device ID: %(device_id)s" +msgstr "把卷 %(volume_id)s 附加到实例 %(server_id)s 的 %(device)s 设备上" + +#: cinder/volume/drivers/emc/emc_smis_common.py:646 +#, python-format +msgid "" +"ExposePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(lun_name)s" +" InitiatorPortIDs: %(initiator)s DeviceAccesses: 2" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:661 +#, python-format +msgid "ExposePaths parameter LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:672 +#, fuzzy, python-format +msgid "Error mapping volume %s." +msgstr "启动xvp发生错误:%s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:676 +#, fuzzy, python-format +msgid "ExposePaths for volume %s completed successfully." +msgstr "卷%s:删除成功" + +#: cinder/volume/drivers/emc/emc_smis_common.py:692 +#, python-format +msgid "" +"HidePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(device_id)s " +"LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:705 +#, python-format +msgid "Error unmapping volume %s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:709 +#, fuzzy, python-format +msgid "HidePaths for volume %s completed successfully." +msgstr "卷%s:删除成功" + +#: cinder/volume/drivers/emc/emc_smis_common.py:722 +#, python-format +msgid "" +"AddMembers: ConfigServicie: %(service)s MaskingGroup: %(masking_group)s" +" Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:735 +#, python-format +msgid "Error mapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:740 +#, fuzzy, python-format +msgid "AddMembers for volume %s completed successfully." +msgstr "卷%s:删除成功" + +#: cinder/volume/drivers/emc/emc_smis_common.py:753 +#, python-format +msgid "" +"RemoveMembers: ConfigServicie: %(service)s MaskingGroup: " +"%(masking_group)s Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:766 +#, python-format +msgid "Error unmapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:771 +#, fuzzy, python-format +msgid "RemoveMembers for volume %s completed successfully." +msgstr "卷%s:删除成功" + +#: cinder/volume/drivers/emc/emc_smis_common.py:777 +#, fuzzy, python-format +msgid "Map volume: %(volume)s" +msgstr "没有id为 %(volume_id)s 的 sm_volume" + +#: cinder/volume/drivers/emc/emc_smis_common.py:786 +#: cinder/volume/drivers/emc/emc_smis_common.py:816 +#, python-format +msgid "Cannot find Controller Configuration Service for storage system %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:800 +#, fuzzy, python-format +msgid "Unmap volume: %(volume)s" +msgstr "没有id为 %(volume_id)s 的 sm_volume" + +#: cinder/volume/drivers/emc/emc_smis_common.py:806 +#, python-format +msgid "Volume %s is not mapped. No volume to unmap." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:830 +#, python-format +msgid "Initialize connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:836 +#, fuzzy, python-format +msgid "Volume %s is already mapped." +msgstr "rootfs 已经被移除了" + +#: cinder/volume/drivers/emc/emc_smis_common.py:848 +#, python-format +msgid "Terminate connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:880 +#, fuzzy, python-format +msgid "Found Storage Type: %s" +msgstr "找到solidfire帐户:%s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:883 +#, fuzzy +msgid "Storage type not found." +msgstr "镜像没有找到。" + +#: cinder/volume/drivers/emc/emc_smis_common.py:899 +#, python-format +msgid "Found Masking View: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:902 +#, fuzzy +msgid "Masking View not found." +msgstr "镜像没有找到。" + +#: cinder/volume/drivers/emc/emc_smis_common.py:924 +#, fuzzy +msgid "Ecom user not found." +msgstr "没有找到服务器。" + +#: cinder/volume/drivers/emc/emc_smis_common.py:944 +#, python-format +msgid "Ecom IP: %(ecomIp)s Port: %(ecomPort)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:947 +#, fuzzy +msgid "Ecom server not found." 
+msgstr "没有找到服务器。" + +#: cinder/volume/drivers/emc/emc_smis_common.py:954 +#, fuzzy +msgid "Cannot connect to ECOM server" +msgstr "连接到 %s 的AMQP服务器" + +#: cinder/volume/drivers/emc/emc_smis_common.py:966 +#, python-format +msgid "Found Replication Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:979 +#, python-format +msgid "Found Storage Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:992 +#, python-format +msgid "Found Controller Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1005 +#, python-format +msgid "Found Storage Hardware ID Management Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1049 +#, fuzzy, python-format +msgid "Pool %(storage_type)s is not found." +msgstr "角色 %(role_id)s 没有找到。" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1055 +#, python-format +msgid "Storage system not found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1061 +#, python-format +msgid "Pool: %(pool)s SystemName: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1077 +#, python-format +msgid "Pool name: %(poolname)s System name: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1109 +#, fuzzy, python-format +msgid "Volume %(volumename)s not found on the array." +msgstr "卷 %(volume_id)s 没有找到。" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1112 +#, fuzzy, python-format +msgid "Volume name: %(volumename)s Volume instance: %(vol_instance)s." +msgstr "没有为实例 %(instance_id)s 找到卷。" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1125 +#, python-format +msgid "Source: %(volumename)s Target: %(snapshotname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1148 +#, python-format +msgid "" +"Source: %(volumename)s Target: %(snapshotname)s. Storage Synchronized " +"not found. " +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1153 +#, python-format +msgid "" +"Storage system: %(storage_system)s Storage Synchronized instance: " +"%(sync)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1179 +#, fuzzy, python-format +msgid "Error finding %s." +msgstr "在存储库 %s 寻找VDIs出错" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1183 +#, python-format +msgid "Found %(name)s: %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1243 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage_system)s " +"and initiator %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1282 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage volume %(vol)s and initiator" +" %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1295 +#, python-format +msgid "" +"Volume %(name)s not found on the array. Cannot determine if there are " +"volumes mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1307 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage)s and " +"%(connector)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1319 +#, python-format +msgid "" +"Found %(numVolumesMapped)d volumes on storage system %(storage)s mapped " +"to %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1354 +#, python-format +msgid "Available device number on %(storage)s: %(device)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1397 +#, python-format +msgid "Device number not found for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1402 +#, python-format +msgid "Found device number %(device)d for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1412 +#, python-format +msgid "Device info: %(data)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1434 +#, python-format +msgid "Masking view: %(view)s DeviceMaskingGroup: %(masking)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1456 +#, python-format +msgid "Found Storage Processor System: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1484 +#, python-format +msgid "" +"iSCSIProtocolEndpoint for storage system %(storage_system)s and SP %(sp)s" +" is %(endpoint)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1513 +msgid "Error finding Storage Hardware ID Service." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1519 +#, python-format +msgid "" +"EMCGetTargetEndpoints: Service: %(service)s Storage HardwareIDs: " +"%(hardwareids)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1531 +msgid "Error finding Target WWNs." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1541 +#, python-format +msgid "Add target WWN: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1543 +#, python-format +msgid "Target WWNs: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1559 +#, python-format +msgid "Storage Hardware IDs for %(wwpns)s is %(foundInstances)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:158 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "无法为卷 %s 找到 iSCSI 导出" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:167 +#, fuzzy, python-format +msgid "Cannot find device number for volume %s" +msgstr "无法为卷 %s 找到 iSCSI 导出" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:197 +#, fuzzy, python-format +msgid "Found iSCSI endpoint: %s" +msgstr "引起异常 NotFound: %s" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:204 +#, python-format +msgid "ISCSI endpoint not found for SP %(sp)s on storage system %(storage)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:221 +#, python-format +msgid "ISCSI properties: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:113 +msgid "read timed out" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:138 +msgid "do_setup." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:144 +msgid "check_for_setup_error." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:149 +msgid "check_for_setup_error: Can not get device type." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:153 +#, python-format +msgid "check_for_setup_error: Device type is:%(type)s, version is:%(version)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:160 +msgid "" +"check_for_setup_error: Product version not right. Please make sure the " +"product version is V1." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:171 +msgid "_get_device_type: Storage Pool must be configured." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:180 +#, python-format +msgid "create_volume:volume name: %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:194 +#, fuzzy, python-format +msgid "delete_volume: volume name: %s." 
+msgstr "删除id为 %s 的卷" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:201 +#, python-format +msgid "delete_volume:No need to delete volume. Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:210 +#, fuzzy, python-format +msgid "create_export: volume name:%s" +msgstr "创建卷 %s 的快照" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:214 +#, python-format +msgid "create_export:Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:234 +#, python-format +msgid "initialize_connection: volume name: %(volume)s. initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:249 +#, python-format +msgid "" +"initialize_connection:Failed to find target ip for " +"initiator:%(initiatorname)s, please check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:259 +#, python-format +msgid "initialize_connection:Failed to find target iSCSI iqn. Target IP:%(ip)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:297 +#, python-format +msgid "" +"initialize_connection:host name: %(host)s, initiator name: %(ini)s, " +"hostport name: %(port)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:307 +#, python-format +msgid "" +"initialize_connection:Failed to find the given volume. volume " +"name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:359 +#, python-format +msgid "terminate_connection:volume name: %(volume)s, initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:368 +#, python-format +msgid "terminate_connection:Host does not exist. Host name:%(host)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:377 +#, python-format +msgid "terminate_connection:volume does not exist. volume name:%(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:396 +#, python-format +msgid "" +"terminate_connection:No map between host and volume. Host " +"name:%(hostname)s, volume name:%(volumename)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:414 +#, python-format +msgid "" +"terminate_connection:No initiator is added to the host. Host " +"name:%(hostname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:427 +#, python-format +msgid "create_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:434 +msgid "create_snapshot:Device does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:440 +msgid "create_snapshot:Resource pool needs 1GB valid size at least." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:447 +#, python-format +msgid "create_snapshot:Volume does not exist. Volume name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:456 +#, python-format +msgid "create_snapshot:Snapshot does not exist. Snapshot name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:468 +#, python-format +msgid "delete_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:475 +msgid "delete_snapshot:Device does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:484 +#, python-format +msgid "delete_snapshot:Snapshot does not exist. 
snapshot name:%(snap)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:497 +#, python-format +msgid "" +"create_volume_from_snapshot:snapshot name:%(snapshot)s, volume " +"name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:505 +#, python-format +msgid "" +"create_volume_from_snapshot:Device does not support create volume from " +"snapshot. Volume name:%(volume)s, snapshot name:%(snapshot)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:516 +#, python-format +msgid "" +"create_volume_from_snapshot:Snapshot does not exist. Snapshot " +"name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:567 +msgid "Config file is wrong. Controler IP, UserName and UserPassword must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:573 +#, python-format +msgid "_check_conf_file: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:584 +#, fuzzy, python-format +msgid "_read_xml:%s" +msgstr "_create: %s" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:612 +#, python-format +msgid "Write login information to xml error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:616 +#, python-format +msgid "_get_login_info error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:638 +#, python-format +msgid "" +"Config file is wrong. LUNType must be \"Thin\" or \"Thick\". " +"LUNType:%(type)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:665 +msgid "_get_lun_set_info:Use default prefetch type. Prefetch type:Intelligent." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:676 +msgid "" +"_get_lun_set_info:No available pools! Please check whether storage pool " +"is created." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:686 +#, python-format +msgid "_get_lun_set_info:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:736 +msgid "" +"_get_maximum_pool:maxpoolid is None. Please check config file and make " +"sure the \"Name\" in \"StoragePool\" is right." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:754 +#, python-format +msgid "_get_iscsi_info:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:764 +#, python-format +msgid "CLI command:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:831 +#, fuzzy, python-format +msgid "_execute_cli:%s" +msgstr "_delete: %s" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:838 +#, python-format +msgid "" +"_name_translate:Name in cinder: %(old)s, new name in storage system: " +"%(new)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:872 +#, python-format +msgid "" +"_create_hostgroup:Failed to Create hostgroup. Hostgroup name: %(name)s. " +"out:%(out)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:901 +#, python-format +msgid "" +"_add_host:Failed to add host to hostgroup. host name:%(host)s hostgroup " +"id:%(hostgroup)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:927 +#, python-format +msgid "" +"_add_initiator:Failed to add initiator. initiator name:%(name)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:941 +#, python-format +msgid "" +"_delete_initiator:ERROE:Failed to delete initiator. initiator " +"name:%(name)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:991 +#, python-format +msgid "" +"_add_hostport:Failed to add hostport. 
port name:%(port)s port "
+"information:%(info)s host id:%(host)s out:%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1009
+#, python-format
+msgid "_delete_hostport:Failed to delete host port. port id:%(portid)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1016
+#, python-format
+msgid "_get_tgt_iqn:iSCSI IP is %s."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1049
+#, python-format
+msgid "_get_tgt_iqn:iSCSI target iqn is:%s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1086
+#, python-format
+msgid ""
+"_map_lun:Failed to add hostmap. hostid:%(host)s lunid:%(lun)s "
+"hostlunid:%(hostlunid)s out:%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1122
+#, python-format
+msgid ""
+"_delete_map:There are IOs accessing the system. Retry to delete host map."
+" map id:%(mapid)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1128
+#, python-format
+msgid "_delete_map:Failed to delete host map. mapid:%(mapid)s out:%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1142
+#, python-format
+msgid "_delete_host: Failed to delete host. host id:%(hostid)s out:%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1223
+#, python-format
+msgid ""
+"_active_snapshot:Failed to activate snapshot. snapshot id:%(name)s. "
+"out:%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1237
+#, python-format
+msgid ""
+"_disable_snapshot:Failed to disable snapshot. snapshot id:%(id)s. "
+"out:%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1251
+#, python-format
+msgid ""
+"_delete_snapshot:Failed to delete snapshot. snapshot id:%(id)s. "
+"out:%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1308
+#, python-format
+msgid "_create_volume:Failed to Create volume. volume name:%(name)s. out:%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1321
+#, python-format
+msgid "_delete_volume:Failed to delete volume. Volume name:%(name)s out:%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1338
+#, python-format
+msgid ""
+"_create_luncopy:Failed to Create LUNcopy. LUNcopy name:%(name)s "
+"out:%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1352
+#, python-format
+msgid ""
+"_start_luncopy:Failed to start LUNcopy. LUNcopy id:%(luncopyid)s "
+"out:%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1382
+#, python-format
+msgid ""
+"_wait_for_luncopy:LUNcopy status is not normal. LUNcopy "
+"name:%(luncopyname)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1415
+#, python-format
+msgid ""
+"_delete_luncopy:Failed to delete LUNcopy. LUNcopy id:%(luncopyid)s "
+"out:%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1430
+#, python-format
+msgid ""
+"_create_snapshot:Failed to Create snapshot. Snapshot name:%(name)s "
+"out:%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1468
+#, python-format
+msgid ""
+"_change_lun_controller:Failed to change lun owning controller. lun "
+"id:%(lunid)s. new controller:%(controller)s. out:%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_iscsi.py:1484
+msgid "_is_resource_pool_enough:Resource pool for snapshot has not been added." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:123 +#, fuzzy, python-format +msgid "API %(name)s failed: %(reason)s" +msgstr "镜像 %(image_id)s 无法接受,原因是: %(reason)s" + +#: cinder/volume/drivers/netapp/iscsi.py:134 +#: cinder/volume/drivers/netapp/iscsi.py:1167 +#, python-format +msgid "Using WSDL: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:144 +#, fuzzy, python-format +msgid "Using DFM server: %s" +msgstr "LoggingVolumeDriver: %s" + +#: cinder/volume/drivers/netapp/iscsi.py:149 +#, fuzzy, python-format +msgid "Using storage service: %s" +msgstr "正在运行的实例:%s" + +#: cinder/volume/drivers/netapp/iscsi.py:154 +#, python-format +msgid "Using storage service prefix: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:160 +#, fuzzy, python-format +msgid "Using vfiler: %s" +msgstr "正在删除基文件:%s" + +#: cinder/volume/drivers/netapp/iscsi.py:173 +msgid "Either netapp_storage_service or netapp_storage_service_prefix must be set" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:204 +#, fuzzy +msgid "Connected to DFM server" +msgstr "连接到 %s 的AMQP服务器" + +#: cinder/volume/drivers/netapp/iscsi.py:282 +#, python-format +msgid "Discovered %(dataset_count)s datasets and %(lun_count)s LUNs" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:317 +#, fuzzy, python-format +msgid "Job failed: %s" +msgstr "未知的基文件:%s" + +#: cinder/volume/drivers/netapp/iscsi.py:346 +msgid "" +"Attempt to use volume_type without specifying " +"netapp_storage_service_prefix flag." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:350 +msgid "" +"You must set the netapp_storage_service flag in order to create volumes " +"with no volume_type." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:420 +#, fuzzy +msgid "Failed to provision dataset member" +msgstr "更新数据库失败" + +#: cinder/volume/drivers/netapp/iscsi.py:435 +msgid "No LUN was created by the provision job" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:463 +#: cinder/volume/drivers/netapp/iscsi.py:1250 +#, python-format +msgid "No entry in LUN table for volume %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:481 +#, fuzzy +msgid "Failed to remove and delete dataset LUN member" +msgstr "更新数据库失败" + +#: cinder/volume/drivers/netapp/iscsi.py:497 +#, fuzzy +msgid "Failed to remove and delete dataset Qtree member" +msgstr "更新数据库失败" + +#: cinder/volume/drivers/netapp/iscsi.py:538 +#, python-format +msgid "No entry in LUN table for volume %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:558 +#, fuzzy, python-format +msgid "Failed to get LUN details for LUN ID %s" +msgstr "为ip: %s获取元数据失败" + +#: cinder/volume/drivers/netapp/iscsi.py:575 +#, fuzzy, python-format +msgid "Failed to get host details for host ID %s" +msgstr "为ip: %s获取元数据失败" + +#: cinder/volume/drivers/netapp/iscsi.py:812 +#: cinder/volume/drivers/netapp/iscsi.py:859 +#, fuzzy, python-format +msgid "No LUN ID for volume %s" +msgstr "无法为卷 %s 找到 iSCSI 导出" + +#: cinder/volume/drivers/netapp/iscsi.py:821 +#, fuzzy, python-format +msgid "Failed to get target portal for filer: %s" +msgstr "为ip: %s获取元数据失败" + +#: cinder/volume/drivers/netapp/iscsi.py:826 +#, fuzzy, python-format +msgid "Failed to get target IQN for filer: %s" +msgstr "为ip: %s获取元数据失败" + +#: cinder/volume/drivers/netapp/iscsi.py:930 +msgid "" +"Failed to get LUN list. Is the DFM host time-synchronized with Cinder " +"host?" 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1042 +#: cinder/volume/drivers/netapp/iscsi.py:1381 +#: cinder/volume/drivers/netapp/iscsi.py:1699 +#: cinder/volume/drivers/netapp/nfs.py:74 +#, python-format +msgid "" +"Cannot create volume of size %(vol_size)s from snapshot of size " +"%(snap_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1054 +#, python-format +msgid "" +"Cannot create volume of type %(new_type)s from snapshot of type " +"%(old_type)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1075 +#: cinder/volume/drivers/netapp/nfs.py:273 +#, python-format +msgid "" +"Cannot create clone of size %(vol_size)s from volume of size " +"%(src_vol_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1086 +#, python-format +msgid "Cannot create clone of type %(new_type)s from volume of type %(old_type)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1143 +#, python-format +msgid "No metadata property %(prop)s defined for the LUN %(name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1216 +#: cinder/volume/drivers/netapp/iscsi.py:1561 +msgid "Success getting LUN list from server" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1238 +#: cinder/volume/drivers/netapp/iscsi.py:1576 +#, fuzzy, python-format +msgid "Created LUN with name %s" +msgstr "已经创建路径为 %s 的目录" + +#: cinder/volume/drivers/netapp/iscsi.py:1254 +#: cinder/volume/drivers/netapp/iscsi.py:1369 +#: cinder/volume/drivers/netapp/iscsi.py:1593 +#, fuzzy, python-format +msgid "Destroyed LUN %s" +msgstr "嵌套(调用)返回 %s" + +#: cinder/volume/drivers/netapp/iscsi.py:1291 +#, python-format +msgid "Mapped LUN %(handle)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1298 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(handle)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1303 +#: cinder/volume/drivers/netapp/iscsi.py:1637 +#, fuzzy, python-format +msgid "Failed to get LUN target details for the LUN %s" +msgstr "为ip: %s获取元数据失败" + +#: cinder/volume/drivers/netapp/iscsi.py:1307 +#: cinder/volume/drivers/netapp/iscsi.py:1648 +#, fuzzy, python-format +msgid "Failed to get target portal for the LUN %s" +msgstr "为ip: %s获取元数据失败" + +#: cinder/volume/drivers/netapp/iscsi.py:1311 +#: cinder/volume/drivers/netapp/iscsi.py:1651 +#, fuzzy, python-format +msgid "Failed to get target IQN for the LUN %s" +msgstr "为ip: %s获取元数据失败" + +#: cinder/volume/drivers/netapp/iscsi.py:1344 +#, python-format +msgid "Unmapped LUN %(handle)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1365 +#, python-format +msgid "No entry in LUN table for snapshot %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1409 +#: cinder/volume/drivers/netapp/iscsi.py:1907 +msgid "Object is not a NetApp LUN." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1419 +#: cinder/volume/drivers/netapp/iscsi.py:2188 +#, python-format +msgid "Cloned LUN with new name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1439 +#, fuzzy, python-format +msgid "Could not find handle for LUN named %s" +msgstr "无法为卷 %s 找到 iSCSI 导出" + +#: cinder/volume/drivers/netapp/iscsi.py:1458 +#: cinder/volume/drivers/netapp/iscsi.py:1936 +#, python-format +msgid "" +"Cannot clone volume of size %(vol_size)s from src volume of size " +"%(src_vol_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1518 +#, fuzzy, python-format +msgid "Using NetApp filer: %s" +msgstr "正在删除基文件:%s" + +#: cinder/volume/drivers/netapp/iscsi.py:1585 +#, python-format +msgid "No entry in LUN table for volume/snapshot %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1628 +#, python-format +msgid "Mapped LUN %(name)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1632 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(name)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1688 +#, fuzzy, python-format +msgid "Snapshot %s deletion successful" +msgstr "快照 %s:删除成功" + +#: cinder/volume/drivers/netapp/iscsi.py:1717 +#, python-format +msgid "Unmapped LUN %(name)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1735 +#, fuzzy, python-format +msgid "Failed to get vol with required size for volume: %s" +msgstr "为ip: %s获取元数据失败" + +#: cinder/volume/drivers/netapp/iscsi.py:1811 +#, python-format +msgid "Error mapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1829 +#, python-format +msgid "Error unmapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1923 +#, fuzzy, python-format +msgid "Could not find attribute for LUN named %s" +msgstr "无法为卷 %s 找到 iSCSI 导出" + +#: cinder/volume/drivers/netapp/iscsi.py:2057 +#, python-format +msgid "No iscsi service found for vserver %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2192 +#, python-format +msgid "No clonned lun named %s found on the filer" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2377 +#, python-format +msgid "Error finding luns for volume %(vol)s. Verify volume exists." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2481 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s completed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2484 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s failed" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:312 +#, python-format +msgid "" +"Cloning with params ip %(host_ip)s, exp_path\n" +" %(export_path)s, vol %(volume_name)s,\n" +" clone_name %(clone_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:461 +#, python-format +msgid "No interface found on cluster for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:487 +#, python-format +msgid "" +"No volume on cluster with vserver\n" +" %(vserver)s and junction path " +"%(junction)s\n" +" " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:493 +#, python-format +msgid "" +"Cloning with params volume %(volume)s,src %(src_path)s,\n" +" dest %(dest_path)s, vserver %(vserver)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:547 +#, python-format +msgid "No storage path found for export path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:555 +#, python-format +msgid "Cloning with src %(src_path)s, dest %(dest_path)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/__init__.py:27 +#, fuzzy +msgid "Nexenta SA returned the error" +msgstr "服务器返回错误:%s" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:64 +#, fuzzy, python-format +msgid "Sending JSON data: %s" +msgstr "给定数据:%s" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:69 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:75 +msgid "No headers in server response" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:76 +#, fuzzy +msgid "Bad response from server" +msgstr "来自SolidFire API的错误响应" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:79 +#, fuzzy, python-format +msgid "Got response: %s" +msgstr "响应 %s" + +#: cinder/volume/drivers/nexenta/volume.py:99 +#, fuzzy, python-format +msgid "Volume %s does not exist in Nexenta SA" +msgstr "卷组 %s 不存在" + +#: cinder/volume/drivers/nexenta/volume.py:211 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:220 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:228 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:238 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:274 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:282 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:74 +#, python-format +msgid "CLIQ command returned %s" +msgstr "返回CLIQ命令 %s" + +#: cinder/volume/drivers/san/hp_lefthand.py:80 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. 
Result=%(out)s" +msgstr "CLIQ命令 %(verb)s %(cliq_args)s 错误格式的响应。Result=%(out)s" + +#: cinder/volume/drivers/san/hp_lefthand.py:88 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "在运行CLIQ命令 %(verb)s %(cliq_args)s 时发生错误。输出结果 Result=%(out)s" + +#: cinder/volume/drivers/san/hp_lefthand.py:118 +#, python-format +msgid "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. " +"Result=%(_xml)s" +msgstr "集群 %(cluster_name)s 有意外数量的虚拟 ip 地址。输出结果 Result=%(_xml)s" + +#: cinder/volume/drivers/san/hp_lefthand.py:171 +#, python-format +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" +msgstr "卷信息:%(volume_name)s => %(volume_attributes)s" + +#: cinder/volume/drivers/san/hp_lefthand.py:235 +msgid "local_path not supported" +msgstr "不支持local_path" + +#: cinder/volume/drivers/san/san.py:148 +#, fuzzy, python-format +msgid "Error running SSH command: %s" +msgstr "握手出错:%s" + +#: cinder/volume/drivers/san/san.py:169 +msgid "Specify san_password or san_private_key" +msgstr "指定san_password或者san_private_key" + +#: cinder/volume/drivers/san/san.py:173 +msgid "san_ip must be set" +msgstr "san_ip必须设置" + +#: cinder/volume/drivers/san/solaris.py:80 +#, python-format +msgid "Cannot parse list-view output: %s" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:174 +#, python-format +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgstr "没有为 %(zfs_poolname)s 找到LUID。Output=%(out)s" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:257 +#, python-format +msgid "SSH Command failed after '%(total_attempts)r' attempts: '%(command)s'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:261 +#, fuzzy, python-format +msgid "Error running ssh command: %s" +msgstr "握手出错:%s" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:446 +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:104 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:103 +#, python-format +msgid "CPG (%s) doesn't exist on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:493 +#, python-format +msgid "Must specify a valid persona %(valid)s, value '%(persona)s' is invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:532 +#, python-format +msgid "" +"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " +"invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:569 +#, fuzzy, python-format +msgid "Volume (%s) already exists on array" +msgstr "安全组 %s 已经存在" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:622 +#, fuzzy, python-format +msgid "Unexpected state while cloning %s" +msgstr "运行命令时出现意外错误" + +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:97 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:96 +msgid "Login to 3PAR array invalid" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:218 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:237 +#, python-format +msgid "The hostname must be called '%s'" +msgstr "" + diff --git a/cinder/locale/zh_TW/LC_MESSAGES/cinder.po b/cinder/locale/zh_TW/LC_MESSAGES/cinder.po new file mode 100644 index 0000000000..7bbe63bbbf --- /dev/null +++ b/cinder/locale/zh_TW/LC_MESSAGES/cinder.po @@ -0,0 +1,5598 @@ +# Chinese (Traditional) translation for cinder +# Copyright (c) 2011 Rosetta Contributors and Canonical Ltd 2011 +# This file is distributed under the same license as the cinder package. +# FIRST AUTHOR , 2011. 
+#
+msgid ""
+msgstr ""
+"Project-Id-Version: cinder\n"
+"Report-Msgid-Bugs-To: FULL NAME \n"
+"POT-Creation-Date: 2013-05-21 18:10+0000\n"
+"PO-Revision-Date: 2012-03-07 02:00+0000\n"
+"Last-Translator: Charles Hsu \n"
+"Language-Team: Chinese (Traditional) \n"
+"Plural-Forms: nplurals=1; plural=0\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=utf-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Generated-By: Babel 0.9.6\n"
+
+#: cinder/context.py:61
+#, python-format
+msgid "Arguments dropped when creating context: %s"
+msgstr ""
+
+#: cinder/context.py:92
+#, python-format
+msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r"
+msgstr ""
+
+#: cinder/exception.py:63
+msgid "Unexpected error while running command."
+msgstr "非預期的執行錯誤"
+
+#: cinder/exception.py:66
+#, python-format
+msgid ""
+"%(description)s\n"
+"Command: %(cmd)s\n"
+"Exit code: %(exit_code)s\n"
+"Stdout: %(stdout)r\n"
+"Stderr: %(stderr)r"
+msgstr ""
+"%(description)s\n"
+"命令: %(cmd)s\n"
+"退出代碼: %(exit_code)s\n"
+"標準輸出: %(stdout)r\n"
+"標準錯誤輸出: %(stderr)r"
+
+#: cinder/exception.py:90
+msgid "DB exception wrapped."
+msgstr ""
+
+#: cinder/exception.py:104
+msgid "An unknown exception occurred."
+msgstr "發生一個未知例外"
+
+#: cinder/exception.py:125 cinder/openstack/common/rpc/common.py:86
+msgid "Exception in string format operation"
+msgstr ""
+
+#: cinder/exception.py:138
+msgid "Connection to glance failed"
+msgstr "連接到glance失敗"
+
+#: cinder/exception.py:142
+msgid "Not authorized."
+msgstr "未被授權"
+
+#: cinder/exception.py:147
+msgid "User does not have admin privileges"
+msgstr "使用者並沒有管理者權力"
+
+#: cinder/exception.py:151
+#, python-format
+msgid "Policy doesn't allow %(action)s to be performed."
+msgstr ""
+
+#: cinder/exception.py:155
+#, python-format
+msgid "Not authorized for image %(image_id)s."
+msgstr ""
+
+#: cinder/exception.py:159
+msgid "Unacceptable parameters."
+msgstr "不可接受的參數值"
+
+#: cinder/exception.py:164
+msgid "Invalid snapshot"
+msgstr "無效的快照(snapshot)"
+
+#: cinder/exception.py:168
+#, python-format
+msgid "Volume %(volume_id)s is still attached, detach volume first."
+msgstr ""
+
+#: cinder/exception.py:172
+msgid "Failed to load data into json format"
+msgstr ""
+
+#: cinder/exception.py:176
+msgid "The request is invalid."
+msgstr ""
+
+#: cinder/exception.py:180
+msgid "The results are invalid."
+msgstr ""
+
+#: cinder/exception.py:184
+msgid "Invalid input received"
+msgstr ""
+
+#: cinder/exception.py:188
+msgid "Invalid volume type"
+msgstr ""
+
+#: cinder/exception.py:192
+msgid "Invalid volume"
+msgstr ""
+
+#: cinder/exception.py:196
+#, python-format
+msgid "Invalid content type %(content_type)s."
+msgstr ""
+
+#: cinder/exception.py:200
+msgid "Invalid Parameter: Unicode is not supported by the current database."
+msgstr ""
+
+#: cinder/exception.py:207
+#, python-format
+msgid "%(err)s"
+msgstr ""
+
+#: cinder/exception.py:211
+msgid "Service is unavailable at this time."
+msgstr ""
+
+#: cinder/exception.py:215
+#, python-format
+msgid "Image %(image_id)s is unacceptable: %(reason)s"
+msgstr ""
+
+#: cinder/exception.py:219
+#, python-format
+msgid "Expected a uuid but received %(uuid)s."
+msgstr ""
+
+#: cinder/exception.py:223
+msgid "Resource could not be found."
+msgstr ""
+
+#: cinder/exception.py:229
+#, python-format
+msgid "Volume %(volume_id)s persistence file could not be found."
+msgstr ""
+
+#: cinder/exception.py:233
+#, python-format
+msgid "Volume %(volume_id)s could not be found." 
+msgstr "" + +#: cinder/exception.py:237 +#, python-format +msgid "Unable to locate account %(account_name)s on Solidfire device" +msgstr "" + +#: cinder/exception.py:242 +#, python-format +msgid "Volume not found for instance %(instance_id)s." +msgstr "" + +#: cinder/exception.py:246 +#, python-format +msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:251 cinder/exception.py:264 +msgid "Invalid metadata" +msgstr "" + +#: cinder/exception.py:255 cinder/exception.py:268 +msgid "Invalid metadata size" +msgstr "" + +#: cinder/exception.py:259 +#, python-format +msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:272 +#, python-format +msgid "Volume type %(volume_type_id)s could not be found." +msgstr "" + +#: cinder/exception.py:276 +#, python-format +msgid "Volume type with name %(volume_type_name)s could not be found." +msgstr "" + +#: cinder/exception.py:281 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s has no extra specs with key " +"%(extra_specs_key)s." +msgstr "" + +#: cinder/exception.py:286 +#, python-format +msgid "Snapshot %(snapshot_id)s could not be found." +msgstr "" + +#: cinder/exception.py:290 +#, python-format +msgid "deleting volume %(volume_name)s that has snapshot" +msgstr "" + +#: cinder/exception.py:294 +#, python-format +msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" +msgstr "" + +#: cinder/exception.py:299 +#, python-format +msgid "No target id found for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:303 +#, python-format +msgid "Failed to create iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:307 +#, python-format +msgid "Failed to attach iSCSI target for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:311 +#, python-format +msgid "Failed to remove iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:315 +#, python-format +msgid "No disk at %(location)s" +msgstr "" + +#: cinder/exception.py:319 +#, python-format +msgid "Invalid image href %(image_href)s." +msgstr "" + +#: cinder/exception.py:323 +#, python-format +msgid "Image %(image_id)s could not be found." +msgstr "" + +#: cinder/exception.py:327 +#, python-format +msgid "Service %(service_id)s could not be found." +msgstr "" + +#: cinder/exception.py:331 +#, python-format +msgid "Host %(host)s could not be found." +msgstr "" + +#: cinder/exception.py:335 +#, python-format +msgid "Scheduler Host Filter %(filter_name)s could not be found." +msgstr "" + +#: cinder/exception.py:339 +#, python-format +msgid "Scheduler Host Weigher %(weigher_name)s could not be found." +msgstr "" + +#: cinder/exception.py:343 +#, python-format +msgid "Could not find binary %(binary)s on host %(host)s." +msgstr "" + +#: cinder/exception.py:347 +#, python-format +msgid "Invalid reservation expiration %(expire)s." +msgstr "" + +#: cinder/exception.py:351 +#, python-format +msgid "" +"Change would make usage less than 0 for the following resources: " +"%(unders)s" +msgstr "" + +#: cinder/exception.py:356 +msgid "Quota could not be found" +msgstr "" + +#: cinder/exception.py:360 +#, python-format +msgid "Unknown quota resources %(unknown)s." +msgstr "" + +#: cinder/exception.py:364 +#, python-format +msgid "Quota for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:368 +#, python-format +msgid "Quota class %(class_name)s could not be found." 
+msgstr "" + +#: cinder/exception.py:372 +#, python-format +msgid "Quota usage for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:376 +#, python-format +msgid "Quota reservation %(uuid)s could not be found." +msgstr "" + +#: cinder/exception.py:380 +#, python-format +msgid "Quota exceeded for resources: %(overs)s" +msgstr "" + +#: cinder/exception.py:384 +#, python-format +msgid "Migration %(migration_id)s could not be found." +msgstr "" + +#: cinder/exception.py:388 +#, python-format +msgid "Migration not found for instance %(instance_id)s with status %(status)s." +msgstr "" + +#: cinder/exception.py:393 +#, python-format +msgid "File %(file_path)s could not be found." +msgstr "" + +#: cinder/exception.py:397 +#, python-format +msgid "Class %(class_name)s could not be found: %(exception)s" +msgstr "" + +#: cinder/exception.py:401 +msgid "Action not allowed." +msgstr "" + +#: cinder/exception.py:410 +#, python-format +msgid "Key pair %(key_name)s already exists." +msgstr "" + +#: cinder/exception.py:414 +#, python-format +msgid "Volume Type %(id)s already exists." +msgstr "" + +#: cinder/exception.py:418 +msgid "Migration error" +msgstr "" + +#: cinder/exception.py:422 +#, python-format +msgid "Malformed message body: %(reason)s" +msgstr "" + +#: cinder/exception.py:426 +#, python-format +msgid "Could not find config at %(path)s" +msgstr "" + +#: cinder/exception.py:430 +#, python-format +msgid "Could not load paste app '%(name)s' from %(path)s" +msgstr "" + +#: cinder/exception.py:434 +#, python-format +msgid "No valid host was found. %(reason)s" +msgstr "" + +#: cinder/exception.py:438 +#, python-format +msgid "Host %(host)s is not up or doesn't exist." +msgstr "" + +#: cinder/exception.py:442 +msgid "Quota exceeded" +msgstr "" + +#: cinder/exception.py:449 +msgid "Requested volume or snapshot exceeds allowed Gigabytes quota" +msgstr "" + +#: cinder/exception.py:454 +msgid "Maximum volume/snapshot size exceeded" +msgstr "" + +#: cinder/exception.py:458 +#, python-format +msgid "Maximum number of volumes allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:462 +#, python-format +msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:466 +#, python-format +msgid "Detected more than one volume with name %(vol_name)s" +msgstr "" + +#: cinder/exception.py:470 +#, python-format +msgid "3PAR Host already exists: %(err)s. %(info)s" +msgstr "" + +#: cinder/exception.py:474 +#, python-format +msgid "Invalid 3PAR Domain: %(err)s" +msgstr "" + +#: cinder/exception.py:478 +#, python-format +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +msgstr "" + +#: cinder/exception.py:483 +msgid "Bad response from SolidFire API" +msgstr "" + +#: cinder/exception.py:487 +#, python-format +msgid "Error in SolidFire API response: data=%(data)s" +msgstr "" + +#: cinder/exception.py:491 +#, python-format +msgid "Unknown or unsupported command %(cmd)s" +msgstr "" + +#: cinder/exception.py:495 +#, python-format +msgid "Malformed response to command %(cmd)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:499 +#, python-format +msgid "Bad HTTP response status %(status)s" +msgstr "" + +#: cinder/exception.py:503 +#, python-format +msgid "Operation failed with status=%(status)s. 
Full dump: %(data)s" +msgstr "" + +#: cinder/exception.py:507 +#, fuzzy, python-format +msgid "Unable to create server object for initiator %(name)s" +msgstr "無法替 instance實例 %(instance_name)s , 建立 VDI 在SR %(sr_ref)s" + +#: cinder/exception.py:511 +#, python-format +msgid "Unable to find server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:515 +msgid "Unable to find any active VPSA controller" +msgstr "" + +#: cinder/exception.py:519 +#, python-format +msgid "Failed to retrieve attachments for volume %(name)s" +msgstr "" + +#: cinder/exception.py:523 +#, python-format +msgid "Invalid attachment info for volume %(name)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:527 +#, python-format +msgid "Instance %(instance_id)s could not be found." +msgstr "" + +#: cinder/exception.py:531 +#, python-format +msgid "Bad or unexpected response from the storage volume backend API: %(data)s" +msgstr "" + +#: cinder/exception.py:536 +#, fuzzy +msgid "Unknown NFS exception" +msgstr "發生一個未知例外" + +#: cinder/exception.py:540 +msgid "No mounted NFS shares found" +msgstr "" + +#: cinder/exception.py:544 cinder/exception.py:556 +#, python-format +msgid "There is no share which can host %(volume_size)sG" +msgstr "" + +#: cinder/exception.py:548 +#, fuzzy +msgid "Unknown Gluster exception" +msgstr "發生一個未知例外" + +#: cinder/exception.py:552 +msgid "No mounted Gluster shares found" +msgstr "" + +#: cinder/exception.py:560 +#, python-format +msgid "" +"Glance metadata cannot be updated, key %(key)s exists for volume id " +"%(volume_id)s" +msgstr "" + +#: cinder/exception.py:565 +msgid "Failed to copy image to volume" +msgstr "" + +#: cinder/exception.py:569 +#, python-format +msgid "Backup %(backup_id)s could not be found." +msgstr "" + +#: cinder/exception.py:573 +#, python-format +msgid "Invalid backup: %(reason)s" +msgstr "" + +#: cinder/exception.py:577 +#, fuzzy +msgid "Connection to swift failed" +msgstr "連接到glance失敗" + +#: cinder/flags.py:145 +msgid "Deploy v1 of the Cinder API. " +msgstr "" + +#: cinder/flags.py:148 +msgid "Deploy v2 of the Cinder API. " +msgstr "" + +#: cinder/manager.py:158 +#, python-format +msgid "Skipping %(full_task_name)s, %(ticks_to_skip)s ticks left until next run" +msgstr "" + +#: cinder/manager.py:164 +#, python-format +msgid "Running periodic task %(full_task_name)s" +msgstr "" + +#: cinder/manager.py:171 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "" + +#: cinder/manager.py:216 +msgid "Notifying Schedulers of capabilities ..." 
+msgstr "" + +#: cinder/policy.py:30 +msgid "JSON file representing policy" +msgstr "" + +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" +msgstr "" + +#: cinder/quota.py:704 +#, python-format +msgid "Created reservations %(reservations)s" +msgstr "" + +#: cinder/quota.py:726 +#, python-format +msgid "Failed to commit reservations %(reservations)s" +msgstr "" + +#: cinder/quota.py:747 +#, python-format +msgid "Failed to roll back reservations %(reservations)s" +msgstr "" + +#: cinder/service.py:124 +msgid "SIGTERM received" +msgstr "" + +#: cinder/service.py:174 cinder/openstack/common/service.py:164 +msgid "Parent process has died unexpectedly, exiting" +msgstr "" + +#: cinder/service.py:213 cinder/openstack/common/service.py:200 +msgid "Forking too fast, sleeping" +msgstr "" + +#: cinder/service.py:231 cinder/openstack/common/service.py:121 +#: cinder/openstack/common/service.py:218 +#, python-format +msgid "Caught %s, exiting" +msgstr "" + +#: cinder/service.py:236 cinder/openstack/common/service.py:223 +msgid "Unhandled exception" +msgstr "" + +#: cinder/service.py:243 cinder/openstack/common/service.py:230 +#, python-format +msgid "Started child %d" +msgstr "" + +#: cinder/service.py:253 cinder/openstack/common/service.py:240 +#, python-format +msgid "Starting %d workers" +msgstr "" + +#: cinder/service.py:272 cinder/openstack/common/service.py:257 +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "" + +#: cinder/service.py:275 +#, python-format +msgid "Child %(pid)d exited with status %(code)d" +msgstr "" + +#: cinder/service.py:278 cinder/openstack/common/service.py:265 +#, python-format +msgid "pid %d not in child list" +msgstr "" + +#: cinder/service.py:286 +#, python-format +msgid "_wait_child %d" +msgstr "" + +#: cinder/service.py:302 +#, python-format +msgid "wait wrap.failed %s" +msgstr "" + +#: cinder/service.py:310 cinder/openstack/common/service.py:293 +#, python-format +msgid "Caught %s, stopping children" +msgstr "" + +#: cinder/service.py:321 cinder/openstack/common/service.py:304 +#, python-format +msgid "Waiting on %d children to exit" +msgstr "" + +#: cinder/service.py:353 +#, python-format +msgid "Starting %(topic)s node (version %(version_string)s)" +msgstr "" + +#: cinder/service.py:367 cinder/openstack/common/rpc/service.py:47 +#, python-format +msgid "Creating Consumer connection for Service %s" +msgstr "" + +#: cinder/service.py:458 +msgid "Service killed that has no database entry" +msgstr "" + +#: cinder/service.py:495 +msgid "The service database object disappeared, Recreating it." +msgstr "" + +#: cinder/service.py:510 +msgid "Recovered model server connection!" +msgstr "" + +#: cinder/service.py:516 +msgid "model server went away" +msgstr "" + +#: cinder/service.py:608 +msgid "Full set of FLAGS:" +msgstr "" + +#: cinder/service.py:615 +#, python-format +msgid "%(flag)s : FLAG SET " +msgstr "" + +#: cinder/utils.py:96 +#, python-format +msgid "Fetching %s" +msgstr "" + +#: cinder/utils.py:147 cinder/openstack/common/processutils.py:122 +#, python-format +msgid "Got unknown keyword args to utils.execute: %r" +msgstr "" + +#: cinder/utils.py:153 +msgid "" +"The root_helper option (which lets you specify a root wrapper different " +"from cinder-rootwrap, and defaults to using sudo) is now deprecated. You " +"should use the rootwrap_config option instead." 
+msgstr "" + +#: cinder/utils.py:169 cinder/openstack/common/processutils.py:137 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "" + +#: cinder/utils.py:186 cinder/utils.py:267 +#: cinder/openstack/common/processutils.py:162 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:224 +#, python-format +msgid "Result was %s" +msgstr "" + +#: cinder/utils.py:199 cinder/openstack/common/processutils.py:174 +#, python-format +msgid "%r failed. Retrying." +msgstr "" + +#: cinder/utils.py:241 cinder/volume/drivers/san/hp/hp_3par_common.py:197 +#, python-format +msgid "Running cmd (SSH): %s" +msgstr "" + +#: cinder/utils.py:243 +msgid "Environment not supported over SSH" +msgstr "" + +#: cinder/utils.py:247 +msgid "process_input not supported over SSH" +msgstr "" + +#: cinder/utils.py:316 cinder/volume/drivers/huawei/huawei_iscsi.py:91 +msgid "Specify a password or private_key" +msgstr "" + +#: cinder/utils.py:332 cinder/volume/drivers/huawei/huawei_iscsi.py:99 +#, python-format +msgid "Error connecting via ssh: %s" +msgstr "" + +#: cinder/utils.py:373 +#, python-format +msgid "debug in callback: %s" +msgstr "" + +#: cinder/utils.py:532 +#, python-format +msgid "Link Local address is not found.:%s" +msgstr "" + +#: cinder/utils.py:535 +#, python-format +msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +msgstr "" + +#: cinder/utils.py:569 +#, python-format +msgid "Invalid backend: %s" +msgstr "" + +#: cinder/utils.py:580 +#, python-format +msgid "backend %s" +msgstr "" + +#: cinder/utils.py:630 +msgid "in looping call" +msgstr "" + +#: cinder/utils.py:813 +#, python-format +msgid "Expected object of type: %s" +msgstr "" + +#: cinder/utils.py:903 +#, python-format +msgid "timefunc: '%(name)s' took %(total_time).2f secs" +msgstr "" + +#: cinder/utils.py:1105 +#, python-format +msgid "Could not remove tmpdir: %s" +msgstr "" + +#: cinder/utils.py:1213 +#, python-format +msgid "Unknown byte multiplier: %s" +msgstr "" + +#: cinder/wsgi.py:121 +#, python-format +msgid "Unable to find cert_file : %s" +msgstr "" + +#: cinder/wsgi.py:124 +#, fuzzy, python-format +msgid "Unable to find ca_file : %s" +msgstr "找不到Volume %s" + +#: cinder/wsgi.py:127 +#, python-format +msgid "Unable to find key_file : %s" +msgstr "" + +#: cinder/wsgi.py:130 +msgid "" +"When running server in SSL mode, you must specify both a cert_file and " +"key_file option value in your configuration file" +msgstr "" + +#: cinder/wsgi.py:163 +#, python-format +msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" +msgstr "" + +#: cinder/wsgi.py:207 +#, python-format +msgid "Started %(name)s on %(_host)s:%(_port)s" +msgstr "" + +#: cinder/wsgi.py:226 +msgid "Stopping WSGI server." +msgstr "" + +#: cinder/wsgi.py:240 +msgid "WSGI server has stopped." +msgstr "" + +#: cinder/wsgi.py:309 +msgid "You must implement __call__" +msgstr "" + +#: cinder/api/auth.py:26 +msgid "" +"cinder.api.auth:CinderKeystoneContext is deprecated. Please use " +"cinder.api.middleware.auth:CinderKeystoneContext instead." +msgstr "" + +#: cinder/api/auth.py:34 +msgid "" +"cinder.api.auth:pipeline_factory is deprecated. Please use " +"cinder.api.middleware.auth:pipeline_factory instead." 
+msgstr "" + +#: cinder/api/common.py:63 cinder/api/common.py:97 cinder/volume/api.py:359 +msgid "limit param must be an integer" +msgstr "" + +#: cinder/api/common.py:66 cinder/api/common.py:101 cinder/volume/api.py:356 +msgid "limit param must be positive" +msgstr "" + +#: cinder/api/common.py:91 +msgid "offset param must be an integer" +msgstr "" + +#: cinder/api/common.py:105 +msgid "offset param must be positive" +msgstr "" + +#: cinder/api/common.py:133 +#, python-format +msgid "marker [%s] not found" +msgstr "" + +#: cinder/api/common.py:160 +#, python-format +msgid "href %s does not contain version" +msgstr "" + +#: cinder/api/extensions.py:184 +msgid "Initializing extension manager." +msgstr "" + +#: cinder/api/extensions.py:199 +#, python-format +msgid "Loaded extension: %s" +msgstr "" + +#: cinder/api/extensions.py:237 +#, python-format +msgid "Ext name: %s" +msgstr "" + +#: cinder/api/extensions.py:238 +#, python-format +msgid "Ext alias: %s" +msgstr "" + +#: cinder/api/extensions.py:239 +#, python-format +msgid "Ext description: %s" +msgstr "" + +#: cinder/api/extensions.py:241 +#, python-format +msgid "Ext namespace: %s" +msgstr "" + +#: cinder/api/extensions.py:242 +#, python-format +msgid "Ext updated: %s" +msgstr "" + +#: cinder/api/extensions.py:244 +#, python-format +msgid "Exception loading extension: %s" +msgstr "" + +#: cinder/api/extensions.py:258 +#, python-format +msgid "Loading extension %s" +msgstr "" + +#: cinder/api/extensions.py:264 +#, python-format +msgid "Calling extension factory %s" +msgstr "" + +#: cinder/api/extensions.py:278 +#, python-format +msgid "osapi_volume_extension is set to deprecated path: %s" +msgstr "" + +#: cinder/api/extensions.py:280 +#, python-format +msgid "" +"Please set your flag or cinder.conf settings for osapi_volume_extension " +"to: %s" +msgstr "" + +#: cinder/api/extensions.py:289 +#, python-format +msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:357 +#, python-format +msgid "Failed to load extension %(classpath)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:381 +#, python-format +msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgstr "" + +#: cinder/api/sizelimit.py:25 +msgid "" +"cinder.api.sizelimit:RequestBodySizeLimiter is deprecated. Please use " +"cinder.api.middleware.sizelimit:RequestBodySizeLimiter instead" +msgstr "" + +#: cinder/api/xmlutil.py:268 +msgid "element is not a child" +msgstr "" + +#: cinder/api/xmlutil.py:417 +msgid "root element selecting a list" +msgstr "" + +#: cinder/api/xmlutil.py:742 +#, python-format +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgstr "" + +#: cinder/api/xmlutil.py:861 +msgid "subclasses must implement construct()!" 
+msgstr "" + +#: cinder/api/contrib/admin_actions.py:80 +#, python-format +msgid "Updating %(resource)s '%(id)s' with '%(update)r'" +msgstr "" + +#: cinder/api/contrib/backups.py:128 +#, python-format +msgid "show called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:140 +#, python-format +msgid "delete called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:143 +#, python-format +msgid "Delete backup with id: %s" +msgstr "" + +#: cinder/api/contrib/backups.py:185 +#, python-format +msgid "Creating new backup %s" +msgstr "" + +#: cinder/api/contrib/backups.py:195 cinder/api/contrib/backups.py:230 +msgid "Incorrect request body format" +msgstr "" + +#: cinder/api/contrib/backups.py:201 +#, python-format +msgid "Creating backup of volume %(volume_id)s in container %(container)s" +msgstr "" + +#: cinder/api/contrib/backups.py:221 +#, python-format +msgid "Restoring backup %(backup_id)s (%(body)s)" +msgstr "" + +#: cinder/api/contrib/backups.py:234 +#, python-format +msgid "Restoring backup %(backup_id)s to volume %(volume_id)s" +msgstr "" + +#: cinder/api/contrib/extended_snapshot_attributes.py:61 +msgid "Snapshot not found." +msgstr "" + +#: cinder/api/contrib/hosts.py:83 cinder/api/openstack/wsgi.py:158 +msgid "cannot understand XML" +msgstr "" + +#: cinder/api/contrib/hosts.py:133 +#, python-format +msgid "Host '%s' could not be found." +msgstr "" + +#: cinder/api/contrib/hosts.py:162 +#, python-format +msgid "Invalid status: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:165 +#, python-format +msgid "Invalid update setting: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:177 +#, python-format +msgid "Setting host %(host)s to %(state)s." +msgstr "" + +#: cinder/api/contrib/hosts.py:202 +msgid "Describe-resource is admin only functionality" +msgstr "" + +#: cinder/api/contrib/hosts.py:210 +msgid "Host not found" +msgstr "" + +#: cinder/api/contrib/quotas.py:63 +msgid "Quota limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:102 +msgid "Request body empty" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:106 +#: cinder/api/v1/snapshot_metadata.py:77 cinder/api/v1/volume_metadata.py:77 +#: cinder/api/v2/snapshot_metadata.py:77 +msgid "Request body and URI mismatch" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:109 +#: cinder/api/v1/snapshot_metadata.py:81 cinder/api/v1/volume_metadata.py:81 +#: cinder/api/v2/snapshot_metadata.py:81 +msgid "Request body contains too many items" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:159 +msgid "Invalid request body" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:163 +msgid "No image_name was specified in request." +msgstr "" + +#: cinder/api/middleware/fault.py:45 +#, python-format +msgid "Caught error: %s" +msgstr "" + +#: cinder/api/middleware/fault.py:54 cinder/api/openstack/wsgi.py:886 +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "" + +#: cinder/api/middleware/sizelimit.py:54 cinder/api/middleware/sizelimit.py:63 +#: cinder/api/middleware/sizelimit.py:77 +msgid "Request is too large." 
+msgstr "" + +#: cinder/api/openstack/__init__.py:74 +msgid "Must specify an ExtensionManager class" +msgstr "" + +#: cinder/api/openstack/__init__.py:85 +#, python-format +msgid "Extended resource: %s" +msgstr "" + +#: cinder/api/openstack/__init__.py:110 +#, python-format +msgid "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" +msgstr "" + +#: cinder/api/openstack/__init__.py:115 +#, python-format +msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgstr "" + +#: cinder/api/openstack/__init__.py:128 +msgid "" +"cinder.api.openstack:FaultWrapper is deprecated. Please use " +"cinder.api.middleware.fault:FaultWrapper instead." +msgstr "" + +#: cinder/api/openstack/urlmap.py:25 +msgid "" +"cinder.api.openstack.urlmap:urlmap_factory is deprecated. Please use " +"cinder.api.urlmap:urlmap_factory instead." +msgstr "" + +#: cinder/api/openstack/wsgi.py:134 cinder/api/openstack/wsgi.py:537 +msgid "cannot understand JSON" +msgstr "" + +#: cinder/api/openstack/wsgi.py:542 +msgid "too many body keys" +msgstr "" + +#: cinder/api/openstack/wsgi.py:581 +#, python-format +msgid "Exception handling resource: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:586 +#, python-format +msgid "Fault thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:589 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:697 +msgid "Unrecognized Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:701 +msgid "No Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:705 +msgid "Empty body provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:816 +#, python-format +msgid "There is no such action: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:819 cinder/api/openstack/wsgi.py:832 +#: cinder/api/v1/snapshot_metadata.py:55 cinder/api/v1/snapshot_metadata.py:73 +#: cinder/api/v1/snapshot_metadata.py:98 cinder/api/v1/snapshot_metadata.py:123 +#: cinder/api/v1/volume_metadata.py:55 cinder/api/v1/volume_metadata.py:73 +#: cinder/api/v1/volume_metadata.py:98 cinder/api/v1/volume_metadata.py:123 +#: cinder/api/v2/snapshot_metadata.py:55 cinder/api/v2/snapshot_metadata.py:73 +#: cinder/api/v2/snapshot_metadata.py:98 cinder/api/v2/snapshot_metadata.py:123 +msgid "Malformed request body" +msgstr "" + +#: cinder/api/openstack/wsgi.py:829 +msgid "Unsupported Content-Type" +msgstr "" + +#: cinder/api/openstack/wsgi.py:841 +msgid "Malformed request url" +msgstr "" + +#: cinder/api/openstack/wsgi.py:889 +#, python-format +msgid "%(url)s returned a fault: %(e)s" +msgstr "" + +#: cinder/api/openstack/volume/__init__.py:25 +msgid "" +"cinder.api.openstack.volume:APIRouter is deprecated. Please use " +"cinder.api.v1.router:APIRouter instead." +msgstr "" + +#: cinder/api/openstack/volume/versions.py:26 +msgid "" +"cinder.api.openstack.volume.versions.Versions is deprecated. Please use " +"cinder.api.versions.Versions instead." +msgstr "" + +#: cinder/api/v1/limits.py:144 cinder/api/v2/limits.py:144 +#, python-format +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." +msgstr "" + +#: cinder/api/v1/limits.py:270 cinder/api/v2/limits.py:270 +msgid "This request was rate-limited." 
+msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:39 cinder/api/v1/snapshot_metadata.py:119 +#: cinder/api/v1/snapshot_metadata.py:158 cinder/api/v2/snapshot_metadata.py:39 +#: cinder/api/v2/snapshot_metadata.py:119 +#: cinder/api/v2/snapshot_metadata.py:158 +msgid "snapshot does not exist" +msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:141 +#: cinder/api/v1/snapshot_metadata.py:151 cinder/api/v1/volume_metadata.py:141 +#: cinder/api/v1/volume_metadata.py:151 cinder/api/v2/snapshot_metadata.py:141 +#: cinder/api/v2/snapshot_metadata.py:151 +msgid "Metadata item was not found" +msgstr "" + +#: cinder/api/v1/snapshots.py:123 cinder/api/v2/snapshots.py:123 +#, python-format +msgid "Delete snapshot with id: %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:173 cinder/api/v2/snapshots.py:179 +#, python-format +msgid "Create snapshot from volume %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:177 cinder/api/v2/snapshots.py:188 +#, python-format +msgid "Invalid value '%s' for force. " +msgstr "" + +#: cinder/api/v1/volume_metadata.py:39 cinder/api/v1/volume_metadata.py:119 +#: cinder/api/v1/volume_metadata.py:158 +msgid "volume does not exist" +msgstr "" + +#: cinder/api/v1/volumes.py:106 +#, python-format +msgid "vol=%s" +msgstr "" + +#: cinder/api/v1/volumes.py:236 cinder/api/v2/volumes.py:153 +#, python-format +msgid "Delete volume with id: %s" +msgstr "" + +#: cinder/api/v1/volumes.py:278 cinder/api/v1/volumes.py:282 +#: cinder/api/v2/volumes.py:209 cinder/api/v2/volumes.py:213 +msgid "Invalid imageRef provided." +msgstr "" + +#: cinder/api/v1/volumes.py:339 cinder/api/v2/volumes.py:272 +#, python-format +msgid "Create volume of %s GB" +msgstr "" + +#: cinder/api/v1/volumes.py:418 +#, python-format +msgid "Removing options '%(bad_options)s' from query" +msgstr "" + +#: cinder/api/v2/volumes.py:359 +#, python-format +msgid "Removing options '%s' from query" +msgstr "" + +#: cinder/backup/api.py:53 +#, fuzzy +msgid "Backup status must be available or error" +msgstr "Volume 狀態需要可被使用" + +#: cinder/backup/api.py:78 +#, fuzzy +msgid "Volume to be backed up must be available" +msgstr "Volume 狀態需要可被使用" + +#: cinder/backup/api.py:112 +#, fuzzy +msgid "Backup status must be available" +msgstr "Volume 狀態需要可被使用" + +#: cinder/backup/api.py:117 +msgid "Backup to be restored has invalid size" +msgstr "" + +#: cinder/backup/api.py:126 +#, python-format +msgid "Creating volume of %(size)s GB for restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/api.py:140 +#, python-format +msgid "" +"volume size %(volume_size)d is too small to restore backup of size " +"%(size)d." 
+msgstr "" + +#: cinder/backup/api.py:145 +#, fuzzy +msgid "Volume to be restored to must be available" +msgstr "Volume 狀態需要可被使用" + +#: cinder/backup/api.py:151 +msgid "Volume to be restored to is smaller than the backup to be restored" +msgstr "" + +#: cinder/backup/api.py:155 +#, python-format +msgid "Overwriting volume %(volume_id)s with restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/manager.py:79 +msgid "Cleaning up incomplete backup operations" +msgstr "" + +#: cinder/backup/manager.py:83 +#, python-format +msgid "Resetting volume %s to available (was backing-up)" +msgstr "" + +#: cinder/backup/manager.py:87 +#, python-format +msgid "Resetting volume %s to error_restoring (was restoring-backup)" +msgstr "" + +#: cinder/backup/manager.py:98 +#, python-format +msgid "Resetting backup %s to error (was creating)" +msgstr "" + +#: cinder/backup/manager.py:104 +#, python-format +msgid "Resetting backup %s to available (was restoring)" +msgstr "" + +#: cinder/backup/manager.py:109 +#, python-format +msgid "Resuming delete on backup: %s" +msgstr "" + +#: cinder/backup/manager.py:119 +#, python-format +msgid "create_backup started, backup: %(backup_id)s for volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:128 +#, python-format +msgid "" +"create_backup aborted, expected volume status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:137 +#, python-format +msgid "" +"create_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:160 +#, python-format +msgid "create_backup finished. backup: %s" +msgstr "" + +#: cinder/backup/manager.py:166 +#, python-format +msgid "" +"restore_backup started, restoring backup: %(backup_id)s to volume: " +"%(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:175 +#, python-format +msgid "" +"restore_backup aborted, expected volume status %(expected_status)s but " +"got %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:183 +#, python-format +msgid "" +"restore_backup aborted, expected backup status %(expected_status)s but " +"got %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:199 +#, python-format +msgid "" +"restore_backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]" +msgstr "" + +#: cinder/backup/manager.py:220 +#, python-format +msgid "" +"restore_backup finished, backup: %(backup_id)s restored to volume: " +"%(volume_id)s" +msgstr "" + +#: cinder/backup/manager.py:228 +#, python-format +msgid "delete_backup started, backup: %s" +msgstr "" + +#: cinder/backup/manager.py:234 +#, python-format +msgid "" +"delete_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s" +msgstr "" + +#: cinder/backup/manager.py:244 +#, python-format +msgid "" +"delete_backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]" +msgstr "" + +#: cinder/backup/manager.py:264 +#, python-format +msgid "delete_backup finished, backup %s deleted" +msgstr "" + +#: cinder/backup/services/swift.py:96 +#, python-format +msgid "unsupported compression algorithm: %s" +msgstr "" + +#: cinder/backup/services/swift.py:117 +#, python-format +msgid "_check_container_exists: container: %s" +msgstr "" + +#: cinder/backup/services/swift.py:122 +#, python-format 
+msgid "container %s does not exist" +msgstr "" + +#: cinder/backup/services/swift.py:127 +#, python-format +msgid "container %s exists" +msgstr "" + +#: cinder/backup/services/swift.py:133 +#, python-format +msgid "_create_container started, container: %(container)s,backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:148 +#, python-format +msgid "_generate_swift_object_name_prefix: %s" +msgstr "" + +#: cinder/backup/services/swift.py:159 +#, python-format +msgid "generated object list: %s" +msgstr "" + +#: cinder/backup/services/swift.py:169 +#, python-format +msgid "" +"_write_metadata started, container name: %(container)s, metadata " +"filename: %(filename)s" +msgstr "" + +#: cinder/backup/services/swift.py:184 +#, python-format +msgid "" +"error writing metadata file to swift, MD5 of metadata file in swift " +"[%(etag)s] is not the same as MD5 of metadata file sent to swift " +"[%(md5)s]" +msgstr "" + +#: cinder/backup/services/swift.py:188 +msgid "_write_metadata finished" +msgstr "" + +#: cinder/backup/services/swift.py:193 +#, python-format +msgid "" +"_read_metadata started, container name: %(container)s, metadata filename:" +" %(filename)s" +msgstr "" + +#: cinder/backup/services/swift.py:197 +#, python-format +msgid "_read_metadata finished (%s)" +msgstr "" + +#: cinder/backup/services/swift.py:207 +#, fuzzy, python-format +msgid "volume size %d is invalid." +msgstr "無效的Keypair" + +#: cinder/backup/services/swift.py:221 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to swift, volume size: " +"%(volume_size_bytes)d, swift object names prefix %(object_prefix)s, " +"availability zone: %(availability_zone)s" +msgstr "" + +#: cinder/backup/services/swift.py:237 +msgid "reading chunk of data from volume" +msgstr "" + +#: cinder/backup/services/swift.py:244 +#, python-format +msgid "" +"compressed %(data_size_bytes)d bytes of data to %(comp_size_bytes)d bytes" +" using %(algorithm)s" +msgstr "" + +#: cinder/backup/services/swift.py:248 +msgid "not compressing data" +msgstr "" + +#: cinder/backup/services/swift.py:252 +msgid "About to put_object" +msgstr "" + +#: cinder/backup/services/swift.py:257 +#, python-format +msgid "swift MD5 for %(object_name)s: %(etag)s" +msgstr "" + +#: cinder/backup/services/swift.py:260 +#, python-format +msgid "backup MD5 for %(object_name)s: %(md5)s" +msgstr "" + +#: cinder/backup/services/swift.py:262 +#, python-format +msgid "" +"error writing object to swift, MD5 of object in swift %(etag)s is not the" +" same as MD5 of object sent to swift %(md5)s" +msgstr "" + +#: cinder/backup/services/swift.py:268 +msgid "Calling eventlet.sleep(0)" +msgstr "" + +#: cinder/backup/services/swift.py:276 +#, python-format +msgid "backup %s finished." +msgstr "" + +#: cinder/backup/services/swift.py:281 +#, python-format +msgid "v1 swift volume backup restore of %s started" +msgstr "" + +#: cinder/backup/services/swift.py:287 +#, python-format +msgid "metadata_object_names = %s" +msgstr "" + +#: cinder/backup/services/swift.py:293 +msgid "" +"restore_backup aborted, actual swift object list in swift does not match " +"object list stored in metadata" +msgstr "" + +#: cinder/backup/services/swift.py:299 +#, python-format +msgid "" +"restoring object from swift. 
backup: %(backup_id)s, container: " +"%(container)s, swift object name: %(object_name)s, volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:309 +#, python-format +msgid "decompressing data using %s algorithm" +msgstr "" + +#: cinder/backup/services/swift.py:323 +#, python-format +msgid "v1 swift volume backup restore of %s finished" +msgstr "" + +#: cinder/backup/services/swift.py:331 +#, python-format +msgid "" +"starting restore of backup %(object_prefix)s from swift container: " +"%(container)s, to volume %(volume_id)s, backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/services/swift.py:339 +#, python-format +msgid "Restoring swift backup version %s" +msgstr "" + +#: cinder/backup/services/swift.py:344 +#, python-format +msgid "No support to restore swift backup version %s" +msgstr "" + +#: cinder/backup/services/swift.py:348 +#, python-format +msgid "restore %(backup_id)s to %(volume_id)s finished." +msgstr "" + +#: cinder/backup/services/swift.py:362 +msgid "swift error while listing objects, continuing with delete" +msgstr "" + +#: cinder/backup/services/swift.py:371 +#, python-format +msgid "swift error while deleting object %s, continuing with delete" +msgstr "" + +#: cinder/backup/services/swift.py:374 +#, python-format +msgid "deleted swift object: %(swift_object_name)s in container: %(container)s" +msgstr "" + +#: cinder/backup/services/swift.py:380 +#, python-format +msgid "delete %s finished" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:152 +#, python-format +msgid "Creating iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:171 cinder/brick/iscsi/iscsi.py:264 +#: cinder/brick/iscsi/iscsi.py:400 cinder/brick/iscsi/iscsi.py:409 +#, python-format +msgid "Failed to create iscsi target for volume id:%(vol_id)s." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:181 +#, python-format +msgid "" +"Failed to create iscsi target for volume id:%(vol_id)s. Please ensure " +"your tgtd config file contains 'include %(volumes_dir)s/*'" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:192 +#, python-format +msgid "Removing iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:209 cinder/brick/iscsi/iscsi.py:426 +#, python-format +msgid "Failed to remove iscsi target for volume id:%(vol_id)s." 
+msgstr "" + +#: cinder/brick/iscsi/iscsi.py:218 cinder/brick/iscsi/iscsi.py:434 +msgid "valid iqn needed for show_target" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:270 +#, python-format +msgid "Removing iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:356 +msgid "rtstool is not installed correctly" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:376 +#, python-format +msgid "Creating iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:416 +#, python-format +msgid "Removing iscsi_target: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:455 +#, python-format +msgid "Failed to add initiator iqn %s to target" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:35 +#, fuzzy, python-format +msgid "Unable to find Volume Group: %s" +msgstr "無法卸載 Volume %s" + +#: cinder/brick/local_dev/lvm.py:41 +#, fuzzy, python-format +msgid "Failed to create Volume Group: %s" +msgstr "找不到Volume %s" + +#: cinder/brick/local_dev/lvm.py:75 +msgid "Error creating Volume Group" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:76 +#, python-format +msgid "Cmd :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:77 +#, python-format +msgid "StdOut :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:78 +#, python-format +msgid "StdErr :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:82 +#, fuzzy, python-format +msgid "Unable to locate Volume Group %s" +msgstr "找不到Volume %s" + +#: cinder/brick/local_dev/lvm.py:265 +#, fuzzy, python-format +msgid "Unable to find VG: %s" +msgstr "無法卸載 Volume %s" + +#: cinder/brick/local_dev/lvm.py:284 +msgid "" +"Requested to setup thin provisioning, however current LVM version does " +"not support it." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:337 +#, fuzzy, python-format +msgid "Unable to find LV: %s" +msgstr "無法卸載 Volume %s" + +#: cinder/common/sqlalchemyutils.py:68 +msgid "Id not in sort_keys; is sort_keys unique?" +msgstr "" + +#: cinder/common/sqlalchemyutils.py:116 +msgid "Unknown sort direction, must be 'desc' or 'asc'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:51 +msgid "Use of empty request context is deprecated" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:178 +#, python-format +msgid "Unrecognized read_deleted value '%s'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:786 +#, python-format +msgid "Change will make usage less than 0 for the following resources: %(unders)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1797 cinder/db/sqlalchemy/api.py:1823 +#, python-format +msgid "No backend config with id %(sm_backend_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1880 +#, python-format +msgid "No sm_flavor called %(sm_flavor)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1924 +#, python-format +msgid "No sm_volume with id %(volume_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1987 +#, python-format +msgid "No backup with id %(backup_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:74 +msgid "version should be an integer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:101 +msgid "Upgrade DB using Essex release first." +msgstr "" + +#: cinder/db/sqlalchemy/session.py:71 +#, python-format +msgid "Got mysql server has gone away: %s" +msgstr "" + +#: cinder/db/sqlalchemy/session.py:132 +#, python-format +msgid "SQL connection failed. %s attempts left." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:243 +msgid "Exception while creating table." 
+msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:272 +msgid "Downgrade from initial Cinder install is unsupported." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:85 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:123 +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:82 +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:47 +#, python-format +msgid "Table |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:135 +msgid "quota_classes table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:142 +msgid "quota_usages table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:149 +msgid "reservations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:62 +msgid "Exception while creating table 'volume_glance_metedata'" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:77 +msgid "volume_glance_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:94 +msgid "backups table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:60 +msgid "snapshot_metadata table not dropped" +msgstr "" + +#: cinder/image/glance.py:151 +#, python-format +msgid "" +"Error contacting glance server '%(host)s:%(port)s' for '%(method)s', " +"%(extra)s." +msgstr "" + +#: cinder/image/image_utils.py:116 +msgid "Snapshot list encountered but no header found!" +msgstr "" + +#: cinder/image/image_utils.py:224 +msgid "'qemu-img info' parsing failed." +msgstr "" + +#: cinder/image/image_utils.py:231 +#, python-format +msgid "fmt=%(fmt)s backed by:%(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:248 +#, python-format +msgid "Converted to raw, but format is now %s" +msgstr "" + +#: cinder/image/image_utils.py:278 +#, python-format +msgid "Converted to %(f1)s, but format is now %(f2)s" +msgstr "" + +#: cinder/openstack/common/exception.py:104 +msgid "Uncaught exception" +msgstr "" + +#: cinder/openstack/common/excutils.py:48 +#, python-format +msgid "Original exception being dropped: %s" +msgstr "" + +#: cinder/openstack/common/lockutils.py:102 +#, python-format +msgid "Could not release the acquired lock `%s`" +msgstr "" + +#: cinder/openstack/common/lockutils.py:188 +#, python-format +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:199 +#, python-format +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:226 +#, python-format +msgid "Got file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:234 +#, python-format +msgid "Released file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." 
+msgstr "" + +#: cinder/openstack/common/log.py:226 +#, python-format +msgid "Deprecated: %s" +msgstr "" + +#: cinder/openstack/common/log.py:329 +#, python-format +msgid "Error loading logging config %(log_config)s: %(err_msg)s" +msgstr "" + +#: cinder/openstack/common/log.py:379 +#, python-format +msgid "syslog facility must be one of: %s" +msgstr "" + +#: cinder/openstack/common/log.py:537 +#, python-format +msgid "Fatal call to deprecated config: %(msg)s" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:84 +#, python-format +msgid "task run outlasted interval by %s sec" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:91 +msgid "in fixed duration looping call" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:131 +#, python-format +msgid "Dynamic looping call sleeping for %.02f seconds" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:138 +msgid "in dynamic looping call" +msgstr "" + +#: cinder/openstack/common/policy.py:149 +#, python-format +msgid "" +"Inheritance-based rules are deprecated; use the default brain instead of " +"%s." +msgstr "" + +#: cinder/openstack/common/policy.py:163 +#, python-format +msgid "Failed to understand rule %(match)r" +msgstr "" + +#: cinder/openstack/common/policy.py:173 +#, python-format +msgid "Inheritance-based rules are deprecated; update _check_%s" +msgstr "" + +#: cinder/openstack/common/policy.py:180 +#, python-format +msgid "No handler for matches of kind %s" +msgstr "" + +#: cinder/openstack/common/service.py:112 +#: cinder/openstack/common/service.py:275 +msgid "Full set of CONF:" +msgstr "" + +#: cinder/openstack/common/service.py:261 +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "" + +#: cinder/openstack/common/strutils.py:72 +#, python-format +msgid "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:125 +#, python-format +msgid "%s not in valid priorities" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:141 +#, python-format +msgid "" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:171 +#, python-format +msgid "Failed to load notifier %s. These notifications will not be sent." +msgstr "" + +#: cinder/openstack/common/notifier/rabbit_notifier.py:27 +msgid "The rabbit_notifier is now deprecated. Please use rpc_notifier instead." +msgstr "" + +#: cinder/openstack/common/notifier/rpc_notifier.py:45 +#: cinder/openstack/common/notifier/rpc_notifier2.py:51 +#, python-format +msgid "Could not send notification to %(topic)s. Payload=%(message)s" +msgstr "" + +#: cinder/openstack/common/rpc/__init__.py:106 +#, python-format +msgid "" +"A RPC is being made while holding a lock. The locks currently held are " +"%(locks)s. This is probably a bug. Please report it. Include the " +"following: [%(stack)s]." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:75 +msgid "Pool creating new connection" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:200 +#, python-format +msgid "no calling threads waiting for msg_id : %s, message : %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:208 +#, python-format +msgid "" +"Number of call waiters is greater than warning threshhold: %d. There " +"could be a MulticallProxyWaiter leak." 
+msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:291 +#, python-format +msgid "unpacked context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:337 +#, python-format +msgid "UNIQUE_ID is %s." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:406 +#, python-format +msgid "received %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:414 +#, python-format +msgid "no method for message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:415 +#, python-format +msgid "No method for message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:443 +#: cinder/openstack/common/rpc/impl_zmq.py:286 +#, python-format +msgid "Expected exception during message handling (%s)" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:451 +#: cinder/openstack/common/rpc/impl_zmq.py:292 +msgid "Exception during message handling" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:586 +#, python-format +msgid "Making synchronous call on %s ..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:589 +#, python-format +msgid "MSG_ID is %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:623 +#, python-format +msgid "Making asynchronous cast on %s..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:632 +msgid "Making asynchronous fanout cast..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:660 +#, python-format +msgid "Sending %(event_type)s on %(topic)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:74 +#, fuzzy +msgid "An unknown RPC related exception occurred." +msgstr "發生一個未知例外" + +#: cinder/openstack/common/rpc/common.py:104 +#, python-format +msgid "" +"Remote error: %(exc_type)s %(value)s\n" +"%(traceback)s." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:121 +#, python-format +msgid "" +"Timeout while waiting on RPC response - topic: \"%(topic)s\", RPC method:" +" \"%(method)s\" info: \"%(info)s\"" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:137 +#: cinder/openstack/common/rpc/common.py:138 +#: cinder/openstack/common/rpc/common.py:139 +msgid "" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:143 +#, python-format +msgid "Found duplicate message(%(msg_id)s). Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:147 +msgid "Invalid reuse of an RPC connection." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:151 +#, python-format +msgid "Specified RPC version, %(version)s, not supported by this endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:156 +#, python-format +msgid "" +"Specified RPC envelope version, %(version)s, not supported by this " +"endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:280 +#, python-format +msgid "Failed to sanitize %(item)s. Key error %(err)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:302 +#, python-format +msgid "Returning exception %s to caller" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:169 +#: cinder/openstack/common/rpc/impl_qpid.py:133 +msgid "Failed to process message... skipping it." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:480 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:502 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:539 +#, python-format +msgid "" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:555 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:609 +#: cinder/openstack/common/rpc/impl_qpid.py:403 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:627 +#: cinder/openstack/common/rpc/impl_qpid.py:418 +#, python-format +msgid "Timed out waiting for RPC response: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:631 +#: cinder/openstack/common/rpc/impl_qpid.py:422 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:670 +#: cinder/openstack/common/rpc/impl_qpid.py:457 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:351 +#, python-format +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:357 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:370 +msgid "Re-established AMQP queues" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:430 +msgid "Error processing message. Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:97 +msgid "JSON serialization failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:104 +#, python-format +msgid "Deserializing: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:140 +#, python-format +msgid "Connecting to %(addr)s with %(type)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:141 +#, python-format +msgid "-> Subscribed to %(subscribe)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:142 +#, python-format +msgid "-> bind: %(bind)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:150 +msgid "Could not open socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:162 +#, python-format +msgid "Subscribing to %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:204 +msgid "You cannot recv on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:209 +msgid "You cannot send on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:273 +#, python-format +msgid "Running func with context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:311 +msgid "Sending reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:345 +msgid "RPC message did not include method." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:380 +msgid "Registering reactor" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:392 +msgid "In reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:407 +msgid "Out reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:411 +msgid "Consuming socket" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:453 +#, python-format +msgid "CONSUMER GOT %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:465 +#, python-format +msgid "Creating proxy for topic: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:471 +msgid "Topic contained dangerous characters." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:496 +#, python-format +msgid "ROUTER RELAY-OUT SUCCEEDED %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:505 +msgid "Topic socket file creation failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:510 +#, python-format +msgid "ROUTER RELAY-OUT QUEUED %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:513 +#, python-format +msgid "Local per-topic backlog buffer full for topic %(topic)s. Dropping message." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:532 +#, python-format +msgid "Could not create IPC directory %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:542 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:576 +#, python-format +msgid "CONSUMER RECEIVED DATA: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:578 +#, python-format +msgid "ROUTER RELAY-OUT %(data)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:600 +msgid "ZMQ Envelope version unsupported or unknown." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:628 +msgid "Skipping topic registration. Already registered." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:635 +#, python-format +msgid "Consumer is a zmq.%s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:687 +msgid "Creating payload" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:700 +msgid "Creating queue socket for reply waiter" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:713 +msgid "Sending cast" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:716 +msgid "Cast sent; Waiting reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:719 +#, python-format +msgid "Received message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:720 +msgid "Unpacking response" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:729 +msgid "Unsupported or unknown ZMQ envelope returned." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:736 +msgid "RPC Message Invalid." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:760 +#, python-format +msgid "%(msg)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:763 +#, python-format +msgid "Sending message(s) to: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:767 +msgid "No matchmaker results. Not casting." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:770 +msgid "No match from matchmaker." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:53 +msgid "Match not found by MatchMaker." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:89 +msgid "Matchmaker does not implement registration or heartbeat." 
+msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:239 +#, python-format +msgid "Matchmaker unregistered: %s, %s" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:250 +msgid "Register before starting heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:343 +#: cinder/openstack/common/rpc/matchmaker.py:361 +#, python-format +msgid "No key defining hosts for topic '%s', see ringfile" +msgstr "" + +#: cinder/scheduler/chance.py:51 cinder/scheduler/simple.py:90 +msgid "Is the appropriate service running?" +msgstr "" + +#: cinder/scheduler/chance.py:56 +msgid "Could not find another host" +msgstr "" + +#: cinder/scheduler/driver.py:89 +msgid "Must implement a fallback schedule" +msgstr "" + +#: cinder/scheduler/driver.py:93 +msgid "Must implement schedule_create_volume" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:113 +msgid "Invalid value for 'scheduler_max_attempts', must be >=1" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:131 +#, python-format +msgid "" +"Error scheduling %(volume_id)s from last vol-service: %(last_host)s : " +"%(exc)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:160 +#, python-format +msgid "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:208 +#, python-format +msgid "Filtered %(hosts)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:214 +#, python-format +msgid "Choosing %(best_host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:240 +#, python-format +msgid "Ignoring %(service_name)s service update from %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:244 +#, python-format +msgid "Received %(service_name)s service update from %(host)s." +msgstr "" + +#: cinder/scheduler/host_manager.py:266 +msgid "service is down or disabled." +msgstr "" + +#: cinder/scheduler/manager.py:119 +#, python-format +msgid "Failed to schedule_%(method)s: %(ex)s" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:69 +#, python-format +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:78 +#, python-format +msgid "Could not decode scheduler options: '%(e)s'" +msgstr "" + +#: cinder/scheduler/simple.py:79 +msgid "Not enough allocatable volume gigabytes remaining" +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:37 +msgid "Free capacity not set: volume node info collection broken." +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:51 +#, python-format +msgid "" +"Insufficient free space for volume creation (requested / avail): " +"%(requested)s/%(available)s" +msgstr "" + +#: cinder/scheduler/filters/retry_filter.py:41 +#, python-format +msgid "Host %(host)s %(pass_msg)s. 
Previously tried hosts: %(hosts)s" +msgstr "" + +#: cinder/tests/fake_driver.py:45 cinder/volume/driver.py:506 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "" + +#: cinder/tests/fake_driver.py:93 cinder/tests/fake_driver.py:98 +#, python-format +msgid "LoggingVolumeDriver: %s" +msgstr "" + +#: cinder/tests/fake_utils.py:72 +#, python-format +msgid "Faking execution of cmd (subprocess): %s" +msgstr "" + +#: cinder/tests/fake_utils.py:80 +#, python-format +msgid "Faked command matched %s" +msgstr "" + +#: cinder/tests/fake_utils.py:96 +#, python-format +msgid "Faked command raised an exception %s" +msgstr "" + +#: cinder/tests/fake_utils.py:101 +#, python-format +msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" +msgstr "" + +#: cinder/tests/test_misc.py:59 +#, python-format +msgid "" +"The following migrations are missing a downgrade:\n" +"\t%s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:243 +#, python-format +msgid "unrecognized argument %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1217 +#, python-format +msgid "Run CLI command: %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1220 +#, python-format +msgid "" +"CLI output:\n" +" stdout: %(stdout)s\n" +" stderr: %(stderr)s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1225 +#, python-format +msgid "" +"CLI Exception output:\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/tests/test_volume_types.py:58 +#, python-format +msgid "Given data: %s" +msgstr "" + +#: cinder/tests/test_volume_types.py:59 +#, python-format +msgid "Result data: %s" +msgstr "" + +#: cinder/tests/api/contrib/test_backups.py:638 +#, fuzzy +msgid "Invalid input" +msgstr "無效的快照(snapshot)" + +#: cinder/tests/integrated/test_login.py:31 +#, python-format +msgid "volume: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:37 +#, python-format +msgid "" +"%(message)s\n" +"Status Code: %(_status)s\n" +"Body: %(_body)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:46 +msgid "Authentication error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:54 +msgid "Authorization error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:62 +msgid "Item not found" +msgstr "" + +#: cinder/tests/integrated/api/client.py:104 +#, python-format +msgid "Doing %(method)s on %(relative_url)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:106 +#, python-format +msgid "Body: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:124 +#, python-format +msgid "%(auth_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:150 +#, python-format +msgid "%(relative_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:160 +msgid "Unexpected status code" +msgstr "" + +#: cinder/tests/integrated/api/client.py:167 +#, python-format +msgid "Decoding JSON: %s" +msgstr "" + +#: cinder/volume/api.py:96 +msgid "May specify only one of snapshot, imageRef or source volume" +msgstr "" + +#: cinder/volume/api.py:103 cinder/volume/api.py:449 +#: cinder/volume/manager.py:554 +msgid "status must be available" +msgstr "" + +#: cinder/volume/api.py:108 +msgid "Volume size cannot be lesser than the Snapshot size" +msgstr "" + +#: cinder/volume/api.py:117 +msgid "Unable to clone volumes that are in an error state" +msgstr "" + +#: cinder/volume/api.py:123 +msgid "Clones currently must be >= original volume size." 
+msgstr "" + +#: cinder/volume/api.py:140 +#, python-format +msgid "Volume size '%s' must be an integer and greater than 0" +msgstr "" + +#: cinder/volume/api.py:150 +msgid "Size of specified image is larger than volume size." +msgstr "" + +#: cinder/volume/api.py:154 +msgid "Image minDisk size is larger than the volume size." +msgstr "" + +#: cinder/volume/api.py:168 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/api.py:177 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create volume (%(d_consumed)d " +"volumes already consumed)" +msgstr "" + +#: cinder/volume/api.py:308 +msgid "Failed to update quota for deleting volume" +msgstr "" + +#: cinder/volume/api.py:316 +#, fuzzy +msgid "Volume status must be available or error" +msgstr "Volume 狀態需要可被使用" + +#: cinder/volume/api.py:321 +#, python-format +msgid "Volume still has %d dependent snapshots" +msgstr "" + +#: cinder/volume/api.py:374 cinder/volume/api.py:432 +#: cinder/volume/volume_types.py:65 +#, python-format +msgid "Searching by: %s" +msgstr "" + +#: cinder/volume/api.py:452 +msgid "already attached" +msgstr "" + +#: cinder/volume/api.py:459 +msgid "already detached" +msgstr "" + +#: cinder/volume/api.py:470 +#, fuzzy +msgid "Volume status must be available to reserve" +msgstr "Volume 狀態需要可被使用" + +#: cinder/volume/api.py:519 +msgid "must be available" +msgstr "" + +#: cinder/volume/api.py:537 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG snapshot " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/api.py:546 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create snapshot (%(d_consumed)d " +"snapshots already consumed)" +msgstr "" + +#: cinder/volume/api.py:595 +#, fuzzy +msgid "Volume Snapshot status must be available or error" +msgstr "Volume 狀態需要可被使用" + +#: cinder/volume/api.py:623 +msgid "Metadata property key blank" +msgstr "" + +#: cinder/volume/api.py:627 +msgid "Metadata property key greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:631 +msgid "Metadata property value greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:716 +#, fuzzy +msgid "Volume status must be available/in-use." +msgstr "Volume 狀態需要可被使用" + +#: cinder/volume/api.py:719 +msgid "Volume status is in-use." +msgstr "" + +#: cinder/volume/driver.py:94 cinder/volume/drivers/netapp/nfs.py:253 +#, python-format +msgid "Recovering from a failed execute. Try number %s" +msgstr "" + +#: cinder/volume/driver.py:221 cinder/volume/drivers/emc/emc_smis_iscsi.py:119 +msgid "ISCSI provider_location not stored, using discovery" +msgstr "" + +#: cinder/volume/driver.py:269 +#, fuzzy, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "找不到Volume %s" + +#: cinder/volume/driver.py:273 cinder/volume/drivers/emc/emc_smis_iscsi.py:162 +#, python-format +msgid "ISCSI Discovery: Found %s" +msgstr "" + +#: cinder/volume/driver.py:361 +#, python-format +msgid "copy_image_to_volume %s." +msgstr "" + +#: cinder/volume/driver.py:378 +#, python-format +msgid "copy_volume_to_image %s." +msgstr "" + +#: cinder/volume/driver.py:437 +#, python-format +msgid "iSCSI device not found at %s" +msgstr "" + +#: cinder/volume/driver.py:439 +#, python-format +msgid "" +"ISCSI volume not yet found at: %(host_device)s. Will rescan & retry. 
Try" +" number: %(tries)s" +msgstr "" + +#: cinder/volume/driver.py:451 +#, python-format +msgid "Found iSCSI node %(host_device)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/volume/driver.py:469 cinder/volume/manager.py:689 +#: cinder/volume/drivers/lvm.py:560 cinder/volume/drivers/lvm.py:676 +#: cinder/volume/drivers/storwize_svc.py:1344 +#: cinder/volume/drivers/emc/emc_smis_common.py:855 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:241 +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1503 +#: cinder/volume/drivers/netapp/iscsi.py:1114 +#: cinder/volume/drivers/netapp/iscsi.py:1484 +#: cinder/volume/drivers/netapp/iscsi.py:2236 +#: cinder/volume/drivers/netapp/iscsi.py:2515 +#: cinder/volume/drivers/nexenta/volume.py:317 +msgid "Updating volume status" +msgstr "" + +#: cinder/volume/driver.py:544 +msgid "Driver must implement initialize_connection" +msgstr "" + +#: cinder/volume/manager.py:122 +#, python-format +msgid "Driver path %s is deprecated, update your configuration to the new path." +msgstr "" + +#: cinder/volume/manager.py:144 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "" + +#: cinder/volume/manager.py:149 +#, python-format +msgid "volume %s stuck in a downloading state" +msgstr "" + +#: cinder/volume/manager.py:154 +#, python-format +msgid "volume %s: skipping export" +msgstr "" + +#: cinder/volume/manager.py:156 +msgid "Resuming any in progress delete operations" +msgstr "" + +#: cinder/volume/manager.py:159 +#, python-format +msgid "Resuming delete on volume: %s" +msgstr "" + +#: cinder/volume/manager.py:219 +#, python-format +msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +msgstr "" + +#: cinder/volume/manager.py:228 +#, python-format +msgid "volume %s: creating from snapshot" +msgstr "" + +#: cinder/volume/manager.py:232 +#, python-format +msgid "volume %s: creating from existing volume" +msgstr "" + +#: cinder/volume/manager.py:236 +#, python-format +msgid "volume %s: creating from image" +msgstr "" + +#: cinder/volume/manager.py:245 +#, python-format +msgid "volume %s: creating" +msgstr "" + +#: cinder/volume/manager.py:277 +#, python-format +msgid "volume %s: creating export" +msgstr "" + +#: cinder/volume/manager.py:286 +#, python-format +msgid "volume %s: create failed" +msgstr "" + +#: cinder/volume/manager.py:316 +#, python-format +msgid "volume %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:324 +#, python-format +msgid "Error: %s" +msgstr "" + +#: cinder/volume/manager.py:350 +#, python-format +msgid "volume %s: Error trying to reschedule create" +msgstr "" + +#: cinder/volume/manager.py:368 +msgid "Retry info not present, will not reschedule" +msgstr "" + +#: cinder/volume/manager.py:372 +msgid "No request spec, will not reschedule" +msgstr "" + +#: cinder/volume/manager.py:377 +#, python-format +msgid "volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d" +msgstr "" + +#: cinder/volume/manager.py:406 cinder/volume/manager.py:419 +#, python-format +msgid "volume %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:412 +msgid "volume is not local to this node" +msgstr "" + +#: cinder/volume/manager.py:417 +#, python-format +msgid "volume %s: removing export" +msgstr "" + +#: cinder/volume/manager.py:422 +#, python-format +msgid "volume %s: volume is busy" +msgstr "" + +#: cinder/volume/manager.py:441 +msgid "Failed to update usages deleting volume" +msgstr "" + +#: cinder/volume/manager.py:445 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "" + +#: 
cinder/volume/manager.py:460
+#, python-format
+msgid "snapshot %s: creating"
+msgstr ""
+
+#: cinder/volume/manager.py:466
+#, python-format
+msgid "snapshot %(snap_name)s: creating"
+msgstr ""
+
+#: cinder/volume/manager.py:484
+#, python-format
+msgid "snapshot %s: created successfully"
+msgstr ""
+
+#: cinder/volume/manager.py:492 cinder/volume/manager.py:502
+#, python-format
+msgid "snapshot %s: deleting"
+msgstr ""
+
+#: cinder/volume/manager.py:505
+#, python-format
+msgid "snapshot %s: snapshot is busy"
+msgstr ""
+
+#: cinder/volume/manager.py:530
+msgid "Failed to update usages deleting snapshot"
+msgstr ""
+
+#: cinder/volume/manager.py:533
+#, python-format
+msgid "snapshot %s: deleted successfully"
+msgstr ""
+
+#: cinder/volume/manager.py:551
+msgid "being attached by another instance"
+msgstr ""
+
+#: cinder/volume/manager.py:608
+#, python-format
+msgid "Downloaded image %(image_id)s to %(volume_id)s successfully"
+msgstr ""
+
+#: cinder/volume/manager.py:626
+#, python-format
+msgid "Uploaded volume %(volume_id)s to image (%(image_id)s) successfully"
+msgstr ""
+
+#: cinder/volume/manager.py:702
+msgid "Clear capabilities"
+msgstr ""
+
+#: cinder/volume/manager.py:706
+#, python-format
+msgid "Notification {%s} received"
+msgstr ""
+
+#: cinder/volume/volume_types.py:41
+#, python-format
+msgid "DB error: %s"
+msgstr ""
+
+#: cinder/volume/volume_types.py:50 cinder/volume/volume_types.py:97
+msgid "id cannot be None"
+msgstr ""
+
+#: cinder/volume/volume_types.py:109
+msgid "name cannot be None"
+msgstr ""
+
+#: cinder/volume/volume_types.py:128
+#, python-format
+msgid ""
+"Default volume type is not found, please check default_volume_type "
+"config: %s"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:91
+msgid "Running with CoraidDriver for ESM EtherCloud"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:102
+#, python-format
+msgid "Update session cookie %(session)s"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:108 cinder/volume/drivers/coraid.py:183
+#, python-format
+msgid "Message : %(message)s"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:125
+#, python-format
+msgid "Error while trying to set group: %(message)s"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:128
+#, fuzzy, python-format
+msgid "Unable to find group: %(group)s"
+msgstr "無法卸載 Volume %s"
+
+#: cinder/volume/drivers/coraid.py:161
+msgid "ESM urlOpen error"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:166
+msgid "JSON Error"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:170
+msgid "Request without URL"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:175
+#, python-format
+msgid "Configure data : %s"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:177
+#, python-format
+msgid "Configure response : %s"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:199
+#, python-format
+msgid "Unable to retrieve volume info for volume %(volname)s"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:289
+msgid "Cannot log in to Coraid ESM"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:308
+#, fuzzy, python-format
+msgid "Failed to create volume %(volname)s"
+msgstr "找不到Volume %s"
+
+#: cinder/volume/drivers/coraid.py:321
+#, fuzzy, python-format
+msgid "Failed to delete volume %(volname)s"
+msgstr "找不到Volume %s"
+
+#: cinder/volume/drivers/coraid.py:335
+#, python-format
+msgid "Failed to Create Snapshot %(snapname)s"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:347
+#, python-format
+msgid "Failed to Delete Snapshot %(snapname)s"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:362
+#, python-format
+msgid "Failed to Create Volume from Snapshot %(snapname)s"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:383
+#, python-format
+msgid ""
+"Failed to Initialize Connection. Volume Name: %(volname)s Shelf: "
+"%(shelf)s, Lun: %(lun)s"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:65
+#, python-format
+msgid "There's no Gluster config file configured (%s)"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:70
+#, python-format
+msgid "Gluster config file at %(config)s doesn't exist"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:80
+msgid "mount.glusterfs is not installed"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:98 cinder/volume/drivers/nfs.py:168
+#, python-format
+msgid "cast to %s"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:108 cinder/volume/drivers/nfs.py:178
+#, python-format
+msgid "Volume %s does not have provider_location specified, skipping"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:168 cinder/volume/drivers/nfs.py:237
+#, python-format
+msgid "Exception during mounting %s"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:247 cinder/volume/drivers/nfs.py:321
+#, python-format
+msgid "%s is already mounted"
+msgstr ""
+
+#: cinder/volume/drivers/lvm.py:82
+#, python-format
+msgid "volume group %s doesn't exist"
+msgstr ""
+
+#: cinder/volume/drivers/lvm.py:197
+#, python-format
+msgid "Size for volume: %s not found, skipping secure delete."
+msgstr ""
+
+#: cinder/volume/drivers/lvm.py:204
+#, python-format
+msgid "Performing secure delete on volume: %s"
+msgstr ""
+
+#: cinder/volume/drivers/lvm.py:218
+#, python-format
+msgid "Error: unrecognized volume_clear option: %s"
+msgstr ""
+
+#: cinder/volume/drivers/lvm.py:238
+#, python-format
+msgid "snapshot: %s not found, skipping delete operations"
+msgstr ""
+
+#: cinder/volume/drivers/lvm.py:268 cinder/volume/drivers/lvm.py:656
+#, python-format
+msgid "Creating clone of volume: %s"
+msgstr ""
+
+#: cinder/volume/drivers/lvm.py:345
+#, python-format
+msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %s"
+msgstr ""
+
+#: cinder/volume/drivers/lvm.py:364
+#, python-format
+msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %s"
+msgstr ""
+
+#: cinder/volume/drivers/lvm.py:378
+msgid "Detected inconsistency in provider_location id"
+msgstr ""
+
+#: cinder/volume/drivers/lvm.py:430
+#, python-format
+msgid "Symbolic link %s not found"
+msgstr ""
+
+#: cinder/volume/drivers/lvm.py:510 cinder/volume/drivers/lvm.py:524
+#, python-format
+msgid "Skipping remove_export. No iscsi_target provisioned for volume: %s"
+msgstr ""
+
+#: cinder/volume/drivers/lvm.py:542
+#, python-format
+msgid ""
+"Skipping remove_export. 
No iscsi_target is presently exported for volume:" +" %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:583 +msgid "Error retrieving volume status: " +msgstr "" + +#: cinder/volume/drivers/nfs.py:141 +#, python-format +msgid "There's no NFS config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/nfs.py:146 +#, python-format +msgid "NFS config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/rbd.py:64 +#, python-format +msgid "rbd has no pool %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:87 cinder/volume/drivers/sheepdog.py:134 +msgid "error refreshing volume stats" +msgstr "" + +#: cinder/volume/drivers/rbd.py:220 +msgid "Not stored in rbd" +msgstr "" + +#: cinder/volume/drivers/rbd.py:224 +msgid "Blank components" +msgstr "" + +#: cinder/volume/drivers/rbd.py:227 +#, fuzzy +msgid "Not an rbd snapshot" +msgstr "無效的快照(snapshot)" + +#: cinder/volume/drivers/rbd.py:242 +#, python-format +msgid "%s is in a different ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:253 +#, fuzzy, python-format +msgid "Unable to read image %s" +msgstr "找不到Volume %s" + +#: cinder/volume/drivers/scality.py:63 +msgid "Value required for 'scality_sofs_config'" +msgstr "" + +#: cinder/volume/drivers/scality.py:74 +#, python-format +msgid "Cannot access 'scality_sofs_config': %s" +msgstr "" + +#: cinder/volume/drivers/scality.py:80 +msgid "Cannot execute /sbin/mount.sofs" +msgstr "" + +#: cinder/volume/drivers/scality.py:101 +msgid "Cannot mount Scality SOFS, check syslog for errors" +msgstr "" + +#: cinder/volume/drivers/scality.py:135 +#, python-format +msgid "Cannot find volume dir for Scality SOFS at '%s'" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:47 +#, python-format +msgid "Sheepdog is not working: %s" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:52 +msgid "Sheepdog is not working" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:129 +#, python-format +msgid "Payload for SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:146 +#, python-format +msgid "Call to json.loads() raised an exception: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:152 +#, python-format +msgid "Results of SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:156 +#, python-format +msgid "Clone operation encountered: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:158 +#, python-format +msgid "Waiting for outstanding operation before retrying snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:164 +#, python-format +msgid "Detected xDBVersionMismatch, retry %s of 5" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:171 +#, python-format +msgid "API response: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:191 +#, python-format +msgid "Found solidfire account: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:220 +#, python-format +msgid "solidfire account: %s does not exist, create it..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:281 +#, python-format +msgid "Failed to retrieve volume SolidFire-ID: %s in get_by_account!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:338 +msgid "Failed to get model update from clone" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:364 +#, python-format +msgid "More than one valid preset was detected, using %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:397 +#, python-format +msgid "Mapped SolidFire volumeID %(sfid)s to cinder ID %(uuid)s." 
+msgstr "" + +#: cinder/volume/drivers/solidfire.py:406 +#, python-format +msgid "Volume %s, not found on SF Cluster." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:409 +#, python-format +msgid "Found %(count)s volumes mapped to id: %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:476 +msgid "Enter SolidFire delete_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:480 +#, python-format +msgid "Account for Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:482 +msgid "This usually means the volume was never succesfully created." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:497 +#, python-format +msgid "Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:500 +msgid "Leaving SolidFire delete_volume" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:504 +msgid "Executing SolidFire ensure_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:509 +msgid "Executing SolidFire create_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:558 +msgid "Updating cluster status info" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:566 +#, fuzzy +msgid "Failed to get updated stats" +msgstr "內文解碼失敗" + +#: cinder/volume/drivers/storwize_svc.py:173 +#, python-format +msgid "WWPN on node %(node)s: %(wwpn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:179 +msgid "enter: do_setup" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:190 +#, python-format +msgid "pool %s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:206 +msgid "Failed to get license information." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:265 +msgid "do_setup: No configured nodes" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:267 +msgid "leave: do_setup" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:289 +msgid "enter: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:295 +#: cinder/volume/drivers/netapp/iscsi.py:169 +#: cinder/volume/drivers/netapp/iscsi.py:1182 +#: cinder/volume/drivers/netapp/iscsi.py:1535 +#: cinder/volume/drivers/netapp/nfs.py:109 +#: cinder/volume/drivers/netapp/nfs.py:328 +#: cinder/volume/drivers/netapp/nfs.py:379 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:118 +#, python-format +msgid "%s is not set" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:301 +msgid "" +"Password or SSH private key is required for authentication: set either " +"san_password or san_private_key option" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:309 +#, python-format +msgid "" +"Illegal value %d specified for storwize_svc_flashcopy_timeout: valid " +"values are between 0 and 600" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:317 +msgid "leave: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:331 +#, python-format +msgid "ensure_export: Volume %s not found on storage" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:356 +#, python-format +msgid "enter: _get_chap_secret_for_host: host name %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:392 +#, python-format +msgid "" +"leave: _get_chap_secret_for_host: host name %(host_name)s with secret " +"%(chap_secret)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:415 +msgid "_create_host: Cannot clean host name. 
Host name is not unicode or string" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:483 +#, python-format +msgid "enter: _get_host_from_connector: prefix %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:511 +#, python-format +msgid "leave: _get_host_from_connector: host %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:523 +#, python-format +msgid "enter: _create_host: host %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:538 +msgid "_create_host: No connector ports" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:551 +#, python-format +msgid "leave: _create_host: host %(host)s - %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:574 +#, python-format +msgid "enter: _map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:609 +msgid "" +"storwize_svc_multihostmap_enabled is set to Flase, Not allow multi host " +"mapping" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:619 +#, python-format +msgid "volume %s mapping to multi host" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:625 +#, python-format +msgid "" +"leave: _map_vol_to_host: LUN %(result_lun)s, volume %(volume_name)s, host" +" %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:635 +#, python-format +msgid "enter: _delete_host: host %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:643 +#, python-format +msgid "leave: _delete_host: host %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:675 +#, python-format +msgid "enter: initialize_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:691 +msgid "_create_host failed to return the host name." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:702 +#, python-format +msgid "initialize_connection: Failed to get attributes for volume %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:709 +#, python-format +msgid "Did not find expected column name in lsvdisk: %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:711 +#, python-format +msgid "initialize_connection: Missing volume attribute for volume %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:728 +#, python-format +msgid "" +"initialize_connection: No node found in I/O group %(gid)s for volume " +"%(vol)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:736 +#, python-format +msgid "initialize_connection: Did not find a preferred node for volume %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:769 +#, python-format +msgid "" +"initialize_connection: Failed to collect return properties for volume " +"%(vol)s and connector %(conn)s.\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:774 +#, python-format +msgid "" +"leave: initialize_connection:\n" +" volume: %(vol)s\n" +" connector %(conn)s\n" +" properties: %(prop)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:792 +#, python-format +msgid "enter: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:802 +msgid "_get_host_from_connector failed to return the host name for connector" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:817 +#, python-format +msgid "" +"terminate_connection: No mapping of volume %(vol_name)s to host " +"%(host_name)s found" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:825 +#, python-format +msgid "leave: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: 
+msgid "protocol must be specified as 'iSCSI' or 'FC'"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:907
+#, python-format
+msgid "enter: _create_vdisk: vdisk %s "
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:945
+#, python-format
+msgid ""
+"_create_vdisk %(name)s - did not find success message in CLI output.\n"
+" stdout: %(out)s\n"
+" stderr: %(err)s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:950
+#, python-format
+msgid "leave: _create_vdisk: volume %s "
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:962
+#: cinder/volume/drivers/storwize_svc.py:976
+#, python-format
+msgid ""
+"create FC mapping from %(source)s to %(target)s - did not find success "
+"message in CLI output.\n"
+" stdout: %(out)s\n"
+" stderr: %(err)s\n"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:988
+#: cinder/volume/drivers/storwize_svc.py:998
+#, python-format
+msgid ""
+"create FC mapping from %(source)s to %(target)s - did not find mapping id"
+" in CLI output.\n"
+" stdout: %(out)s\n"
+" stderr: %(err)s\n"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:1012
+#, python-format
+msgid ""
+"_prepare_fc_map: Failed to prepare FlashCopy from %(source)s to "
+"%(target)s.\n"
+"stdout: %(out)s\n"
+" stderr: %(err)s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:1039
+#, python-format
+msgid ""
+"Unexpected mapping status %(status)s for mapping %(id)s. Attributes: "
+"%(attr)s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:1050
+#, python-format
+msgid ""
+"Mapping %(id)s prepare failed to complete within the allotted %(to)d "
+"seconds timeout. Terminating."
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:1055
+#, python-format
+msgid ""
+"_prepare_fc_map: Failed to start FlashCopy from %(source)s to %(target)s "
+"with exception %(ex)s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:1062
+#, python-format
+msgid "_prepare_fc_map: %s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:1069
+#, python-format
+msgid ""
+"_start_fc_map: Failed to start FlashCopy from %(source)s to %(target)s.\n"
+"stdout: %(out)s\n"
+" stderr: %(err)s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:1080
+#, python-format
+msgid ""
+"enter: _run_flashcopy: execute FlashCopy from source %(source)s to target"
+" %(target)s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:1092
+#, python-format
+msgid "leave: _run_flashcopy: FlashCopy started from %(source)s to %(target)s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:1100
+#, python-format
+msgid "enter: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:1107
+#, python-format
+msgid "_create_copy: Source vdisk %s does not exist"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:1119
+#, python-format
+msgid ""
+"_create_copy: cannot get source vdisk %(src)s capacity from vdisk "
+"attributes %(attr)s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:1129
+#, python-format
+msgid "leave: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:1134
+#, python-format
+msgid "enter: _get_flashcopy_mapping_attributes: mapping %s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:1156
+#, python-format
+msgid ""
+"leave: _get_flashcopy_mapping_attributes: mapping %(fc_map_id)s, "
+"attributes %(attributes)s"
+msgstr ""
+
+#: cinder/volume/drivers/storwize_svc.py:1164
+#, python-format
+msgid 
"enter: _is_vdisk_defined: vdisk %s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1166 +#, python-format +msgid "leave: _is_vdisk_defined: vdisk %(vol)s with %(str)s " +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1194 +#, python-format +msgid "enter: _delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1199 +#, python-format +msgid "warning: Tried to delete vdisk %s but it does not exist." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1225 +#, python-format +msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1258 +#, python-format +msgid "leave: _delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1283 +msgid "create_volume_from_snapshot: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1297 +msgid "create_cloned_volume: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1361 +msgid "_update_volume_status: Could not get system name" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1373 +msgid "Could not get pool data from the storage" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1374 +msgid "_update_volume_status: Could not get storage pool data" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1412 +msgid "" +"Illegal value specified for storwize_svc_vol_rsize: set to either a " +"percentage (0-100) or -1" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1418 +msgid "" +"Illegal value specified for storwize_svc_vol_warning: set to a percentage" +" (0-100)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1425 +msgid "" +"Illegal value specified for storwize_svc_vol_grainsize: set to either 32," +" 64, 128, or 256" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1432 +msgid "System does not support compression" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1437 +msgid "If compression is set to True, rsize must also be set (not equal to -1)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1443 +#, python-format +msgid "" +"Illegal value %(prot)s specified for storwize_svc_connection_protocol: " +"valid values are %(enabled)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1452 +msgid "" +"Multipath is currently only supported for FC connections and not iSCSI. 
" +"(This is a Nova limitation.)" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1464 +#, python-format +msgid "enter: _execute_command_and_parse_attributes: command %s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1471 +#, fuzzy, python-format +msgid "" +"CLI Exception output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" +"%(description)s\n" +"命令: %(cmd)s\n" +"退出代碼: %(exit_code)s\n" +"標準輸出: %(stdout)r\n" +"標準錯誤輸出: %(stderr)r" + +#: cinder/volume/drivers/storwize_svc.py:1488 +#, python-format +msgid "" +"leave: _execute_command_and_parse_attributes:\n" +"command: %(cmd)s\n" +"attributes: %(attr)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1507 +#, python-format +msgid "" +"_get_hdr_dic: attribute headers and values do not match.\n" +" Headers: %(header)s\n" +" Values: %(row)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1517 +#, python-format +msgid "" +"%(fun)s: Failed with unexpected CLI output.\n" +" Command: %(cmd)s\n" +"stdout: %(out)s\n" +"stderr: %(err)s\n" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1531 +#, python-format +msgid "" +"%(fun)s: Failed with unexpected CLI output.\n" +" Command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/storwize_svc.py:1539 +#, python-format +msgid "Did not find expected column in %(fun)s: %(hdr)s" +msgstr "" + +#: cinder/volume/drivers/windows.py:142 +#, python-format +msgid "Creating folder %s " +msgstr "" + +#: cinder/volume/drivers/windows.py:207 +#: cinder/volume/drivers/nexenta/volume.py:203 +#, python-format +msgid "Ignored target creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/windows.py:213 +#, python-format +msgid "Disk not found: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:218 +#, python-format +msgid "Sending %(method)s to %(url)s. Body \"%(body)s\"" +msgstr "" + +#: cinder/volume/drivers/zadara.py:240 +#, python-format +msgid "Operation completed. %(data)s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:350 +#, python-format +msgid "Volume %(name)s could not be found. It might be already deleted" +msgstr "" + +#: cinder/volume/drivers/zadara.py:447 +#, python-format +msgid "Attach properties: %(properties)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:42 +msgid "" +"Module PyWBEM not installed. Install PyWBEM using the python-pywbem " +"package." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:78 +msgid "Entering create_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:82 +#, python-format +msgid "Create Volume: %(volume)s Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:90 +#, python-format +msgid "Create Volume: %(volume)s Storage type: %(storage_type)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:97 +#, python-format +msgid "" +"Create Volume: %(volume)s Pool: %(pool)s Storage System: " +"%(storage_system)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:106 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Storage Configuration Service not " +"found for pool %(storage_type)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:114 +#, python-format +msgid "" +"Create Volume: %(name)s Method: CreateOrModifyElementFromStoragePool " +"ConfigServicie: %(service)s ElementName: %(name)s InPool: %(pool)s " +"ElementType: 5 Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:129 +#, python-format +msgid "Create Volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:136 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:143 +#, python-format +msgid "Leaving create_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:151 +msgid "Entering create_volume_from_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:156 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:166 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Snapshot Instance: %(snapshotinstance)s Storage " +"System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:176 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Create Volume from Snapshot is NOT supported on VMAX." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:187 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Cannot find Replication Service to create volume from " +"snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:196 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Method: CreateElementReplica ReplicationService: " +"%(service)s ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:217 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s " +"Snapshot:%(snapshotname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:229 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Successfully clone volume from snapshot. Finding the " +"clone relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:240 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Remove the clone relationship. Method: " +"ModifyReplicaSynchronization ReplicationService: %(service)s Operation: " +"8 Synchronization: %(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:256 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:265 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. 
Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:277 +#, python-format +msgid "" +"Leaving create_volume_from_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:286 +msgid "Entering create_cloned_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:291 +#, python-format +msgid "" +"Create a Clone from Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:301 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Source Instance: %(src_instance)s Storage System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:311 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Cannot find Replication Service to create cloned volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:320 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Method: CreateElementReplica ReplicationService: %(service)s " +"ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:341 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source " +"Volume:%(srcname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:353 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Successfully cloned volume from source volume. Finding the clone " +"relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:364 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Remove the clone relationship. Method: ModifyReplicaSynchronization " +"ReplicationService: %(service)s Operation: 8 Synchronization: " +"%(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:380 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:389 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:401 +#, python-format +msgid "" +"Leaving create_cloned_volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:410 +msgid "Entering delete_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:412 +#, python-format +msgid "Delete Volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:419 +#, python-format +msgid "Volume %(name)s not found on the array. No volume to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:429 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Storage Configuration Service not " +"found." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:437 +#, python-format +msgid "Delete Volume: %(name)s DeviceID: %(deviceid)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:441 +#, python-format +msgid "" +"Delete Volume: %(name)s Method: EMCReturnToStoragePool ConfigServic: " +"%(service)s TheElement: %(vol_instance)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:454 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:463 +#, python-format +msgid "Leaving delete_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:470 +msgid "Entering create_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:474 +#, python-format +msgid "Create snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:486 +#, python-format +msgid "Device ID: %(deviceid)s: Storage System: %(storagesystem)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:493 +#: cinder/volume/drivers/emc/emc_smis_common.py:495 +#: cinder/volume/drivers/emc/emc_smis_common.py:565 +#, python-format +msgid "Cannot find Replication Service to create snapshot for volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:500 +#, python-format +msgid "" +"Create Snapshot: Method: CreateElementReplica: Target: %(snapshot)s " +"Source: %(volume)s Replication Service: %(service)s ElementName: " +"%(elementname)s Sync Type: 7 SourceElement: %(sourceelement)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:516 +#, python-format +msgid "" +"Create Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:525 +#, python-format +msgid "Error Create Snapshot: (snapshot)s Volume: %(volume)s Error: %(errordesc)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:533 +#, python-format +msgid "" +"Leaving create_snapshot: Snapshot: %(snapshot)s Volume: %(volume)s " +"Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:539 +msgid "Entering delete_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:543 +#, python-format +msgid "Delete Snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:549 +#, python-format +msgid "" +"Delete Snapshot: %(snapshot)s: volume: %(volume)s. Finding " +"StorageSychronization_SV_SV." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:557 +#, python-format +msgid "" +"Snapshot: %(snapshot)s: volume: %(volume)s not found on the array. No " +"snapshot to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:572 +#, python-format +msgid "" +"Delete Snapshot: Target: %(snapshot)s Source: %(volume)s. Method: " +"ModifyReplicaSynchronization: Replication Service: %(service)s " +"Operation: 19 Synchronization: %(sync_name)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:588 +#, python-format +msgid "" +"Delete Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:597 +#, python-format +msgid "" +"Error Delete Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. 
Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:609 +#, python-format +msgid "" +"Leaving delete_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:619 +#, python-format +msgid "Create export: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:624 +#, python-format +msgid "create_export: Volume: %(volume)s Device ID: %(device_id)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:646 +#, python-format +msgid "" +"ExposePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(lun_name)s" +" InitiatorPortIDs: %(initiator)s DeviceAccesses: 2" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:661 +#, python-format +msgid "ExposePaths parameter LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:672 +#, python-format +msgid "Error mapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:676 +#, python-format +msgid "ExposePaths for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:692 +#, python-format +msgid "" +"HidePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(device_id)s " +"LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:705 +#, python-format +msgid "Error unmapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:709 +#, python-format +msgid "HidePaths for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:722 +#, python-format +msgid "" +"AddMembers: ConfigServicie: %(service)s MaskingGroup: %(masking_group)s" +" Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:735 +#, python-format +msgid "Error mapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:740 +#, python-format +msgid "AddMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:753 +#, python-format +msgid "" +"RemoveMembers: ConfigServicie: %(service)s MaskingGroup: " +"%(masking_group)s Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:766 +#, python-format +msgid "Error unmapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:771 +#, python-format +msgid "RemoveMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:777 +#, python-format +msgid "Map volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:786 +#: cinder/volume/drivers/emc/emc_smis_common.py:816 +#, python-format +msgid "Cannot find Controller Configuration Service for storage system %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:800 +#, python-format +msgid "Unmap volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:806 +#, python-format +msgid "Volume %s is not mapped. No volume to unmap." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:830 +#, python-format +msgid "Initialize connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:836 +#, python-format +msgid "Volume %s is already mapped." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:848 +#, python-format +msgid "Terminate connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:880 +#, python-format +msgid "Found Storage Type: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:883 +msgid "Storage type not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:899 +#, python-format +msgid "Found Masking View: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:902 +msgid "Masking View not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:924 +msgid "Ecom user not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:944 +#, python-format +msgid "Ecom IP: %(ecomIp)s Port: %(ecomPort)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:947 +msgid "Ecom server not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:954 +msgid "Cannot connect to ECOM server" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:966 +#, python-format +msgid "Found Replication Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:979 +#, python-format +msgid "Found Storage Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:992 +#, python-format +msgid "Found Controller Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1005 +#, python-format +msgid "Found Storage Hardware ID Management Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1049 +#, python-format +msgid "Pool %(storage_type)s is not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1055 +#, python-format +msgid "Storage system not found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1061 +#, python-format +msgid "Pool: %(pool)s SystemName: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1077 +#, python-format +msgid "Pool name: %(poolname)s System name: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1109 +#, python-format +msgid "Volume %(volumename)s not found on the array." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1112 +#, python-format +msgid "Volume name: %(volumename)s Volume instance: %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1125 +#, python-format +msgid "Source: %(volumename)s Target: %(snapshotname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1148 +#, python-format +msgid "" +"Source: %(volumename)s Target: %(snapshotname)s. Storage Synchronized " +"not found. " +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1153 +#, python-format +msgid "" +"Storage system: %(storage_system)s Storage Synchronized instance: " +"%(sync)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1179 +#, python-format +msgid "Error finding %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1183 +#, python-format +msgid "Found %(name)s: %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1243 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage_system)s " +"and initiator %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1282 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage volume %(vol)s and initiator" +" %(initiator)s is %(ctrl)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1295 +#, python-format +msgid "" +"Volume %(name)s not found on the array. Cannot determine if there are " +"volumes mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1307 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage)s and " +"%(connector)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1319 +#, python-format +msgid "" +"Found %(numVolumesMapped)d volumes on storage system %(storage)s mapped " +"to %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1354 +#, python-format +msgid "Available device number on %(storage)s: %(device)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1397 +#, python-format +msgid "Device number not found for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1402 +#, python-format +msgid "Found device number %(device)d for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1412 +#, python-format +msgid "Device info: %(data)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1434 +#, python-format +msgid "Masking view: %(view)s DeviceMaskingGroup: %(masking)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1456 +#, python-format +msgid "Found Storage Processor System: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1484 +#, python-format +msgid "" +"iSCSIProtocolEndpoint for storage system %(storage_system)s and SP %(sp)s" +" is %(endpoint)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1513 +msgid "Error finding Storage Hardware ID Service." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1519 +#, python-format +msgid "" +"EMCGetTargetEndpoints: Service: %(service)s Storage HardwareIDs: " +"%(hardwareids)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1531 +msgid "Error finding Target WWNs." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1541 +#, python-format +msgid "Add target WWN: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1543 +#, python-format +msgid "Target WWNs: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1559 +#, python-format +msgid "Storage Hardware IDs for %(wwpns)s is %(foundInstances)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:158 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:167 +#, fuzzy, python-format +msgid "Cannot find device number for volume %s" +msgstr "找不到Volume %s" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:197 +#, python-format +msgid "Found iSCSI endpoint: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:204 +#, python-format +msgid "ISCSI endpoint not found for SP %(sp)s on storage system %(storage)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:221 +#, python-format +msgid "ISCSI properties: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:113 +msgid "read timed out" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:138 +msgid "do_setup." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:144 +msgid "check_for_setup_error." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:149 +msgid "check_for_setup_error: Can not get device type." 
+msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:153 +#, python-format +msgid "check_for_setup_error: Device type is:%(type)s, version is:%(version)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:160 +msgid "" +"check_for_setup_error: Product version not right. Please make sure the " +"product version is V1." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:171 +msgid "_get_device_type: Storage Pool must be configured." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:180 +#, python-format +msgid "create_volume:volume name: %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:194 +#, python-format +msgid "delete_volume: volume name: %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:201 +#, python-format +msgid "delete_volume:No need to delete volume. Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:210 +#, python-format +msgid "create_export: volume name:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:214 +#, python-format +msgid "create_export:Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:234 +#, python-format +msgid "initialize_connection: volume name: %(volume)s. initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:249 +#, python-format +msgid "" +"initialize_connection:Failed to find target ip for " +"initiator:%(initiatorname)s, please check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:259 +#, python-format +msgid "initialize_connection:Failed to find target iSCSI iqn. Target IP:%(ip)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:297 +#, python-format +msgid "" +"initialize_connection:host name: %(host)s, initiator name: %(ini)s, " +"hostport name: %(port)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:307 +#, python-format +msgid "" +"initialize_connection:Failed to find the given volume. volume " +"name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:359 +#, python-format +msgid "terminate_connection:volume name: %(volume)s, initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:368 +#, python-format +msgid "terminate_connection:Host does not exist. Host name:%(host)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:377 +#, python-format +msgid "terminate_connection:volume does not exist. volume name:%(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:396 +#, python-format +msgid "" +"terminate_connection:No map between host and volume. Host " +"name:%(hostname)s, volume name:%(volumename)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:414 +#, python-format +msgid "" +"terminate_connection:No initiator is added to the host. Host " +"name:%(hostname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:427 +#, python-format +msgid "create_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:434 +msgid "create_snapshot:Device does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:440 +msgid "create_snapshot:Resource pool needs 1GB valid size at least." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:447 +#, python-format +msgid "create_snapshot:Volume does not exist. 
Volume name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:456 +#, python-format +msgid "create_snapshot:Snapshot does not exist. Snapshot name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:468 +#, python-format +msgid "delete_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:475 +msgid "delete_snapshot:Device does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:484 +#, python-format +msgid "delete_snapshot:Snapshot does not exist. snapshot name:%(snap)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:497 +#, python-format +msgid "" +"create_volume_from_snapshot:snapshot name:%(snapshot)s, volume " +"name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:505 +#, python-format +msgid "" +"create_volume_from_snapshot:Device does not support create volume from " +"snapshot. Volume name:%(volume)s, snapshot name:%(snapshot)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:516 +#, python-format +msgid "" +"create_volume_from_snapshot:Snapshot does not exist. Snapshot " +"name:%(name)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:567 +msgid "Config file is wrong. Controler IP, UserName and UserPassword must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:573 +#, python-format +msgid "_check_conf_file: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:584 +#, python-format +msgid "_read_xml:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:612 +#, python-format +msgid "Write login information to xml error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:616 +#, python-format +msgid "_get_login_info error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:638 +#, python-format +msgid "" +"Config file is wrong. LUNType must be \"Thin\" or \"Thick\". " +"LUNType:%(type)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:665 +msgid "_get_lun_set_info:Use default prefetch type. Prefetch type:Intelligent." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:676 +msgid "" +"_get_lun_set_info:No available pools! Please check whether storage pool " +"is created." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:686 +#, python-format +msgid "_get_lun_set_info:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:736 +msgid "" +"_get_maximum_pool:maxpoolid is None. Please check config file and make " +"sure the \"Name\" in \"StoragePool\" is right." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:754 +#, python-format +msgid "_get_iscsi_info:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:764 +#, python-format +msgid "CLI command:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:831 +#, python-format +msgid "_execute_cli:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:838 +#, python-format +msgid "" +"_name_translate:Name in cinder: %(old)s, new name in storage system: " +"%(new)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:872 +#, python-format +msgid "" +"_create_hostgroup:Failed to Create hostgroup. Hostgroup name: %(name)s. " +"out:%(out)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:901 +#, python-format +msgid "" +"_add_host:Failed to add host to hostgroup. 
host name:%(host)s hostgroup " +"id:%(hostgroup)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:927 +#, python-format +msgid "" +"_add_initiator:Failed to add initiator. initiator name:%(name)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:941 +#, python-format +msgid "" +"_delete_initiator:ERROE:Failed to delete initiator. initiator " +"name:%(name)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:991 +#, python-format +msgid "" +"_add_hostport:Failed to add hostport. port name:%(port)s port " +"information:%(info)s host id:%(host)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1009 +#, python-format +msgid "_delete_hostport:Failed to delete host port. port id:%(portid)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1016 +#, python-format +msgid "_get_tgt_iqn:iSCSI IP is %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1049 +#, python-format +msgid "_get_tgt_iqn:iSCSI target iqn is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1086 +#, python-format +msgid "" +"_map_lun:Failed to add hostmap. hostid:%(host)s lunid:%(lun)s " +"hostlunid:%(hostlunid)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1122 +#, python-format +msgid "" +"_delete_map:There are IOs accessing the system. Retry to delete host map." +" map id:%(mapid)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1128 +#, python-format +msgid "_delete_map:Failed to delete host map. mapid:%(mapid)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1142 +#, python-format +msgid "_delete_host: Failed delete host. host id:%(hostid)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1223 +#, python-format +msgid "" +"_active_snapshot:Failed to active snapshot. snapshot id:%(name)s. " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1237 +#, python-format +msgid "" +"_disable_snapshot:Failed to disable snapshot. snapshot id:%(id)s. " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1251 +#, python-format +msgid "" +"_delete_snapshot:Failed to delete snapshot. snapshot id:%(id)s. " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1308 +#, python-format +msgid "_create_volume:Failed to Create volume. volume name:%(name)s. out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1321 +#, python-format +msgid "_delete_volume:Failed to delete volume. Volume name:%(name)s out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1338 +#, python-format +msgid "" +"_create_luncopy:Failed to Create LUNcopy. LUNcopy name:%(name)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1352 +#, python-format +msgid "" +"_start_luncopy:Failed to start LUNcopy. LUNcopy id:%(luncopyid)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1382 +#, python-format +msgid "" +"_wait_for_luncopy:LUNcopy status is not normal. LUNcopy " +"name:%(luncopyname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1415 +#, python-format +msgid "" +"_delete_luncopy:Failed to delete LUNcopy. LUNcopy id:%(luncopyid)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1430 +#, python-format +msgid "" +"_create_snapshot:Failed to Create snapshot. 
Snapshot name:%(name)s " +"out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1468 +#, python-format +msgid "" +"_change_lun_controller:Failed to change lun owning controller. lun " +"id:%(lunid)s. new controller:%(controller)s. out:%(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_iscsi.py:1484 +msgid "_is_resource_pool_enough:Resource pool for snapshot not be added." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:123 +#, python-format +msgid "API %(name)s failed: %(reason)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:134 +#: cinder/volume/drivers/netapp/iscsi.py:1167 +#, python-format +msgid "Using WSDL: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:144 +#, python-format +msgid "Using DFM server: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:149 +#, python-format +msgid "Using storage service: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:154 +#, python-format +msgid "Using storage service prefix: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:160 +#, python-format +msgid "Using vfiler: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:173 +msgid "Either netapp_storage_service or netapp_storage_service_prefix must be set" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:204 +msgid "Connected to DFM server" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:282 +#, python-format +msgid "Discovered %(dataset_count)s datasets and %(lun_count)s LUNs" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:317 +#, python-format +msgid "Job failed: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:346 +msgid "" +"Attempt to use volume_type without specifying " +"netapp_storage_service_prefix flag." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:350 +msgid "" +"You must set the netapp_storage_service flag in order to create volumes " +"with no volume_type." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:420 +msgid "Failed to provision dataset member" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:435 +msgid "No LUN was created by the provision job" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:463 +#: cinder/volume/drivers/netapp/iscsi.py:1250 +#, python-format +msgid "No entry in LUN table for volume %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:481 +msgid "Failed to remove and delete dataset LUN member" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:497 +msgid "Failed to remove and delete dataset Qtree member" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:538 +#, python-format +msgid "No entry in LUN table for volume %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:558 +#, python-format +msgid "Failed to get LUN details for LUN ID %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:575 +#, python-format +msgid "Failed to get host details for host ID %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:812 +#: cinder/volume/drivers/netapp/iscsi.py:859 +#, fuzzy, python-format +msgid "No LUN ID for volume %s" +msgstr "找不到Volume %s" + +#: cinder/volume/drivers/netapp/iscsi.py:821 +#, python-format +msgid "Failed to get target portal for filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:826 +#, python-format +msgid "Failed to get target IQN for filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:930 +msgid "" +"Failed to get LUN list. Is the DFM host time-synchronized with Cinder " +"host?" 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1042 +#: cinder/volume/drivers/netapp/iscsi.py:1381 +#: cinder/volume/drivers/netapp/iscsi.py:1699 +#: cinder/volume/drivers/netapp/nfs.py:74 +#, python-format +msgid "" +"Cannot create volume of size %(vol_size)s from snapshot of size " +"%(snap_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1054 +#, python-format +msgid "" +"Cannot create volume of type %(new_type)s from snapshot of type " +"%(old_type)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1075 +#: cinder/volume/drivers/netapp/nfs.py:273 +#, python-format +msgid "" +"Cannot create clone of size %(vol_size)s from volume of size " +"%(src_vol_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1086 +#, python-format +msgid "Cannot create clone of type %(new_type)s from volume of type %(old_type)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1143 +#, python-format +msgid "No metadata property %(prop)s defined for the LUN %(name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1216 +#: cinder/volume/drivers/netapp/iscsi.py:1561 +msgid "Success getting LUN list from server" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1238 +#: cinder/volume/drivers/netapp/iscsi.py:1576 +#, python-format +msgid "Created LUN with name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1254 +#: cinder/volume/drivers/netapp/iscsi.py:1369 +#: cinder/volume/drivers/netapp/iscsi.py:1593 +#, python-format +msgid "Destroyed LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1291 +#, python-format +msgid "Mapped LUN %(handle)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1298 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(handle)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1303 +#: cinder/volume/drivers/netapp/iscsi.py:1637 +#, python-format +msgid "Failed to get LUN target details for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1307 +#: cinder/volume/drivers/netapp/iscsi.py:1648 +#, python-format +msgid "Failed to get target portal for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1311 +#: cinder/volume/drivers/netapp/iscsi.py:1651 +#, python-format +msgid "Failed to get target IQN for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1344 +#, python-format +msgid "Unmapped LUN %(handle)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1365 +#, python-format +msgid "No entry in LUN table for snapshot %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1409 +#: cinder/volume/drivers/netapp/iscsi.py:1907 +msgid "Object is not a NetApp LUN." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1419 +#: cinder/volume/drivers/netapp/iscsi.py:2188 +#, python-format +msgid "Cloned LUN with new name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1439 +#, fuzzy, python-format +msgid "Could not find handle for LUN named %s" +msgstr "找不到Volume %s" + +#: cinder/volume/drivers/netapp/iscsi.py:1458 +#: cinder/volume/drivers/netapp/iscsi.py:1936 +#, python-format +msgid "" +"Cannot clone volume of size %(vol_size)s from src volume of size " +"%(src_vol_size)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1518 +#, python-format +msgid "Using NetApp filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1585 +#, python-format +msgid "No entry in LUN table for volume/snapshot %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1628 +#, python-format +msgid "Mapped LUN %(name)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1632 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(name)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1688 +#, python-format +msgid "Snapshot %s deletion successful" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1717 +#, python-format +msgid "Unmapped LUN %(name)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1735 +#, fuzzy, python-format +msgid "Failed to get vol with required size for volume: %s" +msgstr "找不到Volume %s" + +#: cinder/volume/drivers/netapp/iscsi.py:1811 +#, python-format +msgid "Error mapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1829 +#, python-format +msgid "Error unmapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1923 +#, python-format +msgid "Could not find attribute for LUN named %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2057 +#, python-format +msgid "No iscsi service found for vserver %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2192 +#, python-format +msgid "No clonned lun named %s found on the filer" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2377 +#, python-format +msgid "Error finding luns for volume %(vol)s. Verify volume exists." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2481 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s completed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:2484 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s failed" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:312 +#, python-format +msgid "" +"Cloning with params ip %(host_ip)s, exp_path\n" +" %(export_path)s, vol %(volume_name)s,\n" +" clone_name %(clone_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:461 +#, python-format +msgid "No interface found on cluster for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:487 +#, python-format +msgid "" +"No volume on cluster with vserver\n" +" %(vserver)s and junction path " +"%(junction)s\n" +" " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:493 +#, python-format +msgid "" +"Cloning with params volume %(volume)s,src %(src_path)s,\n" +" dest %(dest_path)s, vserver %(vserver)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:547 +#, python-format +msgid "No storage path found for export path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:555 +#, python-format +msgid "Cloning with src %(src_path)s, dest %(dest_path)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/__init__.py:27 +msgid "Nexenta SA returned the error" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:64 +#, python-format +msgid "Sending JSON data: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:69 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:75 +msgid "No headers in server response" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:76 +msgid "Bad response from server" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:79 +#, python-format +msgid "Got response: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:99 +#, python-format +msgid "Volume %s does not exist in Nexenta SA" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:211 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:220 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:228 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:238 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:274 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/volume.py:282 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:74 +#, python-format +msgid "CLIQ command returned %s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:80 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:88 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. 
Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:118 +#, python-format +msgid "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. " +"Result=%(_xml)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:171 +#, python-format +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:235 +msgid "local_path not supported" +msgstr "" + +#: cinder/volume/drivers/san/san.py:148 +#, fuzzy, python-format +msgid "Error running SSH command: %s" +msgstr "非預期的執行錯誤" + +#: cinder/volume/drivers/san/san.py:169 +msgid "Specify san_password or san_private_key" +msgstr "" + +#: cinder/volume/drivers/san/san.py:173 +msgid "san_ip must be set" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:80 +#, python-format +msgid "Cannot parse list-view output: %s" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:174 +#, python-format +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:257 +#, python-format +msgid "SSH Command failed after '%(total_attempts)r' attempts: '%(command)s'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:261 +#, fuzzy, python-format +msgid "Error running ssh command: %s" +msgstr "非預期的執行錯誤" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:446 +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:104 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:103 +#, python-format +msgid "CPG (%s) doesn't exist on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:493 +#, python-format +msgid "Must specify a valid persona %(valid)s, value '%(persona)s' is invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:532 +#, python-format +msgid "" +"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " +"invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:569 +#, python-format +msgid "Volume (%s) already exists on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:622 +#, fuzzy, python-format +msgid "Unexpected state while cloning %s" +msgstr "非預期的執行錯誤" + +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:97 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:96 +msgid "Login to 3PAR array invalid" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_fc.py:218 +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:237 +#, python-format +msgid "The hostname must be called '%s'" +msgstr "" + diff --git a/cinder/manager.py b/cinder/manager.py new file mode 100644 index 0000000000..5f52e56865 --- /dev/null +++ b/cinder/manager.py @@ -0,0 +1,221 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Base Manager class. + +Managers are responsible for a certain aspect of the system. 
Each manager is a logical
+grouping of code relating to a portion of the system. In general, other
+components should use the manager to make changes to the components that
+it is responsible for.
+
+For example, other components that need to deal with volumes in some way
+should do so by calling methods on the VolumeManager instead of directly
+changing fields in the database. This allows us to keep all of the code
+relating to volumes in the same place.
+
+We have adopted a basic strategy of Smart managers and dumb data, which means
+rather than attaching methods to data objects, components should call manager
+methods that act on the data.
+
+Methods on managers that can be executed locally should be called directly. If
+a particular method must execute on a remote host, this should be done via rpc
+to the service that wraps the manager.
+
+Managers should be responsible for most of the db access, and
+non-implementation specific data. Anything implementation specific that can't
+be generalized should be done by the Driver.
+
+In general, we prefer to have one manager with multiple drivers for different
+implementations, but sometimes it makes sense to have multiple managers. You
+can think of it this way: Abstract different overall strategies at the manager
+level (FlatNetwork vs VlanNetwork), and different implementations at the
+driver level (LinuxNetDriver vs CiscoNetDriver).
+
+Managers will often provide methods for initial setup of a host or periodic
+tasks to a wrapping service.
+
+This module provides Manager, a base class for managers.
+
+"""
+
+from cinder.db import base
+from cinder import flags
+from cinder.openstack.common import log as logging
+from cinder.openstack.common.rpc import dispatcher as rpc_dispatcher
+from cinder.scheduler import rpcapi as scheduler_rpcapi
+from cinder import version
+
+
+FLAGS = flags.FLAGS
+
+
+LOG = logging.getLogger(__name__)
+
+
+def periodic_task(*args, **kwargs):
+    """Decorator to indicate that a method is a periodic task.
+
+    This decorator can be used in two ways:
+
+    1. Without arguments '@periodic_task', this will be run on every tick
+       of the periodic scheduler.
+
+    2. With arguments, @periodic_task(ticks_between_runs=N), this will be
+       run on every N ticks of the periodic scheduler.
+    """
+    def decorator(f):
+        f._periodic_task = True
+        f._ticks_between_runs = kwargs.pop('ticks_between_runs', 0)
+        return f
+
+    # NOTE(sirp): The `if` is necessary to allow the decorator to be used with
+    # and without parens.
+    #
+    # In the 'with-parens' case (with kwargs present), this function needs to
+    # return a decorator function since the interpreter will invoke it like:
+    #
+    #   periodic_task(*args, **kwargs)(f)
+    #
+    # In the 'without-parens' case, the original function will be passed
+    # in as the first argument, like:
+    #
+    #   periodic_task(f)
+    if kwargs:
+        return decorator
+    else:
+        return decorator(args[0])
+
+
+class ManagerMeta(type):
+    def __init__(cls, names, bases, dict_):
+        """Metaclass that allows us to collect decorated periodic tasks."""
+        super(ManagerMeta, cls).__init__(names, bases, dict_)
+
+        # NOTE(sirp): if the attribute is not present then we must be the base
+        # class, so go ahead and initialize it. If the attribute is present,
+        # then we're a subclass, so make a copy of it so we don't step on our
+        # parent's toes.
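+        #
+        # For illustration only (the names below are hypothetical, not part
+        # of this module): the tasks collected here would have been declared
+        # on a Manager subclass like
+        #
+        #     class ExampleManager(Manager):
+        #
+        #         @periodic_task
+        #         def _runs_every_tick(self, context):
+        #             pass
+        #
+        #         @periodic_task(ticks_between_runs=10)
+        #         def _runs_less_often(self, context):
+        #             pass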
+ try: + cls._periodic_tasks = cls._periodic_tasks[:] + except AttributeError: + cls._periodic_tasks = [] + + try: + cls._ticks_to_skip = cls._ticks_to_skip.copy() + except AttributeError: + cls._ticks_to_skip = {} + + for value in cls.__dict__.values(): + if getattr(value, '_periodic_task', False): + task = value + name = task.__name__ + cls._periodic_tasks.append((name, task)) + cls._ticks_to_skip[name] = task._ticks_between_runs + + +class Manager(base.Base): + __metaclass__ = ManagerMeta + + # Set RPC API version to 1.0 by default. + RPC_API_VERSION = '1.0' + + def __init__(self, host=None, db_driver=None): + if not host: + host = FLAGS.host + self.host = host + super(Manager, self).__init__(db_driver) + + def create_rpc_dispatcher(self): + '''Get the rpc dispatcher for this manager. + + If a manager would like to set an rpc API version, or support more than + one class as the target of rpc messages, override this method. + ''' + return rpc_dispatcher.RpcDispatcher([self]) + + def periodic_tasks(self, context, raise_on_error=False): + """Tasks to be run at a periodic interval.""" + for task_name, task in self._periodic_tasks: + full_task_name = '.'.join([self.__class__.__name__, task_name]) + + ticks_to_skip = self._ticks_to_skip[task_name] + if ticks_to_skip > 0: + LOG.debug(_("Skipping %(full_task_name)s, %(ticks_to_skip)s" + " ticks left until next run"), locals()) + self._ticks_to_skip[task_name] -= 1 + continue + + self._ticks_to_skip[task_name] = task._ticks_between_runs + LOG.debug(_("Running periodic task %(full_task_name)s"), locals()) + + try: + task(self, context) + except Exception as e: + if raise_on_error: + raise + LOG.exception(_("Error during %(full_task_name)s: %(e)s"), + locals()) + + def init_host(self): + """Handle initialization if this is a standalone service. + + Child classes should override this method. + + """ + pass + + def service_version(self, context): + return version.version_string() + + def service_config(self, context): + config = {} + for key in FLAGS: + config[key] = FLAGS.get(key, None) + return config + + +class SchedulerDependentManager(Manager): + """Periodically send capability updates to the Scheduler services. + + Services that need to update the Scheduler of their capabilities + should derive from this class. Otherwise they can derive from + manager.Manager directly. Updates are only sent after + update_service_capabilities is called with non-None values. + + """ + + def __init__(self, host=None, db_driver=None, service_name='undefined'): + self.last_capabilities = None + self.service_name = service_name + self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI() + super(SchedulerDependentManager, self).__init__(host, db_driver) + + def update_service_capabilities(self, capabilities): + """Remember these capabilities to send on next periodic update.""" + self.last_capabilities = capabilities + + @periodic_task + def _publish_service_capabilities(self, context): + """Pass data back to the scheduler at a periodic interval.""" + if self.last_capabilities: + LOG.debug(_('Notifying Schedulers of capabilities ...')) + self.scheduler_rpcapi.update_service_capabilities( + context, + self.service_name, + self.host, + self.last_capabilities) diff --git a/cinder/openstack/__init__.py b/cinder/openstack/__init__.py new file mode 100644 index 0000000000..0a3b98867a --- /dev/null +++ b/cinder/openstack/__init__.py @@ -0,0 +1,15 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 Red Hat, Inc. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/cinder/openstack/common/README b/cinder/openstack/common/README new file mode 100644 index 0000000000..def4a172aa --- /dev/null +++ b/cinder/openstack/common/README @@ -0,0 +1,13 @@ +openstack-common +---------------- + +A number of modules from openstack-common are imported into this project. + +These modules are "incubating" in openstack-common and are kept in sync +with the help of openstack-common's update.py script. See: + + http://wiki.openstack.org/CommonLibrary#Incubation + +The copy of the code should never be directly modified here. Please +always update openstack-common first and then run the script to copy +the changes across. diff --git a/cinder/openstack/common/__init__.py b/cinder/openstack/common/__init__.py new file mode 100644 index 0000000000..0a3b98867a --- /dev/null +++ b/cinder/openstack/common/__init__.py @@ -0,0 +1,15 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/cinder/openstack/common/context.py b/cinder/openstack/common/context.py new file mode 100644 index 0000000000..e9cfd73cc1 --- /dev/null +++ b/cinder/openstack/common/context.py @@ -0,0 +1,81 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Simple class that stores security context information in the web request. + +Projects should subclass this class if they wish to enhance the request +context or provide additional information in their specific WSGI pipeline. +""" + +import itertools +import uuid + + +def generate_request_id(): + return 'req-' + str(uuid.uuid4()) + + +class RequestContext(object): + + """ + Stores information about the security context under which the user + accesses the system, as well as additional request information. 
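+
+    A minimal usage sketch (all values are illustrative)::
+
+        ctxt = RequestContext(auth_token='a-token', user='alice',
+                              tenant='demo')
+        ctxt.to_dict()  # serializable dict; a request_id of the form
+                        # 'req-<uuid>' is generated automatically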
+ """ + + def __init__(self, auth_token=None, user=None, tenant=None, is_admin=False, + read_only=False, show_deleted=False, request_id=None): + self.auth_token = auth_token + self.user = user + self.tenant = tenant + self.is_admin = is_admin + self.read_only = read_only + self.show_deleted = show_deleted + if not request_id: + request_id = generate_request_id() + self.request_id = request_id + + def to_dict(self): + return {'user': self.user, + 'tenant': self.tenant, + 'is_admin': self.is_admin, + 'read_only': self.read_only, + 'show_deleted': self.show_deleted, + 'auth_token': self.auth_token, + 'request_id': self.request_id} + + +def get_admin_context(show_deleted="no"): + context = RequestContext(None, + tenant=None, + is_admin=True, + show_deleted=show_deleted) + return context + + +def get_context_from_function_and_args(function, args, kwargs): + """Find an arg of type RequestContext and return it. + + This is useful in a couple of decorators where we don't + know much about the function we're wrapping. + """ + + for arg in itertools.chain(kwargs.values(), args): + if isinstance(arg, RequestContext): + return arg + + return None diff --git a/cinder/openstack/common/eventlet_backdoor.py b/cinder/openstack/common/eventlet_backdoor.py new file mode 100644 index 0000000000..c0ad460fe6 --- /dev/null +++ b/cinder/openstack/common/eventlet_backdoor.py @@ -0,0 +1,87 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2012 OpenStack Foundation. +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import gc +import pprint +import sys +import traceback + +import eventlet +import eventlet.backdoor +import greenlet +from oslo.config import cfg + +eventlet_backdoor_opts = [ + cfg.IntOpt('backdoor_port', + default=None, + help='port for eventlet backdoor to listen') +] + +CONF = cfg.CONF +CONF.register_opts(eventlet_backdoor_opts) + + +def _dont_use_this(): + print "Don't use this, just disconnect instead" + + +def _find_objects(t): + return filter(lambda o: isinstance(o, t), gc.get_objects()) + + +def _print_greenthreads(): + for i, gt in enumerate(_find_objects(greenlet.greenlet)): + print i, gt + traceback.print_stack(gt.gr_frame) + print + + +def _print_nativethreads(): + for threadId, stack in sys._current_frames().items(): + print threadId + traceback.print_stack(stack) + print + + +def initialize_if_enabled(): + backdoor_locals = { + 'exit': _dont_use_this, # So we don't exit the entire process + 'quit': _dont_use_this, # So we don't exit the entire process + 'fo': _find_objects, + 'pgt': _print_greenthreads, + 'pnt': _print_nativethreads, + } + + if CONF.backdoor_port is None: + return None + + # NOTE(johannes): The standard sys.displayhook will print the value of + # the last expression and set it to __builtin__._, which overwrites + # the __builtin__._ that gettext sets. 
Let's switch to using pprint + # since it won't interact poorly with gettext, and it's easier to + # read the output too. + def displayhook(val): + if val is not None: + pprint.pprint(val) + sys.displayhook = displayhook + + sock = eventlet.listen(('localhost', CONF.backdoor_port)) + port = sock.getsockname()[1] + eventlet.spawn_n(eventlet.backdoor.backdoor_server, sock, + locals=backdoor_locals) + return port diff --git a/cinder/openstack/common/exception.py b/cinder/openstack/common/exception.py new file mode 100644 index 0000000000..c8690157f5 --- /dev/null +++ b/cinder/openstack/common/exception.py @@ -0,0 +1,142 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Exceptions common to OpenStack projects +""" + +import logging + +from cinder.openstack.common.gettextutils import _ + +_FATAL_EXCEPTION_FORMAT_ERRORS = False + + +class Error(Exception): + def __init__(self, message=None): + super(Error, self).__init__(message) + + +class ApiError(Error): + def __init__(self, message='Unknown', code='Unknown'): + self.message = message + self.code = code + super(ApiError, self).__init__('%s: %s' % (code, message)) + + +class NotFound(Error): + pass + + +class UnknownScheme(Error): + + msg = "Unknown scheme '%s' found in URI" + + def __init__(self, scheme): + msg = self.__class__.msg % scheme + super(UnknownScheme, self).__init__(msg) + + +class BadStoreUri(Error): + + msg = "The Store URI %s was malformed. Reason: %s" + + def __init__(self, uri, reason): + msg = self.__class__.msg % (uri, reason) + super(BadStoreUri, self).__init__(msg) + + +class Duplicate(Error): + pass + + +class NotAuthorized(Error): + pass + + +class NotEmpty(Error): + pass + + +class Invalid(Error): + pass + + +class BadInputError(Exception): + """Error resulting from a client sending bad input to a server""" + pass + + +class MissingArgumentError(Error): + pass + + +class DatabaseMigrationError(Error): + pass + + +class ClientConnectionError(Exception): + """Error resulting from a client connecting to a server""" + pass + + +def wrap_exception(f): + def _wrap(*args, **kw): + try: + return f(*args, **kw) + except Exception, e: + if not isinstance(e, Error): + #exc_type, exc_value, exc_traceback = sys.exc_info() + logging.exception(_('Uncaught exception')) + #logging.error(traceback.extract_stack(exc_traceback)) + raise Error(str(e)) + raise + _wrap.func_name = f.func_name + return _wrap + + +class OpenstackException(Exception): + """ + Base Exception + + To correctly use this class, inherit from it and define + a 'message' property. That message will get printf'd + with the keyword arguments provided to the constructor. 
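+
+    For example, a hypothetical subclass (shown for illustration only)::
+
+        class VolumeNotFound(OpenstackException):
+            message = "Volume %(volume_id)s could not be found"
+
+        str(VolumeNotFound(volume_id='vol-0001'))
+        # -> 'Volume vol-0001 could not be found'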
+ """ + message = "An unknown exception occurred" + + def __init__(self, **kwargs): + try: + self._error_string = self.message % kwargs + + except Exception as e: + if _FATAL_EXCEPTION_FORMAT_ERRORS: + raise e + else: + # at least get the core message out if something happened + self._error_string = self.message + + def __str__(self): + return self._error_string + + +class MalformedRequestBody(OpenstackException): + message = "Malformed message body: %(reason)s" + + +class InvalidContentType(OpenstackException): + message = "Invalid content type %(content_type)s" diff --git a/cinder/openstack/common/excutils.py b/cinder/openstack/common/excutils.py new file mode 100644 index 0000000000..ccb2d072e9 --- /dev/null +++ b/cinder/openstack/common/excutils.py @@ -0,0 +1,51 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack Foundation. +# Copyright 2012, Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Exception related utilities. +""" + +import contextlib +import logging +import sys +import traceback + +from cinder.openstack.common.gettextutils import _ + + +@contextlib.contextmanager +def save_and_reraise_exception(): + """Save current exception, run some code and then re-raise. + + In some cases the exception context can be cleared, resulting in None + being attempted to be re-raised after an exception handler is run. This + can happen when eventlet switches greenthreads or when running an + exception handler, code raises and catches an exception. In both + cases the exception context will be cleared. + + To work around this, we save the exception state, run handler code, and + then re-raise the original exception. If another exception occurs, the + saved exception is logged and the new exception is re-raised. + """ + type_, value, tb = sys.exc_info() + try: + yield + except Exception: + logging.error(_('Original exception being dropped: %s'), + traceback.format_exception(type_, value, tb)) + raise + raise type_, value, tb diff --git a/cinder/openstack/common/fileutils.py b/cinder/openstack/common/fileutils.py new file mode 100644 index 0000000000..b988ad03d5 --- /dev/null +++ b/cinder/openstack/common/fileutils.py @@ -0,0 +1,35 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ + +import errno +import os + + +def ensure_tree(path): + """Create a directory (and any ancestor directories required) + + :param path: Directory to create + """ + try: + os.makedirs(path) + except OSError as exc: + if exc.errno == errno.EEXIST: + if not os.path.isdir(path): + raise + else: + raise diff --git a/cinder/openstack/common/gettextutils.py b/cinder/openstack/common/gettextutils.py new file mode 100644 index 0000000000..5c0540b43d --- /dev/null +++ b/cinder/openstack/common/gettextutils.py @@ -0,0 +1,50 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +gettext for openstack-common modules. + +Usual usage in an openstack.common module: + + from cinder.openstack.common.gettextutils import _ +""" + +import gettext +import os + +_localedir = os.environ.get('cinder'.upper() + '_LOCALEDIR') +_t = gettext.translation('cinder', localedir=_localedir, fallback=True) + + +def _(msg): + return _t.ugettext(msg) + + +def install(domain): + """Install a _() function using the given translation domain. + + Given a translation domain, install a _() function using gettext's + install() function. + + The main difference from gettext.install() is that we allow + overriding the default localedir (e.g. /usr/share/locale) using + a translation-domain-specific environment variable (e.g. + NOVA_LOCALEDIR). + """ + gettext.install(domain, + localedir=os.environ.get(domain.upper() + '_LOCALEDIR'), + unicode=True) diff --git a/cinder/openstack/common/importutils.py b/cinder/openstack/common/importutils.py new file mode 100644 index 0000000000..3bd277f47e --- /dev/null +++ b/cinder/openstack/common/importutils.py @@ -0,0 +1,67 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Import related utilities and helper functions. 
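+
+A small usage sketch (the dotted paths are illustrative only)::
+
+    cls = import_class('cinder.volume.manager.VolumeManager')
+    inst = import_object('cinder.volume.manager.VolumeManager')
+    mod = import_module('cinder.openstack.common.jsonutils')
+    maybe = try_import('anyjson', default=None)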
+""" + +import sys +import traceback + + +def import_class(import_str): + """Returns a class from a string including module and class""" + mod_str, _sep, class_str = import_str.rpartition('.') + try: + __import__(mod_str) + return getattr(sys.modules[mod_str], class_str) + except (ValueError, AttributeError): + raise ImportError('Class %s cannot be found (%s)' % + (class_str, + traceback.format_exception(*sys.exc_info()))) + + +def import_object(import_str, *args, **kwargs): + """Import a class and return an instance of it.""" + return import_class(import_str)(*args, **kwargs) + + +def import_object_ns(name_space, import_str, *args, **kwargs): + """ + Import a class and return an instance of it, first by trying + to find the class in a default namespace, then failing back to + a full path if not found in the default namespace. + """ + import_value = "%s.%s" % (name_space, import_str) + try: + return import_class(import_value)(*args, **kwargs) + except ImportError: + return import_class(import_str)(*args, **kwargs) + + +def import_module(import_str): + """Import a module.""" + __import__(import_str) + return sys.modules[import_str] + + +def try_import(import_str, default=None): + """Try to import a module and if it fails return default.""" + try: + return import_module(import_str) + except ImportError: + return default diff --git a/cinder/openstack/common/jsonutils.py b/cinder/openstack/common/jsonutils.py new file mode 100644 index 0000000000..70134d4192 --- /dev/null +++ b/cinder/openstack/common/jsonutils.py @@ -0,0 +1,167 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# Copyright 2011 Justin Santa Barbara +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +''' +JSON related utilities. + +This module provides a few things: + + 1) A handy function for getting an object down to something that can be + JSON serialized. See to_primitive(). + + 2) Wrappers around loads() and dumps(). The dumps() wrapper will + automatically use to_primitive() for you if needed. + + 3) This sets up anyjson to use the loads() and dumps() wrappers if anyjson + is available. +''' + + +import datetime +import functools +import inspect +import itertools +import json +import types +import xmlrpclib + +from cinder.openstack.common import timeutils + + +_nasty_type_tests = [inspect.ismodule, inspect.isclass, inspect.ismethod, + inspect.isfunction, inspect.isgeneratorfunction, + inspect.isgenerator, inspect.istraceback, inspect.isframe, + inspect.iscode, inspect.isbuiltin, inspect.isroutine, + inspect.isabstract] + +_simple_types = (types.NoneType, int, basestring, bool, float, long) + + +def to_primitive(value, convert_instances=False, convert_datetime=True, + level=0, max_depth=3): + """Convert a complex object into primitives. + + Handy for JSON serialization. 
+    We can optionally handle instances,
+    but since this is a recursive function, we could have cyclical
+    data structures.
+
+    To handle cyclical data structures we could track the actual objects
+    visited in a set, but not all objects are hashable. Instead we just
+    track the depth of the object inspections and don't go too deep.
+
+    Therefore, convert_instances=True is lossy ... be aware.
+
+    """
+    # handle obvious types first - order of basic types determined by running
+    # full tests on nova project, resulting in the following counts:
+    # 572754
+    # 460353
+    # 379632
+    # 274610
+    # 199918
+    # 114200
+    # 51817
+    # 26164
+    # 6491
+    # 283
+    # 19
+    if isinstance(value, _simple_types):
+        return value
+
+    if isinstance(value, datetime.datetime):
+        if convert_datetime:
+            return timeutils.strtime(value)
+        else:
+            return value
+
+    # value of itertools.count doesn't get caught by nasty_type_tests
+    # and results in infinite loop when list(value) is called.
+    if type(value) == itertools.count:
+        return unicode(value)
+
+    # FIXME(vish): Workaround for LP bug 852095. Without this workaround,
+    #              tests that raise an exception in a mocked method that
+    #              has a @wrap_exception with a notifier will fail. If
+    #              we up the dependency to 0.5.4 (when it is released) we
+    #              can remove this workaround.
+    if getattr(value, '__module__', None) == 'mox':
+        return 'mock'
+
+    if level > max_depth:
+        return '?'
+
+    # The try block may not be necessary after the class check above,
+    # but just in case ...
+    try:
+        recursive = functools.partial(to_primitive,
+                                      convert_instances=convert_instances,
+                                      convert_datetime=convert_datetime,
+                                      level=level,
+                                      max_depth=max_depth)
+        if isinstance(value, dict):
+            return dict((k, recursive(v)) for k, v in value.iteritems())
+        elif isinstance(value, (list, tuple)):
+            return [recursive(lv) for lv in value]
+
+        # It's not clear why xmlrpclib created its own DateTime type, but
+        # for our purposes, make it a datetime type which is explicitly
+        # handled
+        if isinstance(value, xmlrpclib.DateTime):
+            value = datetime.datetime(*tuple(value.timetuple())[:6])
+
+        if convert_datetime and isinstance(value, datetime.datetime):
+            return timeutils.strtime(value)
+        elif hasattr(value, 'iteritems'):
+            return recursive(dict(value.iteritems()), level=level + 1)
+        elif hasattr(value, '__iter__'):
+            return recursive(list(value))
+        elif convert_instances and hasattr(value, '__dict__'):
+            # Likely an instance of something. Watch for cycles.
+            # Ignore class member vars.
+            return recursive(value.__dict__, level=level + 1)
+        else:
+            if any(test(value) for test in _nasty_type_tests):
+                return unicode(value)
+            return value
+    except TypeError:
+        # Class objects are tricky since they may define something like
+        # __iter__, but it isn't callable as list().
+        return unicode(value)
+
+
+def dumps(value, default=to_primitive, **kwargs):
+    return json.dumps(value, default=default, **kwargs)
+
+
+def loads(s):
+    return json.loads(s)
+
+
+def load(s):
+    return json.load(s)
+
+
+try:
+    import anyjson
+except ImportError:
+    pass
+else:
+    anyjson._modules.append((__name__, 'dumps', TypeError,
+                             'loads', ValueError, 'load'))
+    anyjson.force_implementation(__name__)
diff --git a/cinder/openstack/common/local.py b/cinder/openstack/common/local.py
new file mode 100644
index 0000000000..f1bfc824bf
--- /dev/null
+++ b/cinder/openstack/common/local.py
@@ -0,0 +1,48 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack Foundation.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Greenthread local storage of variables using weak references"""
+
+import weakref
+
+from eventlet import corolocal
+
+
+class WeakLocal(corolocal.local):
+    def __getattribute__(self, attr):
+        rval = corolocal.local.__getattribute__(self, attr)
+        if rval:
+            # NOTE(mikal): this bit is confusing. What is stored is a weak
+            # reference, not the value itself. We therefore need to lookup
+            # the weak reference and return the inner value here.
+            rval = rval()
+        return rval
+
+    def __setattr__(self, attr, value):
+        value = weakref.ref(value)
+        return corolocal.local.__setattr__(self, attr, value)
+
+
+# NOTE(mikal): the name "store" should be deprecated in the future
+store = WeakLocal()
+
+# A "weak" store uses weak references and allows an object to fall out of scope
+# when it falls out of scope in the code that uses the thread local storage. A
+# "strong" store will hold a reference to the object so that it never falls out
+# of scope.
+weak_store = WeakLocal()
+strong_store = corolocal.local
diff --git a/cinder/openstack/common/lockutils.py b/cinder/openstack/common/lockutils.py
new file mode 100644
index 0000000000..f21f0d9186
--- /dev/null
+++ b/cinder/openstack/common/lockutils.py
@@ -0,0 +1,278 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack Foundation.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+import errno
+import functools
+import os
+import shutil
+import tempfile
+import time
+import weakref
+
+from eventlet import semaphore
+from oslo.config import cfg
+
+from cinder.openstack.common import fileutils
+from cinder.openstack.common.gettextutils import _
+from cinder.openstack.common import local
+from cinder.openstack.common import log as logging
+
+
+LOG = logging.getLogger(__name__)
+
+
+util_opts = [
+    cfg.BoolOpt('disable_process_locking', default=False,
+                help='Whether to disable inter-process locks'),
+    cfg.StrOpt('lock_path',
+               help=('Directory to use for lock files. Defaults to a '
+                     'temp directory'))
+]
+
+
+CONF = cfg.CONF
+CONF.register_opts(util_opts)
+
+
+def set_defaults(lock_path):
+    cfg.set_defaults(util_opts, lock_path=lock_path)
+
+
+class _InterProcessLock(object):
+    """Lock implementation which allows multiple locks, working around
+    issues like bugs.debian.org/cgi-bin/bugreport.cgi?bug=632857 and does
+    not require any cleanup.
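+
+    A usage sketch (the lock file path is illustrative; InterProcessLock
+    is the platform-specific alias chosen at the bottom of this module)::
+
+        lock = InterProcessLock('/var/lock/cinder/example.lock')
+        with lock:
+            pass    # exclusive across processes while held
+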
+    Since the lock is always held on a file
+    descriptor rather than outside of the process, the lock gets dropped
+    automatically if the process crashes, even if __exit__ is not executed.
+
+    There are no guarantees regarding usage by multiple green threads in a
+    single process here. This lock works only between processes. Exclusive
+    access between local threads should be achieved using the semaphores
+    in the @synchronized decorator.
+
+    Note these locks are released when the descriptor is closed, so it's not
+    safe to close the file descriptor while another green thread holds the
+    lock. Just opening and closing the lock file can break synchronisation,
+    so lock files must be accessed only using this abstraction.
+    """
+
+    def __init__(self, name):
+        self.lockfile = None
+        self.fname = name
+
+    def __enter__(self):
+        self.lockfile = open(self.fname, 'w')
+
+        while True:
+            try:
+                # Using non-blocking locks since green threads are not
+                # patched to deal with blocking locking calls.
+                # Also upon reading the MSDN docs for locking(), it seems
+                # to have a laughable 10 attempts "blocking" mechanism.
+                self.trylock()
+                return self
+            except IOError as e:
+                if e.errno in (errno.EACCES, errno.EAGAIN):
+                    # external locks synchronise things like iptables
+                    # updates - give it some time to prevent busy spinning
+                    time.sleep(0.01)
+                else:
+                    raise
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        try:
+            self.unlock()
+            self.lockfile.close()
+        except IOError:
+            LOG.exception(_("Could not release the acquired lock `%s`"),
+                          self.fname)
+
+    def trylock(self):
+        raise NotImplementedError()
+
+    def unlock(self):
+        raise NotImplementedError()
+
+
+class _WindowsLock(_InterProcessLock):
+    def trylock(self):
+        msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_NBLCK, 1)
+
+    def unlock(self):
+        msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_UNLCK, 1)
+
+
+class _PosixLock(_InterProcessLock):
+    def trylock(self):
+        fcntl.lockf(self.lockfile, fcntl.LOCK_EX | fcntl.LOCK_NB)
+
+    def unlock(self):
+        fcntl.lockf(self.lockfile, fcntl.LOCK_UN)
+
+
+if os.name == 'nt':
+    import msvcrt
+    InterProcessLock = _WindowsLock
+else:
+    import fcntl
+    InterProcessLock = _PosixLock
+
+_semaphores = weakref.WeakValueDictionary()
+
+
+def synchronized(name, lock_file_prefix, external=False, lock_path=None):
+    """Synchronization decorator.
+
+    Decorating a method like so::
+
+        @synchronized('mylock')
+        def foo(self, *args):
+            ...
+
+    ensures that only one thread will execute the foo method at a time.
+
+    Different methods can share the same lock::
+
+        @synchronized('mylock')
+        def foo(self, *args):
+            ...
+
+        @synchronized('mylock')
+        def bar(self, *args):
+            ...
+
+    This way only one of either foo or bar can be executing at a time.
+
+    The lock_file_prefix argument is used to provide lock files on disk with a
+    meaningful prefix. The prefix should end with a hyphen ('-') if specified.
+
+    The external keyword argument denotes whether this lock should work across
+    multiple processes. This means that if two different workers both run a
+    method decorated with @synchronized('mylock', external=True), only one
+    of them will execute at a time.
+
+    The lock_path keyword argument is used to specify a special location for
+    external lock files to live. If nothing is set, then CONF.lock_path is
+    used as a default.
+    """
+
+    def wrap(f):
+        @functools.wraps(f)
+        def inner(*args, **kwargs):
+            # NOTE(soren): If we ever go natively threaded, this will be racy.
+ # See http://stackoverflow.com/questions/5390569/dyn + # amically-allocating-and-destroying-mutexes + sem = _semaphores.get(name, semaphore.Semaphore()) + if name not in _semaphores: + # this check is not racy - we're already holding ref locally + # so GC won't remove the item and there was no IO switch + # (only valid in greenthreads) + _semaphores[name] = sem + + with sem: + LOG.debug(_('Got semaphore "%(lock)s" for method ' + '"%(method)s"...'), {'lock': name, + 'method': f.__name__}) + + # NOTE(mikal): I know this looks odd + if not hasattr(local.strong_store, 'locks_held'): + local.strong_store.locks_held = [] + local.strong_store.locks_held.append(name) + + try: + if external and not CONF.disable_process_locking: + LOG.debug(_('Attempting to grab file lock "%(lock)s" ' + 'for method "%(method)s"...'), + {'lock': name, 'method': f.__name__}) + cleanup_dir = False + + # We need a copy of lock_path because it is non-local + local_lock_path = lock_path + if not local_lock_path: + local_lock_path = CONF.lock_path + + if not local_lock_path: + cleanup_dir = True + local_lock_path = tempfile.mkdtemp() + + if not os.path.exists(local_lock_path): + fileutils.ensure_tree(local_lock_path) + + # NOTE(mikal): the lock name cannot contain directory + # separators + safe_name = name.replace(os.sep, '_') + lock_file_name = '%s%s' % (lock_file_prefix, safe_name) + lock_file_path = os.path.join(local_lock_path, + lock_file_name) + + try: + lock = InterProcessLock(lock_file_path) + with lock: + LOG.debug(_('Got file lock "%(lock)s" at ' + '%(path)s for method ' + '"%(method)s"...'), + {'lock': name, + 'path': lock_file_path, + 'method': f.__name__}) + retval = f(*args, **kwargs) + finally: + LOG.debug(_('Released file lock "%(lock)s" at ' + '%(path)s for method "%(method)s"...'), + {'lock': name, + 'path': lock_file_path, + 'method': f.__name__}) + # NOTE(vish): This removes the tempdir if we needed + # to create one. This is used to + # cleanup the locks left behind by unit + # tests. + if cleanup_dir: + shutil.rmtree(local_lock_path) + else: + retval = f(*args, **kwargs) + + finally: + local.strong_store.locks_held.remove(name) + + return retval + return inner + return wrap + + +def synchronized_with_prefix(lock_file_prefix): + """Partial object generator for the synchronization decorator. + + Redefine @synchronized in each project like so:: + + (in nova/utils.py) + from nova.openstack.common import lockutils + + synchronized = lockutils.synchronized_with_prefix('nova-') + + + (in nova/foo.py) + from nova import utils + + @utils.synchronized('mylock') + def bar(self, *args): + ... + + The lock_file_prefix argument is used to provide lock files on disk with a + meaningful prefix. The prefix should end with a hyphen ('-') if specified. + """ + + return functools.partial(synchronized, lock_file_prefix=lock_file_prefix) diff --git a/cinder/openstack/common/log.py b/cinder/openstack/common/log.py new file mode 100644 index 0000000000..c170971f95 --- /dev/null +++ b/cinder/openstack/common/log.py @@ -0,0 +1,540 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack Foundation. +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
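For reference, a short usage sketch of the decorator defined above. The lock name, prefix, and path are illustrative, not part of this module::

    from cinder.openstack.common import lockutils

    @lockutils.synchronized('volume-ops', 'cinder-', external=True,
                            lock_path='/var/lock/cinder')
    def resize_volume(volume_id):
        # Only one process on this host runs resize_volume() at a time;
        # the on-disk lock file is /var/lock/cinder/cinder-volume-ops.
        pass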
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Openstack logging handler. + +This module adds to logging functionality by adding the option to specify +a context object when calling the various log methods. If the context object +is not specified, default formatting is used. Additionally, an instance uuid +may be passed as part of the log message, which is intended to make it easier +for admins to find messages related to a specific instance. + +It also allows setting of formatting information through conf. + +""" + +import ConfigParser +import cStringIO +import inspect +import itertools +import logging +import logging.config +import logging.handlers +import os +import stat +import sys +import traceback + +from oslo.config import cfg + +from cinder.openstack.common.gettextutils import _ +from cinder.openstack.common import jsonutils +from cinder.openstack.common import local +from cinder.openstack.common import notifier + + +_DEFAULT_LOG_FORMAT = "%(asctime)s %(levelname)8s [%(name)s] %(message)s" +_DEFAULT_LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S" + +common_cli_opts = [ + cfg.BoolOpt('debug', + short='d', + default=False, + help='Print debugging output (set logging level to ' + 'DEBUG instead of default WARNING level).'), + cfg.BoolOpt('verbose', + short='v', + default=False, + help='Print more verbose output (set logging level to ' + 'INFO instead of default WARNING level).'), +] + +logging_cli_opts = [ + cfg.StrOpt('log-config', + metavar='PATH', + help='If this option is specified, the logging configuration ' + 'file specified is used and overrides any other logging ' + 'options specified. Please see the Python logging module ' + 'documentation for details on logging configuration ' + 'files.'), + cfg.StrOpt('log-format', + default=_DEFAULT_LOG_FORMAT, + metavar='FORMAT', + help='A logging.Formatter log message format string which may ' + 'use any of the available logging.LogRecord attributes. ' + 'Default: %(default)s'), + cfg.StrOpt('log-date-format', + default=_DEFAULT_LOG_DATE_FORMAT, + metavar='DATE_FORMAT', + help='Format string for %%(asctime)s in log records. ' + 'Default: %(default)s'), + cfg.StrOpt('log-file', + metavar='PATH', + deprecated_name='logfile', + help='(Optional) Name of log file to output to. 
' + 'If no default is set, logging will go to stdout.'), + cfg.StrOpt('log-dir', + deprecated_name='logdir', + help='(Optional) The base directory used for relative ' + '--log-file paths'), + cfg.BoolOpt('use-syslog', + default=False, + help='Use syslog for logging.'), + cfg.StrOpt('syslog-log-facility', + default='LOG_USER', + help='syslog facility to receive log lines') +] + +generic_log_opts = [ + cfg.BoolOpt('use_stderr', + default=True, + help='Log output to standard error'), + cfg.StrOpt('logfile_mode', + default='0644', + help='Default file mode used when creating log files'), +] + +log_opts = [ + cfg.StrOpt('logging_context_format_string', + default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s ' + '%(name)s [%(request_id)s %(user)s %(tenant)s] ' + '%(instance)s%(message)s', + help='format string to use for log messages with context'), + cfg.StrOpt('logging_default_format_string', + default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s ' + '%(name)s [-] %(instance)s%(message)s', + help='format string to use for log messages without context'), + cfg.StrOpt('logging_debug_format_suffix', + default='%(funcName)s %(pathname)s:%(lineno)d', + help='data to append to log format when level is DEBUG'), + cfg.StrOpt('logging_exception_prefix', + default='%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s ' + '%(instance)s', + help='prefix each line of exception output with this format'), + cfg.ListOpt('default_log_levels', + default=[ + 'amqplib=WARN', + 'sqlalchemy=WARN', + 'boto=WARN', + 'suds=INFO', + 'keystone=INFO', + 'eventlet.wsgi.server=WARN' + ], + help='list of logger=LEVEL pairs'), + cfg.BoolOpt('publish_errors', + default=False, + help='publish error events'), + cfg.BoolOpt('fatal_deprecations', + default=False, + help='make deprecations fatal'), + + # NOTE(mikal): there are two options here because sometimes we are handed + # a full instance (and could include more information), and other times we + # are just handed a UUID for the instance. + cfg.StrOpt('instance_format', + default='[instance: %(uuid)s] ', + help='If an instance is passed with the log message, format ' + 'it like this'), + cfg.StrOpt('instance_uuid_format', + default='[instance: %(uuid)s] ', + help='If an instance UUID is passed with the log message, ' + 'format it like this'), +] + +CONF = cfg.CONF +CONF.register_cli_opts(common_cli_opts) +CONF.register_cli_opts(logging_cli_opts) +CONF.register_opts(generic_log_opts) +CONF.register_opts(log_opts) + +# our new audit level +# NOTE(jkoelker) Since we synthesized an audit level, make the logging +# module aware of it so it acts like other levels. 
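The assignment that follows makes the synthesized level usable from the standard library as well; a sketch, with an illustrative logger name and message::

    import logging

    from cinder.openstack.common import log  # importing registers AUDIT

    logging.getLogger('cinder.demo').log(logging.AUDIT,
                                         'quota class updated')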
+logging.AUDIT = logging.INFO + 1 +logging.addLevelName(logging.AUDIT, 'AUDIT') + + +try: + NullHandler = logging.NullHandler +except AttributeError: # NOTE(jkoelker) NullHandler added in Python 2.7 + class NullHandler(logging.Handler): + def handle(self, record): + pass + + def emit(self, record): + pass + + def createLock(self): + self.lock = None + + +def _dictify_context(context): + if context is None: + return None + if not isinstance(context, dict) and getattr(context, 'to_dict', None): + context = context.to_dict() + return context + + +def _get_binary_name(): + return os.path.basename(inspect.stack()[-1][1]) + + +def _get_log_file_path(binary=None): + logfile = CONF.log_file + logdir = CONF.log_dir + + if logfile and not logdir: + return logfile + + if logfile and logdir: + return os.path.join(logdir, logfile) + + if logdir: + binary = binary or _get_binary_name() + return '%s.log' % (os.path.join(logdir, binary),) + + +class ContextAdapter(logging.LoggerAdapter): + warn = logging.LoggerAdapter.warning + + def __init__(self, logger, project_name, version_string): + self.logger = logger + self.project = project_name + self.version = version_string + + def audit(self, msg, *args, **kwargs): + self.log(logging.AUDIT, msg, *args, **kwargs) + + def deprecated(self, msg, *args, **kwargs): + stdmsg = _("Deprecated: %s") % msg + if CONF.fatal_deprecations: + self.critical(stdmsg, *args, **kwargs) + raise DeprecatedConfig(msg=stdmsg) + else: + self.warn(stdmsg, *args, **kwargs) + + def process(self, msg, kwargs): + if 'extra' not in kwargs: + kwargs['extra'] = {} + extra = kwargs['extra'] + + context = kwargs.pop('context', None) + if not context: + context = getattr(local.store, 'context', None) + if context: + extra.update(_dictify_context(context)) + + instance = kwargs.pop('instance', None) + instance_extra = '' + if instance: + instance_extra = CONF.instance_format % instance + else: + instance_uuid = kwargs.pop('instance_uuid', None) + if instance_uuid: + instance_extra = (CONF.instance_uuid_format + % {'uuid': instance_uuid}) + extra.update({'instance': instance_extra}) + + extra.update({"project": self.project}) + extra.update({"version": self.version}) + extra['extra'] = extra.copy() + return msg, kwargs + + +class JSONFormatter(logging.Formatter): + def __init__(self, fmt=None, datefmt=None): + # NOTE(jkoelker) we ignore the fmt argument, but its still there + # since logging.config.fileConfig passes it. 
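A sketch of the per-instance decoration performed by ContextAdapter.process() above; the UUID is illustrative, and logging is assumed to have been set up already::

    from cinder.openstack.common import log as logging

    LOG = logging.getLogger(__name__)
    LOG.info('volume attached',
             instance_uuid='9f1c7d2e-0000-4000-8000-000000000000')
    # With the default format strings this renders roughly as:
    #   ... INFO cinder.foo [-] [instance: 9f1c7d2e-...] volume attached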
+ self.datefmt = datefmt + + def formatException(self, ei, strip_newlines=True): + lines = traceback.format_exception(*ei) + if strip_newlines: + lines = [itertools.ifilter( + lambda x: x, + line.rstrip().splitlines()) for line in lines] + lines = list(itertools.chain(*lines)) + return lines + + def format(self, record): + message = {'message': record.getMessage(), + 'asctime': self.formatTime(record, self.datefmt), + 'name': record.name, + 'msg': record.msg, + 'args': record.args, + 'levelname': record.levelname, + 'levelno': record.levelno, + 'pathname': record.pathname, + 'filename': record.filename, + 'module': record.module, + 'lineno': record.lineno, + 'funcname': record.funcName, + 'created': record.created, + 'msecs': record.msecs, + 'relative_created': record.relativeCreated, + 'thread': record.thread, + 'thread_name': record.threadName, + 'process_name': record.processName, + 'process': record.process, + 'traceback': None} + + if hasattr(record, 'extra'): + message['extra'] = record.extra + + if record.exc_info: + message['traceback'] = self.formatException(record.exc_info) + + return jsonutils.dumps(message) + + +class PublishErrorsHandler(logging.Handler): + def emit(self, record): + if ('cinder.openstack.common.notifier.log_notifier' in + CONF.notification_driver): + return + notifier.api.notify(None, 'error.publisher', + 'error_notification', + notifier.api.ERROR, + dict(error=record.msg)) + + +def _create_logging_excepthook(product_name): + def logging_excepthook(type, value, tb): + extra = {} + if CONF.verbose: + extra['exc_info'] = (type, value, tb) + getLogger(product_name).critical(str(value), **extra) + return logging_excepthook + + +class LogConfigError(Exception): + + message = _('Error loading logging config %(log_config)s: %(err_msg)s') + + def __init__(self, log_config, err_msg): + self.log_config = log_config + self.err_msg = err_msg + + def __str__(self): + return self.message % dict(log_config=self.log_config, + err_msg=self.err_msg) + + +def _load_log_config(log_config): + try: + logging.config.fileConfig(log_config) + except ConfigParser.Error, exc: + raise LogConfigError(log_config, str(exc)) + + +def setup(product_name): + """Setup logging.""" + if CONF.log_config: + _load_log_config(CONF.log_config) + else: + _setup_logging_from_conf() + sys.excepthook = _create_logging_excepthook(product_name) + + +def set_defaults(logging_context_format_string): + cfg.set_defaults(log_opts, + logging_context_format_string= + logging_context_format_string) + + +def _find_facility_from_conf(): + facility_names = logging.handlers.SysLogHandler.facility_names + facility = getattr(logging.handlers.SysLogHandler, + CONF.syslog_log_facility, + None) + + if facility is None and CONF.syslog_log_facility in facility_names: + facility = facility_names.get(CONF.syslog_log_facility) + + if facility is None: + valid_facilities = facility_names.keys() + consts = ['LOG_AUTH', 'LOG_AUTHPRIV', 'LOG_CRON', 'LOG_DAEMON', + 'LOG_FTP', 'LOG_KERN', 'LOG_LPR', 'LOG_MAIL', 'LOG_NEWS', + 'LOG_AUTH', 'LOG_SYSLOG', 'LOG_USER', 'LOG_UUCP', + 'LOG_LOCAL0', 'LOG_LOCAL1', 'LOG_LOCAL2', 'LOG_LOCAL3', + 'LOG_LOCAL4', 'LOG_LOCAL5', 'LOG_LOCAL6', 'LOG_LOCAL7'] + valid_facilities.extend(consts) + raise TypeError(_('syslog facility must be one of: %s') % + ', '.join("'%s'" % fac + for fac in valid_facilities)) + + return facility + + +def _setup_logging_from_conf(): + log_root = getLogger(None).logger + for handler in log_root.handlers: + log_root.removeHandler(handler) + + if CONF.use_syslog: + facility = 
_find_facility_from_conf() + syslog = logging.handlers.SysLogHandler(address='/dev/log', + facility=facility) + log_root.addHandler(syslog) + + logpath = _get_log_file_path() + if logpath: + filelog = logging.handlers.WatchedFileHandler(logpath) + log_root.addHandler(filelog) + + mode = int(CONF.logfile_mode, 8) + st = os.stat(logpath) + if st.st_mode != (stat.S_IFREG | mode): + os.chmod(logpath, mode) + + if CONF.use_stderr: + streamlog = ColorHandler() + log_root.addHandler(streamlog) + + elif not CONF.log_file: + # pass sys.stdout as a positional argument + # python2.6 calls the argument strm, in 2.7 it's stream + streamlog = logging.StreamHandler(sys.stdout) + log_root.addHandler(streamlog) + + if CONF.publish_errors: + log_root.addHandler(PublishErrorsHandler(logging.ERROR)) + + for handler in log_root.handlers: + datefmt = CONF.log_date_format + if CONF.log_format: + handler.setFormatter(logging.Formatter(fmt=CONF.log_format, + datefmt=datefmt)) + else: + handler.setFormatter(LegacyFormatter(datefmt=datefmt)) + + if CONF.debug: + log_root.setLevel(logging.DEBUG) + elif CONF.verbose: + log_root.setLevel(logging.INFO) + else: + log_root.setLevel(logging.WARNING) + + for pair in CONF.default_log_levels: + mod, _sep, level_name = pair.partition('=') + level = logging.getLevelName(level_name) + logger = logging.getLogger(mod) + logger.setLevel(level) + +_loggers = {} + + +def getLogger(name='unknown', version='unknown'): + if name not in _loggers: + _loggers[name] = ContextAdapter(logging.getLogger(name), + name, + version) + return _loggers[name] + + +class WritableLogger(object): + """A thin wrapper that responds to `write` and logs.""" + + def __init__(self, logger, level=logging.INFO): + self.logger = logger + self.level = level + + def write(self, msg): + self.logger.log(self.level, msg) + + +class LegacyFormatter(logging.Formatter): + """A context.RequestContext aware formatter configured through flags. + + The flags used to set format strings are: logging_context_format_string + and logging_default_format_string. You can also specify + logging_debug_format_suffix to append extra formatting if the log level is + debug. 
+ + For information about what variables are available for the formatter see: + http://docs.python.org/library/logging.html#formatter + + """ + + def format(self, record): + """Uses contextstring if request_id is set, otherwise default.""" + # NOTE(sdague): default the fancier formating params + # to an empty string so we don't throw an exception if + # they get used + for key in ('instance', 'color'): + if key not in record.__dict__: + record.__dict__[key] = '' + + if record.__dict__.get('request_id', None): + self._fmt = CONF.logging_context_format_string + else: + self._fmt = CONF.logging_default_format_string + + if (record.levelno == logging.DEBUG and + CONF.logging_debug_format_suffix): + self._fmt += " " + CONF.logging_debug_format_suffix + + # Cache this on the record, Logger will respect our formated copy + if record.exc_info: + record.exc_text = self.formatException(record.exc_info, record) + return logging.Formatter.format(self, record) + + def formatException(self, exc_info, record=None): + """Format exception output with CONF.logging_exception_prefix.""" + if not record: + return logging.Formatter.formatException(self, exc_info) + + stringbuffer = cStringIO.StringIO() + traceback.print_exception(exc_info[0], exc_info[1], exc_info[2], + None, stringbuffer) + lines = stringbuffer.getvalue().split('\n') + stringbuffer.close() + + if CONF.logging_exception_prefix.find('%(asctime)') != -1: + record.asctime = self.formatTime(record, self.datefmt) + + formatted_lines = [] + for line in lines: + pl = CONF.logging_exception_prefix % record.__dict__ + fl = '%s%s' % (pl, line) + formatted_lines.append(fl) + return '\n'.join(formatted_lines) + + +class ColorHandler(logging.StreamHandler): + LEVEL_COLORS = { + logging.DEBUG: '\033[00;32m', # GREEN + logging.INFO: '\033[00;36m', # CYAN + logging.AUDIT: '\033[01;36m', # BOLD CYAN + logging.WARN: '\033[01;33m', # BOLD YELLOW + logging.ERROR: '\033[01;31m', # BOLD RED + logging.CRITICAL: '\033[01;31m', # BOLD RED + } + + def format(self, record): + record.color = self.LEVEL_COLORS[record.levelno] + return logging.StreamHandler.format(self, record) + + +class DeprecatedConfig(Exception): + message = _("Fatal call to deprecated config: %(msg)s") + + def __init__(self, msg): + super(Exception, self).__init__(self.message % dict(msg=msg)) diff --git a/cinder/openstack/common/loopingcall.py b/cinder/openstack/common/loopingcall.py new file mode 100644 index 0000000000..8be3a00eb4 --- /dev/null +++ b/cinder/openstack/common/loopingcall.py @@ -0,0 +1,147 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# Copyright 2011 Justin Santa Barbara +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
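Pulling the pieces of the log module together, a minimal service entry point might look like the following sketch. It assumes oslo.config is installed and that the logging flags are parsed from the command line::

    import sys

    from oslo.config import cfg

    from cinder.openstack.common import log as logging

    CONF = cfg.CONF

    if __name__ == '__main__':
        CONF(sys.argv[1:], project='cinder')  # parses --debug, --log-file, ...
        logging.setup('cinder')
        LOG = logging.getLogger(__name__)
        LOG.audit('service starting')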
+ +import sys + +from eventlet import event +from eventlet import greenthread + +from cinder.openstack.common.gettextutils import _ +from cinder.openstack.common import log as logging +from cinder.openstack.common import timeutils + +LOG = logging.getLogger(__name__) + + +class LoopingCallDone(Exception): + """Exception to break out and stop a LoopingCall. + + The poll-function passed to LoopingCall can raise this exception to + break out of the loop normally. This is somewhat analogous to + StopIteration. + + An optional return-value can be included as the argument to the exception; + this return-value will be returned by LoopingCall.wait() + + """ + + def __init__(self, retvalue=True): + """:param retvalue: Value that LoopingCall.wait() should return.""" + self.retvalue = retvalue + + +class LoopingCallBase(object): + def __init__(self, f=None, *args, **kw): + self.args = args + self.kw = kw + self.f = f + self._running = False + self.done = None + + def stop(self): + self._running = False + + def wait(self): + return self.done.wait() + + +class FixedIntervalLoopingCall(LoopingCallBase): + """A fixed interval looping call.""" + + def start(self, interval, initial_delay=None): + self._running = True + done = event.Event() + + def _inner(): + if initial_delay: + greenthread.sleep(initial_delay) + + try: + while self._running: + start = timeutils.utcnow() + self.f(*self.args, **self.kw) + end = timeutils.utcnow() + if not self._running: + break + delay = interval - timeutils.delta_seconds(start, end) + if delay <= 0: + LOG.warn(_('task run outlasted interval by %s sec') % + -delay) + greenthread.sleep(delay if delay > 0 else 0) + except LoopingCallDone, e: + self.stop() + done.send(e.retvalue) + except Exception: + LOG.exception(_('in fixed duration looping call')) + done.send_exception(*sys.exc_info()) + return + else: + done.send(True) + + self.done = done + + greenthread.spawn_n(_inner) + return self.done + + +# TODO(mikal): this class name is deprecated in Havana and should be removed +# in the I release +LoopingCall = FixedIntervalLoopingCall + + +class DynamicLoopingCall(LoopingCallBase): + """A looping call which sleeps until the next known event. + + The function called should return how long to sleep for before being + called again. + """ + + def start(self, initial_delay=None, periodic_interval_max=None): + self._running = True + done = event.Event() + + def _inner(): + if initial_delay: + greenthread.sleep(initial_delay) + + try: + while self._running: + idle = self.f(*self.args, **self.kw) + if not self._running: + break + + if periodic_interval_max is not None: + idle = min(idle, periodic_interval_max) + LOG.debug(_('Dynamic looping call sleeping for %.02f ' + 'seconds'), idle) + greenthread.sleep(idle) + except LoopingCallDone, e: + self.stop() + done.send(e.retvalue) + except Exception: + LOG.exception(_('in dynamic looping call')) + done.send_exception(*sys.exc_info()) + return + else: + done.send(True) + + self.done = done + + greenthread.spawn(_inner) + return self.done diff --git a/cinder/openstack/common/network_utils.py b/cinder/openstack/common/network_utils.py new file mode 100644 index 0000000000..5224e01aa9 --- /dev/null +++ b/cinder/openstack/common/network_utils.py @@ -0,0 +1,68 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
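A usage sketch for the fixed-interval variant above; the interval values and callback are illustrative::

    from cinder.openstack.common import loopingcall

    def _report_state():
        print 'heartbeat'

    timer = loopingcall.FixedIntervalLoopingCall(_report_state)
    timer.start(interval=10, initial_delay=5)
    # Blocks until stop() is called from another greenthread or the
    # callback raises LoopingCallDone.
    timer.wait()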
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Network-related utilities and helper functions. +""" + +import logging + +LOG = logging.getLogger(__name__) + + +def parse_host_port(address, default_port=None): + """ + Interpret a string as a host:port pair. + An IPv6 address MUST be escaped if accompanied by a port, + because otherwise ambiguity ensues: 2001:db8:85a3::8a2e:370:7334 + means both [2001:db8:85a3::8a2e:370:7334] and + [2001:db8:85a3::8a2e:370]:7334. + + >>> parse_host_port('server01:80') + ('server01', 80) + >>> parse_host_port('server01') + ('server01', None) + >>> parse_host_port('server01', default_port=1234) + ('server01', 1234) + >>> parse_host_port('[::1]:80') + ('::1', 80) + >>> parse_host_port('[::1]') + ('::1', None) + >>> parse_host_port('[::1]', default_port=1234) + ('::1', 1234) + >>> parse_host_port('2001:db8:85a3::8a2e:370:7334', default_port=1234) + ('2001:db8:85a3::8a2e:370:7334', 1234) + + """ + if address[0] == '[': + # Escaped ipv6 + _host, _port = address[1:].split(']') + host = _host + if ':' in _port: + port = _port.split(':')[1] + else: + port = default_port + else: + if address.count(':') == 1: + host, port = address.split(':') + else: + # 0 means ipv4, >1 means ipv6. + # We prohibit unescaped ipv6 addresses with port. + host = address + port = default_port + + return (host, None if port is None else int(port)) diff --git a/cinder/openstack/common/notifier/__init__.py b/cinder/openstack/common/notifier/__init__.py new file mode 100644 index 0000000000..45c3b46ae9 --- /dev/null +++ b/cinder/openstack/common/notifier/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2011 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/cinder/openstack/common/notifier/api.py b/cinder/openstack/common/notifier/api.py new file mode 100644 index 0000000000..6b82e4451e --- /dev/null +++ b/cinder/openstack/common/notifier/api.py @@ -0,0 +1,182 @@ +# Copyright 2011 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import uuid + +from oslo.config import cfg + +from cinder.openstack.common import context +from cinder.openstack.common.gettextutils import _ +from cinder.openstack.common import importutils +from cinder.openstack.common import jsonutils +from cinder.openstack.common import log as logging +from cinder.openstack.common import timeutils + + +LOG = logging.getLogger(__name__) + +notifier_opts = [ + cfg.MultiStrOpt('notification_driver', + default=[], + help='Driver or drivers to handle sending notifications'), + cfg.StrOpt('default_notification_level', + default='INFO', + help='Default notification level for outgoing notifications'), + cfg.StrOpt('default_publisher_id', + default='$host', + help='Default publisher_id for outgoing notifications'), +] + +CONF = cfg.CONF +CONF.register_opts(notifier_opts) + +WARN = 'WARN' +INFO = 'INFO' +ERROR = 'ERROR' +CRITICAL = 'CRITICAL' +DEBUG = 'DEBUG' + +log_levels = (DEBUG, WARN, INFO, ERROR, CRITICAL) + + +class BadPriorityException(Exception): + pass + + +def notify_decorator(name, fn): + """ decorator for notify which is used from utils.monkey_patch() + + :param name: name of the function + :param function: - object of the function + :returns: function -- decorated function + + """ + def wrapped_func(*args, **kwarg): + body = {} + body['args'] = [] + body['kwarg'] = {} + for arg in args: + body['args'].append(arg) + for key in kwarg: + body['kwarg'][key] = kwarg[key] + + ctxt = context.get_context_from_function_and_args(fn, args, kwarg) + notify(ctxt, + CONF.default_publisher_id, + name, + CONF.default_notification_level, + body) + return fn(*args, **kwarg) + return wrapped_func + + +def publisher_id(service, host=None): + if not host: + host = CONF.host + return "%s.%s" % (service, host) + + +def notify(context, publisher_id, event_type, priority, payload): + """Sends a notification using the specified driver + + :param publisher_id: the source worker_type.host of the message + :param event_type: the literal type of event (ex. Instance Creation) + :param priority: patterned after the enumeration of Python logging + levels in the set (DEBUG, WARN, INFO, ERROR, CRITICAL) + :param payload: A python dictionary of attributes + + Outgoing message format includes the above parameters, and appends the + following: + + message_id + a UUID representing the id for this notification + + timestamp + the GMT timestamp the notification was sent at + + The composite message will be constructed as a dictionary of the above + attributes, which will then be sent via the transport mechanism defined + by the driver. + + Message example:: + + {'message_id': str(uuid.uuid4()), + 'publisher_id': 'compute.host1', + 'timestamp': timeutils.utcnow(), + 'priority': 'WARN', + 'event_type': 'compute.create_instance', + 'payload': {'instance_id': 12, ... }} + + """ + if priority not in log_levels: + raise BadPriorityException( + _('%s not in valid priorities') % priority) + + # Ensure everything is JSON serializable. + payload = jsonutils.to_primitive(payload, convert_instances=True) + + msg = dict(message_id=str(uuid.uuid4()), + publisher_id=publisher_id, + event_type=event_type, + priority=priority, + payload=payload, + timestamp=str(timeutils.utcnow())) + + for driver in _get_drivers(): + try: + driver.notify(context, msg) + except Exception as e: + LOG.exception(_("Problem '%(e)s' attempting to " + "send to notification system. 
" + "Payload=%(payload)s") + % dict(e=e, payload=payload)) + + +_drivers = None + + +def _get_drivers(): + """Instantiate, cache, and return drivers based on the CONF.""" + global _drivers + if _drivers is None: + _drivers = {} + for notification_driver in CONF.notification_driver: + add_driver(notification_driver) + + return _drivers.values() + + +def add_driver(notification_driver): + """Add a notification driver at runtime.""" + # Make sure the driver list is initialized. + _get_drivers() + if isinstance(notification_driver, basestring): + # Load and add + try: + driver = importutils.import_module(notification_driver) + _drivers[notification_driver] = driver + except ImportError: + LOG.exception(_("Failed to load notifier %s. " + "These notifications will not be sent.") % + notification_driver) + else: + # Driver is already loaded; just add the object. + _drivers[notification_driver] = notification_driver + + +def _reset_drivers(): + """Used by unit tests to reset the drivers.""" + global _drivers + _drivers = None diff --git a/cinder/openstack/common/notifier/log_notifier.py b/cinder/openstack/common/notifier/log_notifier.py new file mode 100644 index 0000000000..010d29cea3 --- /dev/null +++ b/cinder/openstack/common/notifier/log_notifier.py @@ -0,0 +1,35 @@ +# Copyright 2011 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo.config import cfg + +from cinder.openstack.common import jsonutils +from cinder.openstack.common import log as logging + + +CONF = cfg.CONF + + +def notify(_context, message): + """Notifies the recipient of the desired event given the model. + Log notifications using openstack's default logging system""" + + priority = message.get('priority', + CONF.default_notification_level) + priority = priority.lower() + logger = logging.getLogger( + 'cinder.openstack.common.notification.%s' % + message['event_type']) + getattr(logger, priority)(jsonutils.dumps(message)) diff --git a/cinder/openstack/common/notifier/no_op_notifier.py b/cinder/openstack/common/notifier/no_op_notifier.py new file mode 100644 index 0000000000..bc7a56ca7a --- /dev/null +++ b/cinder/openstack/common/notifier/no_op_notifier.py @@ -0,0 +1,19 @@ +# Copyright 2011 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ + +def notify(_context, message): + """Notifies the recipient of the desired event given the model""" + pass diff --git a/cinder/openstack/common/notifier/rabbit_notifier.py b/cinder/openstack/common/notifier/rabbit_notifier.py new file mode 100644 index 0000000000..2ffe9524e9 --- /dev/null +++ b/cinder/openstack/common/notifier/rabbit_notifier.py @@ -0,0 +1,29 @@ +# Copyright 2012 Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +from cinder.openstack.common.gettextutils import _ +from cinder.openstack.common import log as logging +from cinder.openstack.common.notifier import rpc_notifier + +LOG = logging.getLogger(__name__) + + +def notify(context, message): + """Deprecated in Grizzly. Please use rpc_notifier instead.""" + + LOG.deprecated(_("The rabbit_notifier is now deprecated." + " Please use rpc_notifier instead.")) + rpc_notifier.notify(context, message) diff --git a/cinder/openstack/common/notifier/rpc_notifier.py b/cinder/openstack/common/notifier/rpc_notifier.py new file mode 100644 index 0000000000..46a95a17c9 --- /dev/null +++ b/cinder/openstack/common/notifier/rpc_notifier.py @@ -0,0 +1,46 @@ +# Copyright 2011 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo.config import cfg + +from cinder.openstack.common import context as req_context +from cinder.openstack.common.gettextutils import _ +from cinder.openstack.common import log as logging +from cinder.openstack.common import rpc + +LOG = logging.getLogger(__name__) + +notification_topic_opt = cfg.ListOpt( + 'notification_topics', default=['notifications', ], + help='AMQP topic used for openstack notifications') + +CONF = cfg.CONF +CONF.register_opt(notification_topic_opt) + + +def notify(context, message): + """Sends a notification via RPC""" + if not context: + context = req_context.get_admin_context() + priority = message.get('priority', + CONF.default_notification_level) + priority = priority.lower() + for topic in CONF.notification_topics: + topic = '%s.%s' % (topic, priority) + try: + rpc.notify(context, topic, message) + except Exception: + LOG.exception(_("Could not send notification to %(topic)s. " + "Payload=%(message)s"), locals()) diff --git a/cinder/openstack/common/notifier/rpc_notifier2.py b/cinder/openstack/common/notifier/rpc_notifier2.py new file mode 100644 index 0000000000..62a8eda53d --- /dev/null +++ b/cinder/openstack/common/notifier/rpc_notifier2.py @@ -0,0 +1,52 @@ +# Copyright 2011 OpenStack Foundation. 
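Looking back at the dispatch API in notifier/api.py, a sketch of emitting a notification. The event type and payload are illustrative, and it assumes CONF.host is set and at least one notification_driver is configured; otherwise nothing is sent::

    from cinder.openstack.common import context
    from cinder.openstack.common.notifier import api as notifier_api

    notifier_api.notify(context.get_admin_context(),
                        notifier_api.publisher_id('volume'),  # "volume.<host>"
                        'volume.create.start',
                        notifier_api.INFO,
                        {'volume_id': 'vol-0001'})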
+# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +'''messaging based notification driver, with message envelopes''' + +from oslo.config import cfg + +from cinder.openstack.common import context as req_context +from cinder.openstack.common.gettextutils import _ +from cinder.openstack.common import log as logging +from cinder.openstack.common import rpc + +LOG = logging.getLogger(__name__) + +notification_topic_opt = cfg.ListOpt( + 'topics', default=['notifications', ], + help='AMQP topic(s) used for openstack notifications') + +opt_group = cfg.OptGroup(name='rpc_notifier2', + title='Options for rpc_notifier2') + +CONF = cfg.CONF +CONF.register_group(opt_group) +CONF.register_opt(notification_topic_opt, opt_group) + + +def notify(context, message): + """Sends a notification via RPC""" + if not context: + context = req_context.get_admin_context() + priority = message.get('priority', + CONF.default_notification_level) + priority = priority.lower() + for topic in CONF.rpc_notifier2.topics: + topic = '%s.%s' % (topic, priority) + try: + rpc.notify(context, topic, message, envelope=True) + except Exception: + LOG.exception(_("Could not send notification to %(topic)s. " + "Payload=%(message)s"), locals()) diff --git a/cinder/openstack/common/notifier/test_notifier.py b/cinder/openstack/common/notifier/test_notifier.py new file mode 100644 index 0000000000..96c1746bf4 --- /dev/null +++ b/cinder/openstack/common/notifier/test_notifier.py @@ -0,0 +1,22 @@ +# Copyright 2011 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +NOTIFICATIONS = [] + + +def notify(_context, message): + """Test notifier, stores notifications in memory for unittests.""" + NOTIFICATIONS.append(message) diff --git a/cinder/openstack/common/policy.py b/cinder/openstack/common/policy.py new file mode 100644 index 0000000000..0ca48ce9e8 --- /dev/null +++ b/cinder/openstack/common/policy.py @@ -0,0 +1,301 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 OpenStack, LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
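The envelope-based driver above is enabled and pointed at its topics through configuration; a sketch of the relevant cinder.conf section, with illustrative topic names::

    [DEFAULT]
    notification_driver = cinder.openstack.common.notifier.rpc_notifier2

    [rpc_notifier2]
    topics = notifications,monitor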
See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Common Policy Engine Implementation"""
+
+import logging
+import urllib
+import urllib2
+
+from cinder.openstack.common.gettextutils import _
+from cinder.openstack.common import jsonutils
+
+
+LOG = logging.getLogger(__name__)
+
+
+_BRAIN = None
+
+
+def set_brain(brain):
+    """Set the brain used by enforce().
+
+    Defaults to Brain() if not set.
+
+    """
+    global _BRAIN
+    _BRAIN = brain
+
+
+def reset():
+    """Clear the brain used by enforce()."""
+    global _BRAIN
+    _BRAIN = None
+
+
+def enforce(match_list, target_dict, credentials_dict, exc=None,
+            *args, **kwargs):
+    """Enforces authorization of some rules against credentials.
+
+    :param match_list: nested tuples of data to match against
+
+    The basic brain supports three types of match lists:
+
+        1) rules
+
+           looks like: ``('rule:compute:get_instance',)``
+
+           Retrieves the named rule from the rules dict and recursively
+           checks against the contents of the rule.
+
+        2) roles
+
+           looks like: ``('role:compute:admin',)``
+
+           Matches if the specified role is in credentials_dict['roles'].
+
+        3) generic
+
+           looks like: ``('tenant_id:%(tenant_id)s',)``
+
+           Substitutes values from the target dict into the match using
+           the % operator and matches them against the creds dict.
+
+    Combining rules:
+
+        The brain returns True if any of the outer tuple of rules
+        match and also True if all of the inner tuples match. You
+        can use this to perform simple boolean logic. For
+        example, the following rule would return True if the creds
+        contain the role 'admin' OR if the tenant_id matches the
+        target dict AND the creds contain the role
+        'compute_sysadmin':
+
+        ::
+
+            {
+                "rule:combined": (
+                    'role:admin',
+                    ('tenant_id:%(tenant_id)s', 'role:compute_sysadmin')
+                )
+            }
+
+    Note that rule and role are reserved words in the credentials match, so
+    you can't match against properties with those names. Custom brains may
+    also add new reserved words. For example, the HttpBrain adds http as a
+    reserved word.
+
+    :param target_dict: dict of object properties
+
+    Target dicts contain as much information as we can about the object being
+    operated on.
+
+    :param credentials_dict: dict of actor properties
+
+    Credentials dicts contain as much information as we can about the user
+    performing the action.
+
+    :param exc: exception to raise
+
+    Class of the exception to raise if the check fails. Any remaining
+    arguments passed to enforce() (both positional and keyword arguments)
+    will be passed to the exception class. If exc is not provided, returns
+    False.
+ + :return: True if the policy allows the action + :return: False if the policy does not allow the action and exc is not set + """ + global _BRAIN + if not _BRAIN: + _BRAIN = Brain() + if not _BRAIN.check(match_list, target_dict, credentials_dict): + if exc: + raise exc(*args, **kwargs) + return False + return True + + +class Brain(object): + """Implements policy checking.""" + + _checks = {} + + @classmethod + def _register(cls, name, func): + cls._checks[name] = func + + @classmethod + def load_json(cls, data, default_rule=None): + """Init a brain using json instead of a rules dictionary.""" + rules_dict = jsonutils.loads(data) + return cls(rules=rules_dict, default_rule=default_rule) + + def __init__(self, rules=None, default_rule=None): + if self.__class__ != Brain: + LOG.warning(_("Inheritance-based rules are deprecated; use " + "the default brain instead of %s.") % + self.__class__.__name__) + + self.rules = rules or {} + self.default_rule = default_rule + + def add_rule(self, key, match): + self.rules[key] = match + + def _check(self, match, target_dict, cred_dict): + try: + match_kind, match_value = match.split(':', 1) + except Exception: + LOG.exception(_("Failed to understand rule %(match)r") % locals()) + # If the rule is invalid, fail closed + return False + + func = None + try: + old_func = getattr(self, '_check_%s' % match_kind) + except AttributeError: + func = self._checks.get(match_kind, self._checks.get(None, None)) + else: + LOG.warning(_("Inheritance-based rules are deprecated; update " + "_check_%s") % match_kind) + func = lambda brain, kind, value, target, cred: old_func(value, + target, + cred) + + if not func: + LOG.error(_("No handler for matches of kind %s") % match_kind) + # Fail closed + return False + + return func(self, match_kind, match_value, target_dict, cred_dict) + + def check(self, match_list, target_dict, cred_dict): + """Checks authorization of some rules against credentials. + + Detailed description of the check with examples in policy.enforce(). + + :param match_list: nested tuples of data to match against + :param target_dict: dict of object properties + :param credentials_dict: dict of actor properties + + :returns: True if the check passes + + """ + if not match_list: + return True + for and_list in match_list: + if isinstance(and_list, basestring): + and_list = (and_list,) + if all([self._check(item, target_dict, cred_dict) + for item in and_list]): + return True + return False + + +class HttpBrain(Brain): + """A brain that can check external urls for policy. + + Posts json blobs for target and credentials. + + Note that this brain is deprecated; the http check is registered + by default. + """ + + pass + + +def register(name, func=None): + """ + Register a function as a policy check. + + :param name: Gives the name of the check type, e.g., 'rule', + 'role', etc. If name is None, a default function + will be registered. + :param func: If given, provides the function to register. If not + given, returns a function taking one argument to + specify the function to register, allowing use as a + decorator. + """ + + # Perform the actual decoration by registering the function. + # Returns the function for compliance with the decorator + # interface. 
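To make the matching rules above concrete, a small end-to-end sketch; the rule name and credentials are illustrative::

    from cinder.openstack.common import policy

    brain = policy.Brain.load_json(
        '{"volume:delete": [["role:admin"], ["tenant_id:%(tenant_id)s"]]}')
    policy.set_brain(brain)

    target = {'tenant_id': 'tenant-1'}
    creds = {'roles': ['member'], 'tenant_id': 'tenant-1'}
    # First inner rule fails (no admin role), second matches on tenant_id.
    print policy.enforce(('rule:volume:delete',), target, creds)  # True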
+ def decorator(func): + # Register the function + Brain._register(name, func) + return func + + # If the function is given, do the registration + if func: + return decorator(func) + + return decorator + + +@register("rule") +def _check_rule(brain, match_kind, match, target_dict, cred_dict): + """Recursively checks credentials based on the brains rules.""" + try: + new_match_list = brain.rules[match] + except KeyError: + if brain.default_rule and match != brain.default_rule: + new_match_list = ('rule:%s' % brain.default_rule,) + else: + return False + + return brain.check(new_match_list, target_dict, cred_dict) + + +@register("role") +def _check_role(brain, match_kind, match, target_dict, cred_dict): + """Check that there is a matching role in the cred dict.""" + return match.lower() in [x.lower() for x in cred_dict['roles']] + + +@register('http') +def _check_http(brain, match_kind, match, target_dict, cred_dict): + """Check http: rules by calling to a remote server. + + This example implementation simply verifies that the response is + exactly 'True'. A custom brain using response codes could easily + be implemented. + + """ + url = 'http:' + (match % target_dict) + data = {'target': jsonutils.dumps(target_dict), + 'credentials': jsonutils.dumps(cred_dict)} + post_data = urllib.urlencode(data) + f = urllib2.urlopen(url, post_data) + return f.read() == "True" + + +@register(None) +def _check_generic(brain, match_kind, match, target_dict, cred_dict): + """Check an individual match. + + Matches look like: + + tenant:%(tenant_id)s + role:compute:admin + + """ + + # TODO(termie): do dict inspection via dot syntax + match = match % target_dict + if match_kind in cred_dict: + return match == unicode(cred_dict[match_kind]) + return False diff --git a/cinder/openstack/common/processutils.py b/cinder/openstack/common/processutils.py new file mode 100644 index 0000000000..47e7d08b1e --- /dev/null +++ b/cinder/openstack/common/processutils.py @@ -0,0 +1,181 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +System-level utilities and helper functions. +""" + +import os +import random +import shlex +import signal + +from eventlet.green import subprocess +from eventlet import greenthread + +from cinder.openstack.common.gettextutils import _ +from cinder.openstack.common import log as logging + + +LOG = logging.getLogger(__name__) + + +class UnknownArgumentError(Exception): + def __init__(self, message=None): + super(UnknownArgumentError, self).__init__(message) + + +class ProcessExecutionError(Exception): + def __init__(self, stdout=None, stderr=None, exit_code=None, cmd=None, + description=None): + self.exit_code = exit_code + self.stderr = stderr + self.stdout = stdout + self.cmd = cmd + self.description = description + + if description is None: + description = "Unexpected error while running command." 
+ if exit_code is None: + exit_code = '-' + message = ("%s\nCommand: %s\nExit code: %s\nStdout: %r\nStderr: %r" + % (description, cmd, exit_code, stdout, stderr)) + super(ProcessExecutionError, self).__init__(message) + + +class NoRootWrapSpecified(Exception): + def __init__(self, message=None): + super(NoRootWrapSpecified, self).__init__(message) + + +def _subprocess_setup(): + # Python installs a SIGPIPE handler by default. This is usually not what + # non-Python subprocesses expect. + signal.signal(signal.SIGPIPE, signal.SIG_DFL) + + +def execute(*cmd, **kwargs): + """ + Helper method to shell out and execute a command through subprocess with + optional retry. + + :param cmd: Passed to subprocess.Popen. + :type cmd: string + :param process_input: Send to opened process. + :type proces_input: string + :param check_exit_code: Single bool, int, or list of allowed exit + codes. Defaults to [0]. Raise + :class:`ProcessExecutionError` unless + program exits with one of these code. + :type check_exit_code: boolean, int, or [int] + :param delay_on_retry: True | False. Defaults to True. If set to True, + wait a short amount of time before retrying. + :type delay_on_retry: boolean + :param attempts: How many times to retry cmd. + :type attempts: int + :param run_as_root: True | False. Defaults to False. If set to True, + the command is prefixed by the command specified + in the root_helper kwarg. + :type run_as_root: boolean + :param root_helper: command to prefix to commands called with + run_as_root=True + :type root_helper: string + :param shell: whether or not there should be a shell used to + execute this command. Defaults to false. + :type shell: boolean + :returns: (stdout, stderr) from process execution + :raises: :class:`UnknownArgumentError` on + receiving unknown arguments + :raises: :class:`ProcessExecutionError` + """ + + process_input = kwargs.pop('process_input', None) + check_exit_code = kwargs.pop('check_exit_code', [0]) + ignore_exit_code = False + delay_on_retry = kwargs.pop('delay_on_retry', True) + attempts = kwargs.pop('attempts', 1) + run_as_root = kwargs.pop('run_as_root', False) + root_helper = kwargs.pop('root_helper', '') + shell = kwargs.pop('shell', False) + + if isinstance(check_exit_code, bool): + ignore_exit_code = not check_exit_code + check_exit_code = [0] + elif isinstance(check_exit_code, int): + check_exit_code = [check_exit_code] + + if len(kwargs): + raise UnknownArgumentError(_('Got unknown keyword args ' + 'to utils.execute: %r') % kwargs) + + if run_as_root and os.geteuid() != 0: + if not root_helper: + raise NoRootWrapSpecified( + message=('Command requested root, but did not specify a root ' + 'helper.')) + cmd = shlex.split(root_helper) + list(cmd) + + cmd = map(str, cmd) + + while attempts > 0: + attempts -= 1 + try: + LOG.debug(_('Running cmd (subprocess): %s'), ' '.join(cmd)) + _PIPE = subprocess.PIPE # pylint: disable=E1101 + + if os.name == 'nt': + preexec_fn = None + close_fds = False + else: + preexec_fn = _subprocess_setup + close_fds = True + + obj = subprocess.Popen(cmd, + stdin=_PIPE, + stdout=_PIPE, + stderr=_PIPE, + close_fds=close_fds, + preexec_fn=preexec_fn, + shell=shell) + result = None + if process_input is not None: + result = obj.communicate(process_input) + else: + result = obj.communicate() + obj.stdin.close() # pylint: disable=E1101 + _returncode = obj.returncode # pylint: disable=E1101 + if _returncode: + LOG.debug(_('Result was %s') % _returncode) + if not ignore_exit_code and _returncode not in check_exit_code: + (stdout, 
stderr) = result + raise ProcessExecutionError(exit_code=_returncode, + stdout=stdout, + stderr=stderr, + cmd=' '.join(cmd)) + return result + except ProcessExecutionError: + if not attempts: + raise + else: + LOG.debug(_('%r failed. Retrying.'), cmd) + if delay_on_retry: + greenthread.sleep(random.randint(20, 200) / 100.0) + finally: + # NOTE(termie): this appears to be necessary to let the subprocess + # call clean something up in between calls, without + # it two execute calls in a row hangs the second one + greenthread.sleep(0) diff --git a/cinder/openstack/common/rootwrap/__init__.py b/cinder/openstack/common/rootwrap/__init__.py new file mode 100644 index 0000000000..2d32e4ef31 --- /dev/null +++ b/cinder/openstack/common/rootwrap/__init__.py @@ -0,0 +1,16 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/cinder/openstack/common/rootwrap/cmd.py b/cinder/openstack/common/rootwrap/cmd.py new file mode 100755 index 0000000000..78265e30ca --- /dev/null +++ b/cinder/openstack/common/rootwrap/cmd.py @@ -0,0 +1,128 @@ +#!/usr/bin/env python +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Root wrapper for OpenStack services + + Filters which commands a service is allowed to run as another user. + + To use this with cinder, you should set the following in + cinder.conf: + rootwrap_config=/etc/cinder/rootwrap.conf + + You also need to let the cinder user run cinder-rootwrap + as root in sudoers: + cinder ALL = (root) NOPASSWD: /usr/bin/cinder-rootwrap + /etc/cinder/rootwrap.conf * + + Service packaging should deploy .filters files only on nodes where + they are needed, to avoid allowing more than is necessary. +""" + +import ConfigParser +import logging +import os +import pwd +import signal +import subprocess +import sys + + +RC_UNAUTHORIZED = 99 +RC_NOCOMMAND = 98 +RC_BADCONFIG = 97 +RC_NOEXECFOUND = 96 + + +def _subprocess_setup(): + # Python installs a SIGPIPE handler by default. This is usually not what + # non-Python subprocesses expect. 
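A usage sketch for execute() above; the command and root helper string are illustrative, and run_as_root requires a working rootwrap setup on the host::

    from cinder.openstack.common import processutils

    out, err = processutils.execute(
        'vgs', '--noheadings', '-o', 'name',
        attempts=3, delay_on_retry=True,
        run_as_root=True,
        root_helper='sudo cinder-rootwrap /etc/cinder/rootwrap.conf')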
+ signal.signal(signal.SIGPIPE, signal.SIG_DFL) + + +def _exit_error(execname, message, errorcode, log=True): + print "%s: %s" % (execname, message) + if log: + logging.error(message) + sys.exit(errorcode) + + +def main(): + # Split arguments, require at least a command + execname = sys.argv.pop(0) + if len(sys.argv) < 2: + _exit_error(execname, "No command specified", RC_NOCOMMAND, log=False) + + configfile = sys.argv.pop(0) + userargs = sys.argv[:] + + # Add ../ to sys.path to allow running from branch + possible_topdir = os.path.normpath(os.path.join(os.path.abspath(execname), + os.pardir, os.pardir)) + if os.path.exists(os.path.join(possible_topdir, "cinder", "__init__.py")): + sys.path.insert(0, possible_topdir) + + from cinder.openstack.common.rootwrap import wrapper + + # Load configuration + try: + rawconfig = ConfigParser.RawConfigParser() + rawconfig.read(configfile) + config = wrapper.RootwrapConfig(rawconfig) + except ValueError as exc: + msg = "Incorrect value in %s: %s" % (configfile, exc.message) + _exit_error(execname, msg, RC_BADCONFIG, log=False) + except ConfigParser.Error: + _exit_error(execname, "Incorrect configuration file: %s" % configfile, + RC_BADCONFIG, log=False) + + if config.use_syslog: + wrapper.setup_syslog(execname, + config.syslog_log_facility, + config.syslog_log_level) + + # Execute command if it matches any of the loaded filters + filters = wrapper.load_filters(config.filters_path) + try: + filtermatch = wrapper.match_filter(filters, userargs, + exec_dirs=config.exec_dirs) + if filtermatch: + command = filtermatch.get_command(userargs, + exec_dirs=config.exec_dirs) + if config.use_syslog: + logging.info("(%s > %s) Executing %s (filter match = %s)" % ( + os.getlogin(), pwd.getpwuid(os.getuid())[0], + command, filtermatch.name)) + + obj = subprocess.Popen(command, + stdin=sys.stdin, + stdout=sys.stdout, + stderr=sys.stderr, + preexec_fn=_subprocess_setup, + env=filtermatch.get_environment(userargs)) + obj.wait() + sys.exit(obj.returncode) + + except wrapper.FilterMatchNotExecutable as exc: + msg = ("Executable not found: %s (filter match = %s)" + % (exc.match.exec_path, exc.match.name)) + _exit_error(execname, msg, RC_NOEXECFOUND, log=config.use_syslog) + + except wrapper.NoFilterMatched: + msg = ("Unauthorized command: %s (no filter matched)" + % ' '.join(userargs)) + _exit_error(execname, msg, RC_UNAUTHORIZED, log=config.use_syslog) diff --git a/cinder/openstack/common/rootwrap/filters.py b/cinder/openstack/common/rootwrap/filters.py new file mode 100644 index 0000000000..d9618af883 --- /dev/null +++ b/cinder/openstack/common/rootwrap/filters.py @@ -0,0 +1,226 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
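+
+"""Command filter classes for rootwrap.
+
+Each line of a .filters file maps a filter name to one of the classes
+below plus its constructor arguments, for example (hypothetical entry):
+
+    chmod: CommandFilter, /bin/chmod, root
+"""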
+ +import os +import re + + +class CommandFilter(object): + """Command filter only checking that the 1st argument matches exec_path""" + + def __init__(self, exec_path, run_as, *args): + self.name = '' + self.exec_path = exec_path + self.run_as = run_as + self.args = args + self.real_exec = None + + def get_exec(self, exec_dirs=[]): + """Returns existing executable, or empty string if none found""" + if self.real_exec is not None: + return self.real_exec + self.real_exec = "" + if self.exec_path.startswith('/'): + if os.access(self.exec_path, os.X_OK): + self.real_exec = self.exec_path + else: + for binary_path in exec_dirs: + expanded_path = os.path.join(binary_path, self.exec_path) + if os.access(expanded_path, os.X_OK): + self.real_exec = expanded_path + break + return self.real_exec + + def match(self, userargs): + """Only check that the first argument (command) matches exec_path""" + if (os.path.basename(self.exec_path) == userargs[0]): + return True + return False + + def get_command(self, userargs, exec_dirs=[]): + """Returns command to execute (with sudo -u if run_as != root).""" + to_exec = self.get_exec(exec_dirs=exec_dirs) or self.exec_path + if (self.run_as != 'root'): + # Used to run commands at lesser privileges + return ['sudo', '-u', self.run_as, to_exec] + userargs[1:] + return [to_exec] + userargs[1:] + + def get_environment(self, userargs): + """Returns specific environment to set, None if none""" + return None + + +class RegExpFilter(CommandFilter): + """Command filter doing regexp matching for every argument""" + + def match(self, userargs): + # Early skip if command or number of args don't match + if (len(self.args) != len(userargs)): + # DENY: argument numbers don't match + return False + # Compare each arg (anchoring pattern explicitly at end of string) + for (pattern, arg) in zip(self.args, userargs): + try: + if not re.match(pattern + '$', arg): + break + except re.error: + # DENY: Badly-formed filter + return False + else: + # ALLOW: All arguments matched + return True + + # DENY: Some arguments did not match + return False + + +class PathFilter(CommandFilter): + """Command filter checking that path arguments are within given dirs + + One can specify the following constraints for command arguments: + 1) pass - pass an argument as is to the resulting command + 2) some_str - check if an argument is equal to the given string + 3) abs path - check if a path argument is within the given base dir + + A typical rootwrapper filter entry looks like this: + # cmdname: filter name, raw command, user, arg_i_constraint [, ...] 
+ chown: PathFilter, /bin/chown, root, nova, /var/lib/images + + """ + + def match(self, userargs): + command, arguments = userargs[0], userargs[1:] + + equal_args_num = len(self.args) == len(arguments) + exec_is_valid = super(PathFilter, self).match(userargs) + args_equal_or_pass = all( + arg == 'pass' or arg == value + for arg, value in zip(self.args, arguments) + if not os.path.isabs(arg) # arguments not specifying abs paths + ) + paths_are_within_base_dirs = all( + os.path.commonprefix([arg, os.path.realpath(value)]) == arg + for arg, value in zip(self.args, arguments) + if os.path.isabs(arg) # arguments specifying abs paths + ) + + return (equal_args_num and + exec_is_valid and + args_equal_or_pass and + paths_are_within_base_dirs) + + def get_command(self, userargs, exec_dirs=[]): + command, arguments = userargs[0], userargs[1:] + + # convert path values to canonical ones; copy other args as is + args = [os.path.realpath(value) if os.path.isabs(arg) else value + for arg, value in zip(self.args, arguments)] + + return super(PathFilter, self).get_command([command] + args, + exec_dirs) + + +class DnsmasqFilter(CommandFilter): + """Specific filter for the dnsmasq call (which includes env)""" + + CONFIG_FILE_ARG = 'CONFIG_FILE' + + def match(self, userargs): + if (userargs[0] == 'env' and + userargs[1].startswith(self.CONFIG_FILE_ARG) and + userargs[2].startswith('NETWORK_ID=') and + userargs[3] == 'dnsmasq'): + return True + return False + + def get_command(self, userargs, exec_dirs=[]): + to_exec = self.get_exec(exec_dirs=exec_dirs) or self.exec_path + dnsmasq_pos = userargs.index('dnsmasq') + return [to_exec] + userargs[dnsmasq_pos + 1:] + + def get_environment(self, userargs): + env = os.environ.copy() + env[self.CONFIG_FILE_ARG] = userargs[1].split('=')[-1] + env['NETWORK_ID'] = userargs[2].split('=')[-1] + return env + + +class DeprecatedDnsmasqFilter(DnsmasqFilter): + """Variant of dnsmasq filter to support old-style FLAGFILE""" + CONFIG_FILE_ARG = 'FLAGFILE' + + +class KillFilter(CommandFilter): + """Specific filter for the kill calls. + 1st argument is the user to run /bin/kill under + 2nd argument is the location of the affected executable + Subsequent arguments list the accepted signals (if any) + + This filter relies on /proc to accurately determine affected + executable, so it will only work on procfs-capable systems (not OSX). 
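+
+    A hypothetical filter definition using this class could look like:
+
+        kill_dnsmasq: KillFilter, root, /usr/sbin/dnsmasq, -9, -HUP
+
+    which authorizes only 'kill -9' or 'kill -HUP' against processes whose
+    executable resolves to /usr/sbin/dnsmasq.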
+ """ + + def __init__(self, *args): + super(KillFilter, self).__init__("/bin/kill", *args) + + def match(self, userargs): + if userargs[0] != "kill": + return False + args = list(userargs) + if len(args) == 3: + # A specific signal is requested + signal = args.pop(1) + if signal not in self.args[1:]: + # Requested signal not in accepted list + return False + else: + if len(args) != 2: + # Incorrect number of arguments + return False + if len(self.args) > 1: + # No signal requested, but filter requires specific signal + return False + try: + command = os.readlink("/proc/%d/exe" % int(args[1])) + # NOTE(dprince): /proc/PID/exe may have ' (deleted)' on + # the end if an executable is updated or deleted + if command.endswith(" (deleted)"): + command = command[:command.rindex(" ")] + if command != self.args[0]: + # Affected executable does not match + return False + except (ValueError, OSError): + # Incorrect PID + return False + return True + + +class ReadFileFilter(CommandFilter): + """Specific filter for the utils.read_file_as_root call""" + + def __init__(self, file_path, *args): + self.file_path = file_path + super(ReadFileFilter, self).__init__("/bin/cat", "root", *args) + + def match(self, userargs): + if userargs[0] != 'cat': + return False + if userargs[1] != self.file_path: + return False + if len(userargs) != 2: + return False + return True diff --git a/cinder/openstack/common/rootwrap/wrapper.py b/cinder/openstack/common/rootwrap/wrapper.py new file mode 100644 index 0000000000..a8ab123905 --- /dev/null +++ b/cinder/openstack/common/rootwrap/wrapper.py @@ -0,0 +1,149 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +import ConfigParser +import logging +import logging.handlers +import os +import string + +from cinder.openstack.common.rootwrap import filters + + +class NoFilterMatched(Exception): + """This exception is raised when no filter matched.""" + pass + + +class FilterMatchNotExecutable(Exception): + """ + This exception is raised when a filter matched but no executable was + found. 
+ """ + def __init__(self, match=None, **kwargs): + self.match = match + + +class RootwrapConfig(object): + + def __init__(self, config): + # filters_path + self.filters_path = config.get("DEFAULT", "filters_path").split(",") + + # exec_dirs + if config.has_option("DEFAULT", "exec_dirs"): + self.exec_dirs = config.get("DEFAULT", "exec_dirs").split(",") + else: + # Use system PATH if exec_dirs is not specified + self.exec_dirs = os.environ["PATH"].split(':') + + # syslog_log_facility + if config.has_option("DEFAULT", "syslog_log_facility"): + v = config.get("DEFAULT", "syslog_log_facility") + facility_names = logging.handlers.SysLogHandler.facility_names + self.syslog_log_facility = getattr(logging.handlers.SysLogHandler, + v, None) + if self.syslog_log_facility is None and v in facility_names: + self.syslog_log_facility = facility_names.get(v) + if self.syslog_log_facility is None: + raise ValueError('Unexpected syslog_log_facility: %s' % v) + else: + default_facility = logging.handlers.SysLogHandler.LOG_SYSLOG + self.syslog_log_facility = default_facility + + # syslog_log_level + if config.has_option("DEFAULT", "syslog_log_level"): + v = config.get("DEFAULT", "syslog_log_level") + self.syslog_log_level = logging.getLevelName(v.upper()) + if (self.syslog_log_level == "Level %s" % v.upper()): + raise ValueError('Unexepected syslog_log_level: %s' % v) + else: + self.syslog_log_level = logging.ERROR + + # use_syslog + if config.has_option("DEFAULT", "use_syslog"): + self.use_syslog = config.getboolean("DEFAULT", "use_syslog") + else: + self.use_syslog = False + + +def setup_syslog(execname, facility, level): + rootwrap_logger = logging.getLogger() + rootwrap_logger.setLevel(level) + handler = logging.handlers.SysLogHandler(address='/dev/log', + facility=facility) + handler.setFormatter(logging.Formatter( + os.path.basename(execname) + ': %(message)s')) + rootwrap_logger.addHandler(handler) + + +def build_filter(class_name, *args): + """Returns a filter object of class class_name""" + if not hasattr(filters, class_name): + logging.warning("Skipping unknown filter class (%s) specified " + "in filter definitions" % class_name) + return None + filterclass = getattr(filters, class_name) + return filterclass(*args) + + +def load_filters(filters_path): + """Load filters from a list of directories""" + filterlist = [] + for filterdir in filters_path: + if not os.path.isdir(filterdir): + continue + for filterfile in os.listdir(filterdir): + filterconfig = ConfigParser.RawConfigParser() + filterconfig.read(os.path.join(filterdir, filterfile)) + for (name, value) in filterconfig.items("Filters"): + filterdefinition = [string.strip(s) for s in value.split(',')] + newfilter = build_filter(*filterdefinition) + if newfilter is None: + continue + newfilter.name = name + filterlist.append(newfilter) + return filterlist + + +def match_filter(filters, userargs, exec_dirs=[]): + """ + Checks user command and arguments through command filters and + returns the first matching filter. + Raises NoFilterMatched if no filter matched. + Raises FilterMatchNotExecutable if no executable was found for the + best filter match. 
+ """ + first_not_executable_filter = None + + for f in filters: + if f.match(userargs): + # Try other filters if executable is absent + if not f.get_exec(exec_dirs=exec_dirs): + if not first_not_executable_filter: + first_not_executable_filter = f + continue + # Otherwise return matching filter for execution + return f + + if first_not_executable_filter: + # A filter matched, but no executable was found for it + raise FilterMatchNotExecutable(match=first_not_executable_filter) + + # No filter matched + raise NoFilterMatched() diff --git a/cinder/openstack/common/rpc/__init__.py b/cinder/openstack/common/rpc/__init__.py new file mode 100644 index 0000000000..3ffce83325 --- /dev/null +++ b/cinder/openstack/common/rpc/__init__.py @@ -0,0 +1,307 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# Copyright 2011 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +A remote procedure call (rpc) abstraction. + +For some wrappers that add message versioning to rpc, see: + rpc.dispatcher + rpc.proxy +""" + +import inspect +import logging + +from oslo.config import cfg + +from cinder.openstack.common.gettextutils import _ +from cinder.openstack.common import importutils +from cinder.openstack.common import local + + +LOG = logging.getLogger(__name__) + + +rpc_opts = [ + cfg.StrOpt('rpc_backend', + default='%s.impl_kombu' % __package__, + help="The messaging module to use, defaults to kombu."), + cfg.IntOpt('rpc_thread_pool_size', + default=64, + help='Size of RPC thread pool'), + cfg.IntOpt('rpc_conn_pool_size', + default=30, + help='Size of RPC connection pool'), + cfg.IntOpt('rpc_response_timeout', + default=60, + help='Seconds to wait for a response from call or multicall'), + cfg.IntOpt('rpc_cast_timeout', + default=30, + help='Seconds to wait before a cast expires (TTL). ' + 'Only supported by impl_zmq.'), + cfg.ListOpt('allowed_rpc_exception_modules', + default=['cinder.openstack.common.exception', + 'nova.exception', + 'cinder.exception', + 'exceptions', + ], + help='Modules of exceptions that are permitted to be recreated' + 'upon receiving exception data from an rpc call.'), + cfg.BoolOpt('fake_rabbit', + default=False, + help='If passed, use a fake RabbitMQ provider'), + cfg.StrOpt('control_exchange', + default='openstack', + help='AMQP exchange to connect to if using RabbitMQ or Qpid'), +] + +CONF = cfg.CONF +CONF.register_opts(rpc_opts) + + +def set_defaults(control_exchange): + cfg.set_defaults(rpc_opts, + control_exchange=control_exchange) + + +def create_connection(new=True): + """Create a connection to the message bus used for rpc. + + For some example usage of creating a connection and some consumers on that + connection, see nova.service. + + :param new: Whether or not to create a new connection. A new connection + will be created by default. 
+                If new is False, the implementation is free to return an
+                existing connection from a pool.
+
+    :returns: An instance of openstack.common.rpc.common.Connection
+    """
+    return _get_impl().create_connection(CONF, new=new)
+
+
+def _check_for_lock():
+    if not CONF.debug:
+        return None
+
+    if ((hasattr(local.strong_store, 'locks_held')
+         and local.strong_store.locks_held)):
+        stack = ' :: '.join([frame[3] for frame in inspect.stack()])
+        LOG.warn(_('An RPC is being made while holding a lock. The locks '
+                   'currently held are %(locks)s. This is probably a bug. '
+                   'Please report it. Include the following: [%(stack)s].'),
+                 {'locks': local.strong_store.locks_held,
+                  'stack': stack})
+        return True
+
+    return False
+
+
+def call(context, topic, msg, timeout=None, check_for_lock=False):
+    """Invoke a remote method that returns something.
+
+    :param context: Information that identifies the user that has made this
+                    request.
+    :param topic: The topic to send the rpc message to. This correlates to the
+                  topic argument of
+                  openstack.common.rpc.common.Connection.create_consumer()
+                  and only applies when the consumer was created with
+                  fanout=False.
+    :param msg: This is a dict in the form { "method" : "method_to_invoke",
+                "args" : dict_of_kwargs }
+    :param timeout: int, number of seconds to use for a response timeout.
+                    If set, this overrides the rpc_response_timeout option.
+    :param check_for_lock: if True, a warning is emitted if an RPC call
+                           is made with a lock held.
+
+    :returns: A dict from the remote method.
+
+    :raises: openstack.common.rpc.common.Timeout if a complete response
+             is not received before the timeout is reached.
+    """
+    if check_for_lock:
+        _check_for_lock()
+    return _get_impl().call(CONF, context, topic, msg, timeout)
+
+
+def cast(context, topic, msg):
+    """Invoke a remote method that does not return anything.
+
+    :param context: Information that identifies the user that has made this
+                    request.
+    :param topic: The topic to send the rpc message to. This correlates to the
+                  topic argument of
+                  openstack.common.rpc.common.Connection.create_consumer()
+                  and only applies when the consumer was created with
+                  fanout=False.
+    :param msg: This is a dict in the form { "method" : "method_to_invoke",
+                "args" : dict_of_kwargs }
+
+    :returns: None
+    """
+    return _get_impl().cast(CONF, context, topic, msg)
+
+
+def fanout_cast(context, topic, msg):
+    """Broadcast a remote method invocation with no return.
+
+    This method will get invoked on all consumers that were set up with this
+    topic name and fanout=True.
+
+    :param context: Information that identifies the user that has made this
+                    request.
+    :param topic: The topic to send the rpc message to. This correlates to the
+                  topic argument of
+                  openstack.common.rpc.common.Connection.create_consumer()
+                  and only applies when the consumer was created with
+                  fanout=True.
+    :param msg: This is a dict in the form { "method" : "method_to_invoke",
+                "args" : dict_of_kwargs }
+
+    :returns: None
+    """
+    return _get_impl().fanout_cast(CONF, context, topic, msg)
+
+
+def multicall(context, topic, msg, timeout=None, check_for_lock=False):
+    """Invoke a remote method and get back an iterator.
+
+    In this case, the remote method will be returning multiple values in
+    separate messages, so the return values can be processed as they come in
+    via an iterator.
+
+    :param context: Information that identifies the user that has made this
+                    request.
+    :param topic: The topic to send the rpc message to.
+                  This correlates to the topic argument of
+                  openstack.common.rpc.common.Connection.create_consumer()
+                  and only applies when the consumer was created with
+                  fanout=False.
+    :param msg: This is a dict in the form { "method" : "method_to_invoke",
+                "args" : dict_of_kwargs }
+    :param timeout: int, number of seconds to use for a response timeout.
+                    If set, this overrides the rpc_response_timeout option.
+    :param check_for_lock: if True, a warning is emitted if an RPC call
+                           is made with a lock held.
+
+    :returns: An iterator. The iterator will yield a tuple (N, X) where N is
+              an index that starts at 0 and increases by one for each value
+              returned and X is the Nth value that was returned by the remote
+              method.
+
+    :raises: openstack.common.rpc.common.Timeout if a complete response
+             is not received before the timeout is reached.
+    """
+    if check_for_lock:
+        _check_for_lock()
+    return _get_impl().multicall(CONF, context, topic, msg, timeout)
+
+
+def notify(context, topic, msg, envelope=False):
+    """Send notification event.
+
+    :param context: Information that identifies the user that has made this
+                    request.
+    :param topic: The topic to send the notification to.
+    :param msg: This is a dict of content of event.
+    :param envelope: Set to True to enable message envelope for notifications.
+
+    :returns: None
+    """
+    return _get_impl().notify(cfg.CONF, context, topic, msg, envelope)
+
+
+def cleanup():
+    """Clean up resources in use by implementation.
+
+    Clean up any resources that have been allocated by the RPC implementation.
+    This is typically open connections to a messaging service. This function
+    would get called before an application using this API exits to allow
+    connections to get torn down cleanly.
+
+    :returns: None
+    """
+    return _get_impl().cleanup()
+
+
+def cast_to_server(context, server_params, topic, msg):
+    """Invoke a remote method that does not return anything.
+
+    :param context: Information that identifies the user that has made this
+                    request.
+    :param server_params: Connection information
+    :param topic: The topic to send the notification to.
+    :param msg: This is a dict in the form { "method" : "method_to_invoke",
+                "args" : dict_of_kwargs }
+
+    :returns: None
+    """
+    return _get_impl().cast_to_server(CONF, context, server_params, topic,
+                                      msg)
+
+
+def fanout_cast_to_server(context, server_params, topic, msg):
+    """Broadcast a remote method invocation with no return.
+
+    :param context: Information that identifies the user that has made this
+                    request.
+    :param server_params: Connection information
+    :param topic: The topic to send the notification to.
+    :param msg: This is a dict in the form { "method" : "method_to_invoke",
+                "args" : dict_of_kwargs }
+
+    :returns: None
+    """
+    return _get_impl().fanout_cast_to_server(CONF, context, server_params,
+                                             topic, msg)
+
+
+def queue_get_for(context, topic, host):
+    """Get a queue name for a given topic + host.
+
+    This function only works if this naming convention is followed on the
+    consumer side, as well. For example, in nova, every instance of the
+    nova-foo service calls create_consumer() for two topics:
+
+        foo
+        foo.<host>
+
+    Messages sent to the 'foo' topic are distributed to exactly one instance
+    of the nova-foo service. The services are chosen in a round-robin
+    fashion. Messages sent to the 'foo.<host>' topic are sent to the
+    nova-foo service on <host>.
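+
+    For example (hypothetical values): queue_get_for(context, 'volume',
+    'host1') returns 'volume.host1', while a None or empty host returns
+    just 'volume'.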
+ """ + return '%s.%s' % (topic, host) if host else topic + + +_RPCIMPL = None + + +def _get_impl(): + """Delay import of rpc_backend until configuration is loaded.""" + global _RPCIMPL + if _RPCIMPL is None: + try: + _RPCIMPL = importutils.import_module(CONF.rpc_backend) + except ImportError: + # For backwards compatibility with older nova config. + impl = CONF.rpc_backend.replace('nova.rpc', + 'nova.openstack.common.rpc') + _RPCIMPL = importutils.import_module(impl) + return _RPCIMPL diff --git a/cinder/openstack/common/rpc/amqp.py b/cinder/openstack/common/rpc/amqp.py new file mode 100644 index 0000000000..9addfa1c76 --- /dev/null +++ b/cinder/openstack/common/rpc/amqp.py @@ -0,0 +1,677 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# Copyright 2011 - 2012, Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Shared code between AMQP based openstack.common.rpc implementations. + +The code in this module is shared between the rpc implemenations based on AMQP. +Specifically, this includes impl_kombu and impl_qpid. impl_carrot also uses +AMQP, but is deprecated and predates this code. +""" + +import collections +import inspect +import sys +import uuid + +from eventlet import greenpool +from eventlet import pools +from eventlet import queue +from eventlet import semaphore +# TODO(pekowsk): Remove import cfg and below comment in Havana. +# This import should no longer be needed when the amqp_rpc_single_reply_queue +# option is removed. +from oslo.config import cfg + +from cinder.openstack.common import excutils +from cinder.openstack.common.gettextutils import _ +from cinder.openstack.common import local +from cinder.openstack.common import log as logging +from cinder.openstack.common.rpc import common as rpc_common + + +# TODO(pekowski): Remove this option in Havana. +amqp_opts = [ + cfg.BoolOpt('amqp_rpc_single_reply_queue', + default=False, + help='Enable a fast single reply queue if using AMQP based ' + 'RPC like RabbitMQ or Qpid.'), +] + +cfg.CONF.register_opts(amqp_opts) + +UNIQUE_ID = '_unique_id' +LOG = logging.getLogger(__name__) + + +class Pool(pools.Pool): + """Class that implements a Pool of Connections.""" + def __init__(self, conf, connection_cls, *args, **kwargs): + self.connection_cls = connection_cls + self.conf = conf + kwargs.setdefault("max_size", self.conf.rpc_conn_pool_size) + kwargs.setdefault("order_as_stack", True) + super(Pool, self).__init__(*args, **kwargs) + self.reply_proxy = None + + # TODO(comstud): Timeout connections not used in a while + def create(self): + LOG.debug(_('Pool creating new connection')) + return self.connection_cls(self.conf) + + def empty(self): + while self.free_items: + self.get().close() + # Force a new connection pool to be created. + # Note that this was added due to failing unit test cases. 
+        # The issue is the above "while loop" gets all the cached
+        # connections from the pool and closes them, but never returns
+        # them to the pool, a pool leak. The unit tests hang waiting for
+        # an item to be returned to the pool. The unit tests get here via
+        # the tearDown() method. In the run time code, it gets here via
+        # cleanup() and only appears in service.py just before doing a
+        # sys.exit(), so cleanup() only happens once and the leakage is
+        # not a problem.
+        self.connection_cls.pool = None
+
+
+_pool_create_sem = semaphore.Semaphore()
+
+
+def get_connection_pool(conf, connection_cls):
+    with _pool_create_sem:
+        # Make sure only one thread tries to create the connection pool.
+        if not connection_cls.pool:
+            connection_cls.pool = Pool(conf, connection_cls)
+    return connection_cls.pool
+
+
+class ConnectionContext(rpc_common.Connection):
+    """The class that is actually returned to the caller of
+    create_connection(). This is essentially a wrapper around
+    Connection that supports 'with'. It can also return a new
+    Connection, or one from a pool. The function will also catch
+    when an instance of this class is to be deleted. With that
+    we can return Connections to the pool on exceptions and so
+    forth without making the caller be responsible for catching
+    them. If possible the function makes sure to return a
+    connection to the pool.
+    """
+
+    def __init__(self, conf, connection_pool, pooled=True, server_params=None):
+        """Create a new connection, or get one from the pool"""
+        self.connection = None
+        self.conf = conf
+        self.connection_pool = connection_pool
+        if pooled:
+            self.connection = connection_pool.get()
+        else:
+            self.connection = connection_pool.connection_cls(
+                conf,
+                server_params=server_params)
+        self.pooled = pooled
+
+    def __enter__(self):
+        """When with ConnectionContext() is used, return self"""
+        return self
+
+    def _done(self):
+        """If the connection came from a pool, clean it up and put it back.
+        If it did not come from a pool, close it.
+        """
+        if self.connection:
+            if self.pooled:
+                # Reset the connection so it's ready for the next caller
+                # to grab from the pool
+                self.connection.reset()
+                self.connection_pool.put(self.connection)
+            else:
+                try:
+                    self.connection.close()
+                except Exception:
+                    pass
+            self.connection = None
+
+    def __exit__(self, exc_type, exc_value, tb):
+        """End of 'with' statement. We're done here."""
+        self._done()
+
+    def __del__(self):
+        """Caller is done with this connection. Make sure we cleaned up."""
+        self._done()
+
+    def close(self):
+        """Caller is done with this connection."""
+        self._done()
+
+    def create_consumer(self, topic, proxy, fanout=False):
+        self.connection.create_consumer(topic, proxy, fanout)
+
+    def create_worker(self, topic, proxy, pool_name):
+        self.connection.create_worker(topic, proxy, pool_name)
+
+    def join_consumer_pool(self, callback, pool_name, topic, exchange_name):
+        self.connection.join_consumer_pool(callback,
+                                           pool_name,
+                                           topic,
+                                           exchange_name)
+
+    def consume_in_thread(self):
+        self.connection.consume_in_thread()
+
+    def __getattr__(self, key):
+        """Proxy all other calls to the Connection instance"""
+        if self.connection:
+            return getattr(self.connection, key)
+        else:
+            raise rpc_common.InvalidRPCConnectionReuse()
+
+
+class ReplyProxy(ConnectionContext):
+    """Connection class for RPC replies / callbacks"""
+    def __init__(self, conf, connection_pool):
+        self._call_waiters = {}
+        self._num_call_waiters = 0
+        self._num_call_waiters_wrn_threshold = 10
+        self._reply_q = 'reply_' + uuid.uuid4().hex
+        super(ReplyProxy, self).__init__(conf, connection_pool, pooled=False)
+        self.declare_direct_consumer(self._reply_q, self._process_data)
+        self.consume_in_thread()
+
+    def _process_data(self, message_data):
+        msg_id = message_data.pop('_msg_id', None)
+        waiter = self._call_waiters.get(msg_id)
+        if not waiter:
+            LOG.warn(_('no calling threads waiting for msg_id : %s'
+                       ', message : %s') % (msg_id, message_data))
+        else:
+            waiter.put(message_data)
+
+    def add_call_waiter(self, waiter, msg_id):
+        self._num_call_waiters += 1
+        if self._num_call_waiters > self._num_call_waiters_wrn_threshold:
+            LOG.warn(_('Number of call waiters is greater than warning '
+                       'threshold: %d. There could be a MulticallProxyWaiter '
+                       'leak.') % self._num_call_waiters_wrn_threshold)
+            self._num_call_waiters_wrn_threshold *= 2
+        self._call_waiters[msg_id] = waiter
+
+    def del_call_waiter(self, msg_id):
+        self._num_call_waiters -= 1
+        del self._call_waiters[msg_id]
+
+    def get_reply_q(self):
+        return self._reply_q
+
+
+def msg_reply(conf, msg_id, reply_q, connection_pool, reply=None,
+              failure=None, ending=False, log_failure=True):
+    """Sends a reply or an error on the channel signified by msg_id.
+
+    Failure should be a sys.exc_info() tuple.
+
+    """
+    with ConnectionContext(conf, connection_pool) as conn:
+        if failure:
+            failure = rpc_common.serialize_remote_exception(failure,
+                                                            log_failure)
+
+        try:
+            msg = {'result': reply, 'failure': failure}
+        except TypeError:
+            msg = {'result': dict((k, repr(v))
+                   for k, v in reply.__dict__.iteritems()),
+                   'failure': failure}
+        if ending:
+            msg['ending'] = True
+        _add_unique_id(msg)
+        # If a reply_q exists, add the msg_id to the reply and pass the
+        # reply_q to direct_send() to use it as the response queue.
+        # Otherwise use the msg_id for backward compatibility.
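+        # For illustration, a new-style reply routed via reply_q looks
+        # roughly like (values hypothetical):
+        #     {'result': 42, 'failure': None,
+        #      '_unique_id': '...', '_msg_id': 'abc...'}
+        # whereas the old style sends the same payload without '_msg_id',
+        # addressed directly to the msg_id queue.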
+        if reply_q:
+            msg['_msg_id'] = msg_id
+            conn.direct_send(reply_q, rpc_common.serialize_msg(msg))
+        else:
+            conn.direct_send(msg_id, rpc_common.serialize_msg(msg))
+
+
+class RpcContext(rpc_common.CommonRpcContext):
+    """Context that supports replying to a rpc.call"""
+    def __init__(self, **kwargs):
+        self.msg_id = kwargs.pop('msg_id', None)
+        self.reply_q = kwargs.pop('reply_q', None)
+        self.conf = kwargs.pop('conf')
+        super(RpcContext, self).__init__(**kwargs)
+
+    def deepcopy(self):
+        values = self.to_dict()
+        values['conf'] = self.conf
+        values['msg_id'] = self.msg_id
+        values['reply_q'] = self.reply_q
+        return self.__class__(**values)
+
+    def reply(self, reply=None, failure=None, ending=False,
+              connection_pool=None, log_failure=True):
+        if self.msg_id:
+            msg_reply(self.conf, self.msg_id, self.reply_q, connection_pool,
+                      reply, failure, ending, log_failure)
+            if ending:
+                self.msg_id = None
+
+
+def unpack_context(conf, msg):
+    """Unpack context from msg."""
+    context_dict = {}
+    for key in list(msg.keys()):
+        # NOTE(vish): Some versions of python don't like unicode keys
+        #             in kwargs.
+        key = str(key)
+        if key.startswith('_context_'):
+            value = msg.pop(key)
+            context_dict[key[9:]] = value
+    context_dict['msg_id'] = msg.pop('_msg_id', None)
+    context_dict['reply_q'] = msg.pop('_reply_q', None)
+    context_dict['conf'] = conf
+    ctx = RpcContext.from_dict(context_dict)
+    rpc_common._safe_log(LOG.debug, _('unpacked context: %s'), ctx.to_dict())
+    return ctx
+
+
+def pack_context(msg, context):
+    """Pack context into msg.
+
+    Values for message keys need to be less than 255 chars, so we pull
+    context out into a bunch of separate keys. If we want to support
+    more arguments in rabbit messages, we may want to do the same
+    for args at some point.
+
+    """
+    context_d = dict([('_context_%s' % key, value)
+                      for (key, value) in context.to_dict().iteritems()])
+    msg.update(context_d)
+
+
+class _MsgIdCache(object):
+    """This class checks for duplicate messages."""
+
+    # NOTE: This value could be made a configuration item, but it rarely
+    #       needs to be changed, so keep it static for now.
+    DUP_MSG_CHECK_SIZE = 16
+
+    def __init__(self, **kwargs):
+        self.prev_msgids = collections.deque([],
+                                             maxlen=self.DUP_MSG_CHECK_SIZE)
+
+    def check_duplicate_message(self, message_data):
+        """AMQP consumers may read same message twice when exceptions occur
+        before ack is returned. This method prevents doing it.
+        """
+        if UNIQUE_ID in message_data:
+            msg_id = message_data[UNIQUE_ID]
+            if msg_id not in self.prev_msgids:
+                self.prev_msgids.append(msg_id)
+            else:
+                raise rpc_common.DuplicateMessageError(msg_id=msg_id)
+
+
+def _add_unique_id(msg):
+    """Add unique_id for checking duplicate messages."""
+    unique_id = uuid.uuid4().hex
+    msg.update({UNIQUE_ID: unique_id})
+    LOG.debug(_('UNIQUE_ID is %s.') % (unique_id))
+
+
+class _ThreadPoolWithWait(object):
+    """Base class for a delayed invocation manager used by
+    the Connection class to start up green threads
+    to handle incoming messages.
+    """
+
+    def __init__(self, conf, connection_pool):
+        self.pool = greenpool.GreenPool(conf.rpc_thread_pool_size)
+        self.connection_pool = connection_pool
+        self.conf = conf
+
+    def wait(self):
+        """Wait for all callback threads to exit."""
+        self.pool.waitall()
+
+
+class CallbackWrapper(_ThreadPoolWithWait):
+    """Wraps a straight callback to allow it to be invoked in a green
+    thread.
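+
+    A usage sketch (callback hypothetical):
+
+        def handle_event(message_data):
+            LOG.debug('notification: %s', message_data)
+
+        wrapper = CallbackWrapper(conf, handle_event, connection_pool)
+        wrapper(message_data)  # handle_event runs in a green thread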
+ """ + + def __init__(self, conf, callback, connection_pool): + """ + :param conf: cfg.CONF instance + :param callback: a callable (probably a function) + :param connection_pool: connection pool as returned by + get_connection_pool() + """ + super(CallbackWrapper, self).__init__( + conf=conf, + connection_pool=connection_pool, + ) + self.callback = callback + + def __call__(self, message_data): + self.pool.spawn_n(self.callback, message_data) + + +class ProxyCallback(_ThreadPoolWithWait): + """Calls methods on a proxy object based on method and args.""" + + def __init__(self, conf, proxy, connection_pool): + super(ProxyCallback, self).__init__( + conf=conf, + connection_pool=connection_pool, + ) + self.proxy = proxy + self.msg_id_cache = _MsgIdCache() + + def __call__(self, message_data): + """Consumer callback to call a method on a proxy object. + + Parses the message for validity and fires off a thread to call the + proxy object method. + + Message data should be a dictionary with two keys: + method: string representing the method to call + args: dictionary of arg: value + + Example: {'method': 'echo', 'args': {'value': 42}} + + """ + # It is important to clear the context here, because at this point + # the previous context is stored in local.store.context + if hasattr(local.store, 'context'): + del local.store.context + rpc_common._safe_log(LOG.debug, _('received %s'), message_data) + self.msg_id_cache.check_duplicate_message(message_data) + ctxt = unpack_context(self.conf, message_data) + method = message_data.get('method') + args = message_data.get('args', {}) + version = message_data.get('version') + namespace = message_data.get('namespace') + if not method: + LOG.warn(_('no method for message: %s') % message_data) + ctxt.reply(_('No method for message: %s') % message_data, + connection_pool=self.connection_pool) + return + self.pool.spawn_n(self._process_data, ctxt, version, method, + namespace, args) + + def _process_data(self, ctxt, version, method, namespace, args): + """Process a message in a new thread. + + If the proxy object we have has a dispatch method + (see rpc.dispatcher.RpcDispatcher), pass it the version, + method, and args and let it dispatch as appropriate. If not, use + the old behavior of magically calling the specified method on the + proxy we have here. + """ + ctxt.update_store() + try: + rval = self.proxy.dispatch(ctxt, version, method, namespace, + **args) + # Check if the result was a generator + if inspect.isgenerator(rval): + for x in rval: + ctxt.reply(x, None, connection_pool=self.connection_pool) + else: + ctxt.reply(rval, None, connection_pool=self.connection_pool) + # This final None tells multicall that it is done. + ctxt.reply(ending=True, connection_pool=self.connection_pool) + except rpc_common.ClientException as e: + LOG.debug(_('Expected exception during message handling (%s)') % + e._exc_info[1]) + ctxt.reply(None, e._exc_info, + connection_pool=self.connection_pool, + log_failure=False) + except Exception: + # sys.exc_info() is deleted by LOG.exception(). 
+            exc_info = sys.exc_info()
+            LOG.error(_('Exception during message handling'),
+                      exc_info=exc_info)
+            ctxt.reply(None, exc_info, connection_pool=self.connection_pool)
+
+
+class MulticallProxyWaiter(object):
+    def __init__(self, conf, msg_id, timeout, connection_pool):
+        self._msg_id = msg_id
+        self._timeout = timeout or conf.rpc_response_timeout
+        self._reply_proxy = connection_pool.reply_proxy
+        self._done = False
+        self._got_ending = False
+        self._conf = conf
+        self._dataqueue = queue.LightQueue()
+        # Add this caller to the reply proxy's call_waiters
+        self._reply_proxy.add_call_waiter(self, self._msg_id)
+        self.msg_id_cache = _MsgIdCache()
+
+    def put(self, data):
+        self._dataqueue.put(data)
+
+    def done(self):
+        if self._done:
+            return
+        self._done = True
+        # Remove this caller from reply proxy's call_waiters
+        self._reply_proxy.del_call_waiter(self._msg_id)
+
+    def _process_data(self, data):
+        result = None
+        self.msg_id_cache.check_duplicate_message(data)
+        if data['failure']:
+            failure = data['failure']
+            result = rpc_common.deserialize_remote_exception(self._conf,
+                                                             failure)
+        elif data.get('ending', False):
+            self._got_ending = True
+        else:
+            result = data['result']
+        return result
+
+    def __iter__(self):
+        """Return a result until we get a reply with an 'ending' flag"""
+        if self._done:
+            raise StopIteration
+        while True:
+            try:
+                data = self._dataqueue.get(timeout=self._timeout)
+                result = self._process_data(data)
+            except queue.Empty:
+                self.done()
+                raise rpc_common.Timeout()
+            except Exception:
+                with excutils.save_and_reraise_exception():
+                    self.done()
+            if self._got_ending:
+                self.done()
+                raise StopIteration
+            if isinstance(result, Exception):
+                self.done()
+                raise result
+            yield result
+
+
+# TODO(pekowski): Remove MulticallWaiter() in Havana.
+class MulticallWaiter(object):
+    def __init__(self, conf, connection, timeout):
+        self._connection = connection
+        self._iterator = connection.iterconsume(timeout=timeout or
+                                                conf.rpc_response_timeout)
+        self._result = None
+        self._done = False
+        self._got_ending = False
+        self._conf = conf
+        self.msg_id_cache = _MsgIdCache()
+
+    def done(self):
+        if self._done:
+            return
+        self._done = True
+        self._iterator.close()
+        self._iterator = None
+        self._connection.close()
+
+    def __call__(self, data):
+        """The consume() callback will call this. Store the result."""
+        self.msg_id_cache.check_duplicate_message(data)
+        if data['failure']:
+            failure = data['failure']
+            self._result = rpc_common.deserialize_remote_exception(self._conf,
+                                                                   failure)
+        elif data.get('ending', False):
+            self._got_ending = True
+        else:
+            self._result = data['result']
+
+    def __iter__(self):
+        """Return a result until we get a 'None' response from consumer"""
+        if self._done:
+            raise StopIteration
+        while True:
+            try:
+                self._iterator.next()
+            except Exception:
+                with excutils.save_and_reraise_exception():
+                    self.done()
+            if self._got_ending:
+                self.done()
+                raise StopIteration
+            result = self._result
+            if isinstance(result, Exception):
+                self.done()
+                raise result
+            yield result
+
+
+def create_connection(conf, new, connection_pool):
+    """Create a connection"""
+    return ConnectionContext(conf, connection_pool, pooled=not new)
+
+
+_reply_proxy_create_sem = semaphore.Semaphore()
+
+
+def multicall(conf, context, topic, msg, timeout, connection_pool):
+    """Make a call that returns multiple times."""
+    # TODO(pekowski): Remove all these comments in Havana.
+    # For amqp_rpc_single_reply_queue = False,
+    # can't use 'with' for multicall, as it returns an iterator
+    # that will continue to use the connection. When it's done,
+    # connection.close() will get called which will put it back into
+    # the pool.
+    # For amqp_rpc_single_reply_queue = True,
+    # the 'with' statement is mandatory for closing the connection.
+    LOG.debug(_('Making synchronous call on %s ...'), topic)
+    msg_id = uuid.uuid4().hex
+    msg.update({'_msg_id': msg_id})
+    LOG.debug(_('MSG_ID is %s') % (msg_id))
+    _add_unique_id(msg)
+    pack_context(msg, context)
+
+    # TODO(pekowski): Remove this flag and the code under the if clause
+    # in Havana.
+    if not conf.amqp_rpc_single_reply_queue:
+        conn = ConnectionContext(conf, connection_pool)
+        wait_msg = MulticallWaiter(conf, conn, timeout)
+        conn.declare_direct_consumer(msg_id, wait_msg)
+        conn.topic_send(topic, rpc_common.serialize_msg(msg), timeout)
+    else:
+        with _reply_proxy_create_sem:
+            if not connection_pool.reply_proxy:
+                connection_pool.reply_proxy = ReplyProxy(conf,
+                                                         connection_pool)
+        msg.update({'_reply_q': connection_pool.reply_proxy.get_reply_q()})
+        wait_msg = MulticallProxyWaiter(conf, msg_id, timeout,
+                                        connection_pool)
+        with ConnectionContext(conf, connection_pool) as conn:
+            conn.topic_send(topic, rpc_common.serialize_msg(msg), timeout)
+    return wait_msg
+
+
+def call(conf, context, topic, msg, timeout, connection_pool):
+    """Sends a message on a topic and waits for a response."""
+    rv = multicall(conf, context, topic, msg, timeout, connection_pool)
+    # NOTE(vish): return the last result from the multicall
+    rv = list(rv)
+    if not rv:
+        return
+    return rv[-1]
+
+
+def cast(conf, context, topic, msg, connection_pool):
+    """Sends a message on a topic without waiting for a response."""
+    LOG.debug(_('Making asynchronous cast on %s...'), topic)
+    _add_unique_id(msg)
+    pack_context(msg, context)
+    with ConnectionContext(conf, connection_pool) as conn:
+        conn.topic_send(topic, rpc_common.serialize_msg(msg))
+
+
+def fanout_cast(conf, context, topic, msg, connection_pool):
+    """Sends a message on a fanout exchange without waiting for a response."""
+    LOG.debug(_('Making asynchronous fanout cast...'))
+    _add_unique_id(msg)
+    pack_context(msg, context)
+    with ConnectionContext(conf, connection_pool) as conn:
+        conn.fanout_send(topic, rpc_common.serialize_msg(msg))
+
+
+def cast_to_server(conf, context, server_params, topic, msg, connection_pool):
+    """Sends a message on a topic to a specific server."""
+    _add_unique_id(msg)
+    pack_context(msg, context)
+    with ConnectionContext(conf, connection_pool, pooled=False,
+                           server_params=server_params) as conn:
+        conn.topic_send(topic, rpc_common.serialize_msg(msg))
+
+
+def fanout_cast_to_server(conf, context, server_params, topic, msg,
+                          connection_pool):
+    """Sends a message on a fanout exchange to a specific server."""
+    _add_unique_id(msg)
+    pack_context(msg, context)
+    with ConnectionContext(conf, connection_pool, pooled=False,
+                           server_params=server_params) as conn:
+        conn.fanout_send(topic, rpc_common.serialize_msg(msg))
+
+
+def notify(conf, context, topic, msg, connection_pool, envelope):
+    """Sends a notification event on a topic."""
+    LOG.debug(_('Sending %(event_type)s on %(topic)s'),
+              dict(event_type=msg.get('event_type'),
+                   topic=topic))
+    _add_unique_id(msg)
+    pack_context(msg, context)
+    with ConnectionContext(conf, connection_pool) as conn:
+        if envelope:
+            msg = rpc_common.serialize_msg(msg)
+        conn.notify_send(topic, msg)
+
+
+def cleanup(connection_pool):
+    if
connection_pool: + connection_pool.empty() + + +def get_control_exchange(conf): + return conf.control_exchange diff --git a/cinder/openstack/common/rpc/common.py b/cinder/openstack/common/rpc/common.py new file mode 100644 index 0000000000..9f0552e5e9 --- /dev/null +++ b/cinder/openstack/common/rpc/common.py @@ -0,0 +1,508 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# Copyright 2011 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import copy +import sys +import traceback + +from oslo.config import cfg + +from cinder.openstack.common.gettextutils import _ +from cinder.openstack.common import importutils +from cinder.openstack.common import jsonutils +from cinder.openstack.common import local +from cinder.openstack.common import log as logging + + +CONF = cfg.CONF +LOG = logging.getLogger(__name__) + + +'''RPC Envelope Version. + +This version number applies to the top level structure of messages sent out. +It does *not* apply to the message payload, which must be versioned +independently. For example, when using rpc APIs, a version number is applied +for changes to the API being exposed over rpc. This version number is handled +in the rpc proxy and dispatcher modules. + +This version number applies to the message envelope that is used in the +serialization done inside the rpc layer. See serialize_msg() and +deserialize_msg(). + +The current message format (version 2.0) is very simple. It is: + + { + 'oslo.version': , + 'oslo.message': + } + +Message format version '1.0' is just considered to be the messages we sent +without a message envelope. + +So, the current message envelope just includes the envelope version. It may +eventually contain additional information, such as a signature for the message +payload. + +We will JSON encode the application message payload. The message envelope, +which includes the JSON encoded application message body, will be passed down +to the messaging libraries as a dict. +''' +_RPC_ENVELOPE_VERSION = '2.0' + +_VERSION_KEY = 'oslo.version' +_MESSAGE_KEY = 'oslo.message' + + +class RPCException(Exception): + message = _("An unknown RPC related exception occurred.") + + def __init__(self, message=None, **kwargs): + self.kwargs = kwargs + + if not message: + try: + message = self.message % kwargs + + except Exception: + # kwargs doesn't match a variable in the message + # log the issue and the kwargs + LOG.exception(_('Exception in string format operation')) + for name, value in kwargs.iteritems(): + LOG.error("%s: %s" % (name, value)) + # at least get the core message out if something happened + message = self.message + + super(RPCException, self).__init__(message) + + +class RemoteError(RPCException): + """Signifies that a remote class has raised an exception. 
+
+    Contains a string representation of the type of the original exception,
+    the value of the original exception, and the traceback. These are
+    sent to the parent as a joined string so printing the exception
+    contains all of the relevant info.
+
+    """
+    message = _("Remote error: %(exc_type)s %(value)s\n%(traceback)s.")
+
+    def __init__(self, exc_type=None, value=None, traceback=None):
+        self.exc_type = exc_type
+        self.value = value
+        self.traceback = traceback
+        super(RemoteError, self).__init__(exc_type=exc_type,
+                                          value=value,
+                                          traceback=traceback)
+
+
+class Timeout(RPCException):
+    """Signifies that a timeout has occurred.
+
+    This exception is raised if the rpc_response_timeout is reached while
+    waiting for a response from the remote side.
+    """
+    message = _('Timeout while waiting on RPC response - '
+                'topic: "%(topic)s", RPC method: "%(method)s" '
+                'info: "%(info)s"')
+
+    def __init__(self, info=None, topic=None, method=None):
+        """
+        :param info: Extra info to convey to the user
+        :param topic: The topic that the rpc call was sent to
+        :param method: The name of the rpc method being called
+        """
+        self.info = info
+        self.topic = topic
+        self.method = method
+        super(Timeout, self).__init__(
+            None,
+            info=info or _('<unknown>'),
+            topic=topic or _('<unknown>'),
+            method=method or _('<unknown>'))
+
+
+class DuplicateMessageError(RPCException):
+    message = _("Found duplicate message(%(msg_id)s). Skipping it.")
+
+
+class InvalidRPCConnectionReuse(RPCException):
+    message = _("Invalid reuse of an RPC connection.")
+
+
+class UnsupportedRpcVersion(RPCException):
+    message = _("Specified RPC version, %(version)s, not supported by "
+                "this endpoint.")
+
+
+class UnsupportedRpcEnvelopeVersion(RPCException):
+    message = _("Specified RPC envelope version, %(version)s, "
+                "not supported by this endpoint.")
+
+
+class Connection(object):
+    """A connection, returned by rpc.create_connection().
+
+    This class represents a connection to the message bus used for rpc.
+    An instance of this class should never be created by users of the rpc API.
+    Use rpc.create_connection() instead.
+    """
+    def close(self):
+        """Close the connection.
+
+        This method must be called when the connection will no longer be used.
+        It will ensure that any resources associated with the connection, such
+        as a network connection, are cleaned up.
+        """
+        raise NotImplementedError()
+
+    def create_consumer(self, topic, proxy, fanout=False):
+        """Create a consumer on this connection.
+
+        A consumer is associated with a message queue on the backend message
+        bus. The consumer will read messages from the queue, unpack them, and
+        dispatch them to the proxy object. The contents of the message pulled
+        off of the queue will determine which method gets called on the proxy
+        object.
+
+        :param topic: This is a name associated with what to consume from.
+                      Multiple instances of a service may consume from the same
+                      topic. For example, all instances of nova-compute consume
+                      from a queue called "compute". In that case, the
+                      messages will get distributed amongst the consumers in a
+                      round-robin fashion if fanout=False. If fanout=True,
+                      every consumer associated with this topic will get a
+                      copy of every message.
+        :param proxy: The object that will handle all incoming messages.
+        :param fanout: Whether or not this is a fanout topic. See the
+                       documentation for the topic parameter for some
+                       additional comments on this.
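+
+        A typical consumer setup, as sketched in nova.service (names
+        hypothetical), looks like:
+
+            conn = rpc.create_connection(new=True)
+            conn.create_consumer('cinder-volume', volume_manager)
+            conn.consume_in_thread()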
+ """ + raise NotImplementedError() + + def create_worker(self, topic, proxy, pool_name): + """Create a worker on this connection. + + A worker is like a regular consumer of messages directed to a + topic, except that it is part of a set of such consumers (the + "pool") which may run in parallel. Every pool of workers will + receive a given message, but only one worker in the pool will + be asked to process it. Load is distributed across the members + of the pool in round-robin fashion. + + :param topic: This is a name associated with what to consume from. + Multiple instances of a service may consume from the same + topic. + :param proxy: The object that will handle all incoming messages. + :param pool_name: String containing the name of the pool of workers + """ + raise NotImplementedError() + + def join_consumer_pool(self, callback, pool_name, topic, exchange_name): + """Register as a member of a group of consumers for a given topic from + the specified exchange. + + Exactly one member of a given pool will receive each message. + + A message will be delivered to multiple pools, if more than + one is created. + + :param callback: Callable to be invoked for each message. + :type callback: callable accepting one argument + :param pool_name: The name of the consumer pool. + :type pool_name: str + :param topic: The routing topic for desired messages. + :type topic: str + :param exchange_name: The name of the message exchange where + the client should attach. Defaults to + the configured exchange. + :type exchange_name: str + """ + raise NotImplementedError() + + def consume_in_thread(self): + """Spawn a thread to handle incoming messages. + + Spawn a thread that will be responsible for handling all incoming + messages for consumers that were set up on this connection. + + Message dispatching inside of this is expected to be implemented in a + non-blocking manner. An example implementation would be having this + thread pull messages in for all of the consumers, but utilize a thread + pool for dispatching the messages to the proxy objects. + """ + raise NotImplementedError() + + +def _safe_log(log_func, msg, msg_data): + """Sanitizes the msg_data field before logging.""" + SANITIZE = {'set_admin_password': [('args', 'new_pass')], + 'run_instance': [('args', 'admin_password')], + 'route_message': [('args', 'message', 'args', 'method_info', + 'method_kwargs', 'password'), + ('args', 'message', 'args', 'method_info', + 'method_kwargs', 'admin_password')]} + + has_method = 'method' in msg_data and msg_data['method'] in SANITIZE + has_context_token = '_context_auth_token' in msg_data + has_token = 'auth_token' in msg_data + + if not any([has_method, has_context_token, has_token]): + return log_func(msg, msg_data) + + msg_data = copy.deepcopy(msg_data) + + if has_method: + for arg in SANITIZE.get(msg_data['method'], []): + try: + d = msg_data + for elem in arg[:-1]: + d = d[elem] + d[arg[-1]] = '' + except KeyError, e: + LOG.info(_('Failed to sanitize %(item)s. Key error %(err)s'), + {'item': arg, + 'err': e}) + + if has_context_token: + msg_data['_context_auth_token'] = '' + + if has_token: + msg_data['auth_token'] = '' + + return log_func(msg, msg_data) + + +def serialize_remote_exception(failure_info, log_failure=True): + """Prepares exception data to be sent over rpc. + + Failure_info should be a sys.exc_info() tuple. 
+ + """ + tb = traceback.format_exception(*failure_info) + failure = failure_info[1] + if log_failure: + LOG.error(_("Returning exception %s to caller"), unicode(failure)) + LOG.error(tb) + + kwargs = {} + if hasattr(failure, 'kwargs'): + kwargs = failure.kwargs + + data = { + 'class': str(failure.__class__.__name__), + 'module': str(failure.__class__.__module__), + 'message': unicode(failure), + 'tb': tb, + 'args': failure.args, + 'kwargs': kwargs + } + + json_data = jsonutils.dumps(data) + + return json_data + + +def deserialize_remote_exception(conf, data): + failure = jsonutils.loads(str(data)) + + trace = failure.get('tb', []) + message = failure.get('message', "") + "\n" + "\n".join(trace) + name = failure.get('class') + module = failure.get('module') + + # NOTE(ameade): We DO NOT want to allow just any module to be imported, in + # order to prevent arbitrary code execution. + if module not in conf.allowed_rpc_exception_modules: + return RemoteError(name, failure.get('message'), trace) + + try: + mod = importutils.import_module(module) + klass = getattr(mod, name) + if not issubclass(klass, Exception): + raise TypeError("Can only deserialize Exceptions") + + failure = klass(*failure.get('args', []), **failure.get('kwargs', {})) + except (AttributeError, TypeError, ImportError): + return RemoteError(name, failure.get('message'), trace) + + ex_type = type(failure) + str_override = lambda self: message + new_ex_type = type(ex_type.__name__ + "_Remote", (ex_type,), + {'__str__': str_override, '__unicode__': str_override}) + try: + # NOTE(ameade): Dynamically create a new exception type and swap it in + # as the new type for the exception. This only works on user defined + # Exceptions and not core python exceptions. This is important because + # we cannot necessarily change an exception message so we must override + # the __str__ method. + failure.__class__ = new_ex_type + except TypeError: + # NOTE(ameade): If a core exception then just add the traceback to the + # first exception argument. + failure.args = (message,) + failure.args[1:] + return failure + + +class CommonRpcContext(object): + def __init__(self, **kwargs): + self.values = kwargs + + def __getattr__(self, key): + try: + return self.values[key] + except KeyError: + raise AttributeError(key) + + def to_dict(self): + return copy.deepcopy(self.values) + + @classmethod + def from_dict(cls, values): + return cls(**values) + + def deepcopy(self): + return self.from_dict(self.to_dict()) + + def update_store(self): + local.store.context = self + + def elevated(self, read_deleted=None, overwrite=False): + """Return a version of this context with admin flag set.""" + # TODO(russellb) This method is a bit of a nova-ism. It makes + # some assumptions about the data in the request context sent + # across rpc, while the rest of this class does not. We could get + # rid of this if we changed the nova code that uses this to + # convert the RpcContext back to its native RequestContext doing + # something like nova.context.RequestContext.from_dict(ctxt.to_dict()) + + context = self.deepcopy() + context.values['is_admin'] = True + + context.values.setdefault('roles', []) + + if 'admin' not in context.values['roles']: + context.values['roles'].append('admin') + + if read_deleted is not None: + context.values['read_deleted'] = read_deleted + + return context + + +class ClientException(Exception): + """This encapsulates some actual exception that is expected to be + hit by an RPC proxy object. 
Merely instantiating it records the + current exception information, which will be passed back to the + RPC client without exceptional logging.""" + def __init__(self): + self._exc_info = sys.exc_info() + + +def catch_client_exception(exceptions, func, *args, **kwargs): + try: + return func(*args, **kwargs) + except Exception, e: + if type(e) in exceptions: + raise ClientException() + else: + raise + + +def client_exceptions(*exceptions): + """Decorator for manager methods that raise expected exceptions. + Marking a Manager method with this decorator allows the declaration + of expected exceptions that the RPC layer should not consider fatal, + and not log as if they were generated in a real error scenario. Note + that this will cause listed exceptions to be wrapped in a + ClientException, which is used internally by the RPC layer.""" + def outer(func): + def inner(*args, **kwargs): + return catch_client_exception(exceptions, func, *args, **kwargs) + return inner + return outer + + +def version_is_compatible(imp_version, version): + """Determine whether versions are compatible. + + :param imp_version: The version implemented + :param version: The version requested by an incoming message. + """ + version_parts = version.split('.') + imp_version_parts = imp_version.split('.') + if int(version_parts[0]) != int(imp_version_parts[0]): # Major + return False + if int(version_parts[1]) > int(imp_version_parts[1]): # Minor + return False + return True + + +def serialize_msg(raw_msg): + # NOTE(russellb) See the docstring for _RPC_ENVELOPE_VERSION for more + # information about this format. + msg = {_VERSION_KEY: _RPC_ENVELOPE_VERSION, + _MESSAGE_KEY: jsonutils.dumps(raw_msg)} + + return msg + + +def deserialize_msg(msg): + # NOTE(russellb): Hang on to your hats, this road is about to + # get a little bumpy. + # + # Robustness Principle: + # "Be strict in what you send, liberal in what you accept." + # + # At this point we have to do a bit of guessing about what it + # is we just received. Here is the set of possibilities: + # + # 1) We received a dict. This could be 2 things: + # + # a) Inspect it to see if it looks like a standard message envelope. + # If so, great! + # + # b) If it doesn't look like a standard message envelope, it could either + # be a notification, or a message from before we added a message + # envelope (referred to as version 1.0). + # Just return the message as-is. + # + # 2) It's any other non-dict type. Just return it and hope for the best. + # This case covers return values from rpc.call() from before message + # envelopes were used. (messages to call a method were always a dict) + + if not isinstance(msg, dict): + # See #2 above. + return msg + + base_envelope_keys = (_VERSION_KEY, _MESSAGE_KEY) + if not all(map(lambda key: key in msg, base_envelope_keys)): + # See #1.b above. + return msg + + # At this point we think we have the message envelope + # format we were expecting. (#1.a above) + + if not version_is_compatible(_RPC_ENVELOPE_VERSION, msg[_VERSION_KEY]): + raise UnsupportedRpcEnvelopeVersion(version=msg[_VERSION_KEY]) + + raw_msg = jsonutils.loads(msg[_MESSAGE_KEY]) + + return raw_msg diff --git a/cinder/openstack/common/rpc/dispatcher.py b/cinder/openstack/common/rpc/dispatcher.py new file mode 100644 index 0000000000..85195d4a74 --- /dev/null +++ b/cinder/openstack/common/rpc/dispatcher.py @@ -0,0 +1,153 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 Red Hat, Inc. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Code for rpc message dispatching. + +Messages that come in have a version number associated with them. RPC API +version numbers are in the form: + + Major.Minor + +For a given message with version X.Y, the receiver must be marked as able to +handle messages of version A.B, where: + + A = X + + B >= Y + +The Major version number would be incremented for an almost completely new API. +The Minor version number would be incremented for backwards compatible changes +to an existing API. A backwards compatible change could be something like +adding a new method, adding an argument to an existing method (but not +requiring it), or changing the type for an existing argument (but still +handling the old type as well). + +The conversion over to a versioned API must be done on both the client side and +server side of the API at the same time. However, as the code stands today, +there can be both versioned and unversioned APIs implemented in the same code +base. + +EXAMPLES +======== + +Nova was the first project to use versioned rpc APIs. Consider the compute rpc +API as an example. The client side is in nova/compute/rpcapi.py and the server +side is in nova/compute/manager.py. + + +Example 1) Adding a new method. +------------------------------- + +Adding a new method is a backwards compatible change. It should be added to +nova/compute/manager.py, and RPC_API_VERSION should be bumped from X.Y to +X.Y+1. On the client side, the new method in nova/compute/rpcapi.py should +have a specific version specified to indicate the minimum API version that must +be implemented for the method to be supported. For example:: + + def get_host_uptime(self, ctxt, host): + topic = _compute_topic(self.topic, ctxt, host, None) + return self.call(ctxt, self.make_msg('get_host_uptime'), topic, + version='1.1') + +In this case, version '1.1' is the first version that supported the +get_host_uptime() method. + + +Example 2) Adding a new parameter. +---------------------------------- + +Adding a new parameter to an rpc method can be made backwards compatible. The +RPC_API_VERSION on the server side (nova/compute/manager.py) should be bumped. +The implementation of the method must not expect the parameter to be present.:: + + def some_remote_method(self, arg1, arg2, newarg=None): + # The code needs to deal with newarg=None for cases + # where an older client sends a message without it. + pass + +On the client side, the same changes should be made as in example 1. The +minimum version that supports the new parameter should be specified. +""" + +from cinder.openstack.common.rpc import common as rpc_common + + +class RpcDispatcher(object): + """Dispatch rpc messages according to the requested API version. + + This class can be used as the top level 'manager' for a service. It + contains a list of underlying managers that have an API_VERSION attribute. + """ + + def __init__(self, callbacks): + """Initialize the rpc dispatcher. 
+ + :param callbacks: List of proxy objects that are an instance + of a class with rpc methods exposed. Each proxy + object should have an RPC_API_VERSION attribute. + """ + self.callbacks = callbacks + super(RpcDispatcher, self).__init__() + + def dispatch(self, ctxt, version, method, namespace, **kwargs): + """Dispatch a message based on a requested version. + + :param ctxt: The request context + :param version: The requested API version from the incoming message + :param method: The method requested to be called by the incoming + message. + :param namespace: The namespace for the requested method. If None, + the dispatcher will look for a method on a callback + object with no namespace set. + :param kwargs: A dict of keyword arguments to be passed to the method. + + :returns: Whatever is returned by the underlying method that gets + called. + """ + if not version: + version = '1.0' + + had_compatible = False + for proxyobj in self.callbacks: + # Check for namespace compatibility + try: + cb_namespace = proxyobj.RPC_API_NAMESPACE + except AttributeError: + cb_namespace = None + + if namespace != cb_namespace: + continue + + # Check for version compatibility + try: + rpc_api_version = proxyobj.RPC_API_VERSION + except AttributeError: + rpc_api_version = '1.0' + + is_compatible = rpc_common.version_is_compatible(rpc_api_version, + version) + had_compatible = had_compatible or is_compatible + + if not hasattr(proxyobj, method): + continue + if is_compatible: + return getattr(proxyobj, method)(ctxt, **kwargs) + + if had_compatible: + raise AttributeError("No such RPC function '%s'" % method) + else: + raise rpc_common.UnsupportedRpcVersion(version=version) diff --git a/cinder/openstack/common/rpc/impl_fake.py b/cinder/openstack/common/rpc/impl_fake.py new file mode 100644 index 0000000000..ec7200a7b6 --- /dev/null +++ b/cinder/openstack/common/rpc/impl_fake.py @@ -0,0 +1,195 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +"""Fake RPC implementation which calls proxy methods directly with no +queues. Casts will block, but this is very useful for tests. +""" + +import inspect +# NOTE(russellb): We specifically want to use json, not our own jsonutils. +# jsonutils has some extra logic to automatically convert objects to primitive +# types so that they can be serialized. We want to catch all cases where +# non-primitive types make it into this code and treat it as an error. 
+import json +import time + +import eventlet + +from cinder.openstack.common.rpc import common as rpc_common + +CONSUMERS = {} + + +class RpcContext(rpc_common.CommonRpcContext): + def __init__(self, **kwargs): + super(RpcContext, self).__init__(**kwargs) + self._response = [] + self._done = False + + def deepcopy(self): + values = self.to_dict() + new_inst = self.__class__(**values) + new_inst._response = self._response + new_inst._done = self._done + return new_inst + + def reply(self, reply=None, failure=None, ending=False): + if ending: + self._done = True + if not self._done: + self._response.append((reply, failure)) + + +class Consumer(object): + def __init__(self, topic, proxy): + self.topic = topic + self.proxy = proxy + + def call(self, context, version, method, namespace, args, timeout): + done = eventlet.event.Event() + + def _inner(): + ctxt = RpcContext.from_dict(context.to_dict()) + try: + rval = self.proxy.dispatch(context, version, method, + namespace, **args) + res = [] + # Caller might have called ctxt.reply() manually + for (reply, failure) in ctxt._response: + if failure: + raise failure[0], failure[1], failure[2] + res.append(reply) + # if ending not 'sent'...we might have more data to + # return from the function itself + if not ctxt._done: + if inspect.isgenerator(rval): + for val in rval: + res.append(val) + else: + res.append(rval) + done.send(res) + except rpc_common.ClientException as e: + done.send_exception(e._exc_info[1]) + except Exception as e: + done.send_exception(e) + + thread = eventlet.greenthread.spawn(_inner) + + if timeout: + start_time = time.time() + while not done.ready(): + eventlet.greenthread.sleep(1) + cur_time = time.time() + if (cur_time - start_time) > timeout: + thread.kill() + raise rpc_common.Timeout() + + return done.wait() + + +class Connection(object): + """Connection object.""" + + def __init__(self): + self.consumers = [] + + def create_consumer(self, topic, proxy, fanout=False): + consumer = Consumer(topic, proxy) + self.consumers.append(consumer) + if topic not in CONSUMERS: + CONSUMERS[topic] = [] + CONSUMERS[topic].append(consumer) + + def close(self): + for consumer in self.consumers: + CONSUMERS[consumer.topic].remove(consumer) + self.consumers = [] + + def consume_in_thread(self): + pass + + +def create_connection(conf, new=True): + """Create a connection""" + return Connection() + + +def check_serialize(msg): + """Make sure a message intended for rpc can be serialized.""" + json.dumps(msg) + + +def multicall(conf, context, topic, msg, timeout=None): + """Make a call that returns multiple times.""" + + check_serialize(msg) + + method = msg.get('method') + if not method: + return + args = msg.get('args', {}) + version = msg.get('version', None) + namespace = msg.get('namespace', None) + + try: + consumer = CONSUMERS[topic][0] + except (KeyError, IndexError): + return iter([None]) + else: + return consumer.call(context, version, method, namespace, args, + timeout) + + +def call(conf, context, topic, msg, timeout=None): + """Sends a message on a topic and wait for a response.""" + rv = multicall(conf, context, topic, msg, timeout) + # NOTE(vish): return the last result from the multicall + rv = list(rv) + if not rv: + return + return rv[-1] + + +def cast(conf, context, topic, msg): + check_serialize(msg) + try: + call(conf, context, topic, msg) + except Exception: + pass + + +def notify(conf, context, topic, msg, envelope): + check_serialize(msg) + + +def cleanup(): + pass + + +def fanout_cast(conf, context, topic, msg): + 
"""Cast to all consumers of a topic""" + check_serialize(msg) + method = msg.get('method') + if not method: + return + args = msg.get('args', {}) + version = msg.get('version', None) + namespace = msg.get('namespace', None) + + for consumer in CONSUMERS.get(topic, []): + try: + consumer.call(context, version, method, namespace, args, None) + except Exception: + pass diff --git a/cinder/openstack/common/rpc/impl_kombu.py b/cinder/openstack/common/rpc/impl_kombu.py new file mode 100644 index 0000000000..681f531843 --- /dev/null +++ b/cinder/openstack/common/rpc/impl_kombu.py @@ -0,0 +1,838 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import functools +import itertools +import socket +import ssl +import sys +import time +import uuid + +import eventlet +import greenlet +import kombu +import kombu.connection +import kombu.entity +import kombu.messaging +from oslo.config import cfg + +from cinder.openstack.common.gettextutils import _ +from cinder.openstack.common import network_utils +from cinder.openstack.common.rpc import amqp as rpc_amqp +from cinder.openstack.common.rpc import common as rpc_common + +kombu_opts = [ + cfg.StrOpt('kombu_ssl_version', + default='', + help='SSL version to use (valid only if SSL enabled)'), + cfg.StrOpt('kombu_ssl_keyfile', + default='', + help='SSL key file (valid only if SSL enabled)'), + cfg.StrOpt('kombu_ssl_certfile', + default='', + help='SSL cert file (valid only if SSL enabled)'), + cfg.StrOpt('kombu_ssl_ca_certs', + default='', + help=('SSL certification authority file ' + '(valid only if SSL enabled)')), + cfg.StrOpt('rabbit_host', + default='localhost', + help='The RabbitMQ broker address where a single node is used'), + cfg.IntOpt('rabbit_port', + default=5672, + help='The RabbitMQ broker port where a single node is used'), + cfg.ListOpt('rabbit_hosts', + default=['$rabbit_host:$rabbit_port'], + help='RabbitMQ HA cluster host:port pairs'), + cfg.BoolOpt('rabbit_use_ssl', + default=False, + help='connect over SSL for RabbitMQ'), + cfg.StrOpt('rabbit_userid', + default='guest', + help='the RabbitMQ userid'), + cfg.StrOpt('rabbit_password', + default='guest', + help='the RabbitMQ password', + secret=True), + cfg.StrOpt('rabbit_virtual_host', + default='/', + help='the RabbitMQ virtual host'), + cfg.IntOpt('rabbit_retry_interval', + default=1, + help='how frequently to retry connecting with RabbitMQ'), + cfg.IntOpt('rabbit_retry_backoff', + default=2, + help='how long to backoff for between retries when connecting ' + 'to RabbitMQ'), + cfg.IntOpt('rabbit_max_retries', + default=0, + help='maximum retries with trying to connect to RabbitMQ ' + '(the default of 0 implies an infinite retry count)'), + cfg.BoolOpt('rabbit_durable_queues', + default=False, + help='use durable queues in RabbitMQ'), + cfg.BoolOpt('rabbit_ha_queues', + default=False, + help='use H/A queues in RabbitMQ (x-ha-policy: all).' 
+                     ' You need to wipe the RabbitMQ database when '
+                     'changing this option.'),
+
+]
+
+cfg.CONF.register_opts(kombu_opts)
+
+LOG = rpc_common.LOG
+
+
+def _get_queue_arguments(conf):
+    """Construct the arguments for declaring a queue.
+
+    If the rabbit_ha_queues option is set, we declare a mirrored queue
+    as described here:
+
+        http://www.rabbitmq.com/ha.html
+
+    Setting x-ha-policy to all means that the queue will be mirrored
+    to all nodes in the cluster.
+    """
+    return {'x-ha-policy': 'all'} if conf.rabbit_ha_queues else {}
+
+
+class ConsumerBase(object):
+    """Consumer base class."""
+
+    def __init__(self, channel, callback, tag, **kwargs):
+        """Declare a queue on an amqp channel.
+
+        'channel' is the amqp channel to use
+        'callback' is the callback to call when messages are received
+        'tag' is a unique ID for the consumer on the channel
+
+        queue name, exchange name, and other kombu options are
+        passed in here as a dictionary.
+        """
+        self.callback = callback
+        self.tag = str(tag)
+        self.kwargs = kwargs
+        self.queue = None
+        self.reconnect(channel)
+
+    def reconnect(self, channel):
+        """Re-declare the queue after a rabbit reconnect"""
+        self.channel = channel
+        self.kwargs['channel'] = channel
+        self.queue = kombu.entity.Queue(**self.kwargs)
+        self.queue.declare()
+
+    def consume(self, *args, **kwargs):
+        """Actually declare the consumer on the amqp channel. This will
+        start the flow of messages from the queue. Using the
+        Connection.iterconsume() iterator will process the messages,
+        calling the appropriate callback.
+
+        If a callback is specified in kwargs, use that. Otherwise,
+        use the callback passed during __init__()
+
+        If kwargs['nowait'] is False (the default), this call blocks until
+        the broker confirms the consumer registration; it does not wait
+        for a message to arrive.
+
+        Messages are acked once the callback returns; if the callback
+        raises an exception, the failure is logged and the message is
+        skipped, but it is still acked.
+        """
+
+        options = {'consumer_tag': self.tag}
+        options['nowait'] = kwargs.get('nowait', False)
+        callback = kwargs.get('callback', self.callback)
+        if not callback:
+            raise ValueError("No callback defined")
+
+        def _callback(raw_message):
+            message = self.channel.message_to_python(raw_message)
+            try:
+                msg = rpc_common.deserialize_msg(message.payload)
+                callback(msg)
+            except Exception:
+                LOG.exception(_("Failed to process message... skipping it."))
+            finally:
+                message.ack()
+
+        self.queue.consume(*args, callback=_callback, **options)
+
+    def cancel(self):
+        """Cancel the consuming from the queue, if it has started"""
+        try:
+            self.queue.cancel(self.tag)
+        except KeyError, e:
+            # NOTE(comstud): Kludge to get around an amqplib bug
+            if str(e) != "u'%s'" % self.tag:
+                raise
+        self.queue = None
+
+
+class DirectConsumer(ConsumerBase):
+    """Queue/consumer class for 'direct'"""
+
+    def __init__(self, conf, channel, msg_id, callback, tag, **kwargs):
+        """Init a 'direct' queue.
+ + 'channel' is the amqp channel to use + 'msg_id' is the msg_id to listen on + 'callback' is the callback to call when messages are received + 'tag' is a unique ID for the consumer on the channel + + Other kombu options may be passed + """ + # Default options + options = {'durable': False, + 'queue_arguments': _get_queue_arguments(conf), + 'auto_delete': True, + 'exclusive': False} + options.update(kwargs) + exchange = kombu.entity.Exchange(name=msg_id, + type='direct', + durable=options['durable'], + auto_delete=options['auto_delete']) + super(DirectConsumer, self).__init__(channel, + callback, + tag, + name=msg_id, + exchange=exchange, + routing_key=msg_id, + **options) + + +class TopicConsumer(ConsumerBase): + """Consumer class for 'topic'""" + + def __init__(self, conf, channel, topic, callback, tag, name=None, + exchange_name=None, **kwargs): + """Init a 'topic' queue. + + :param channel: the amqp channel to use + :param topic: the topic to listen on + :paramtype topic: str + :param callback: the callback to call when messages are received + :param tag: a unique ID for the consumer on the channel + :param name: optional queue name, defaults to topic + :paramtype name: str + + Other kombu options may be passed as keyword arguments + """ + # Default options + options = {'durable': conf.rabbit_durable_queues, + 'queue_arguments': _get_queue_arguments(conf), + 'auto_delete': False, + 'exclusive': False} + options.update(kwargs) + exchange_name = exchange_name or rpc_amqp.get_control_exchange(conf) + exchange = kombu.entity.Exchange(name=exchange_name, + type='topic', + durable=options['durable'], + auto_delete=options['auto_delete']) + super(TopicConsumer, self).__init__(channel, + callback, + tag, + name=name or topic, + exchange=exchange, + routing_key=topic, + **options) + + +class FanoutConsumer(ConsumerBase): + """Consumer class for 'fanout'""" + + def __init__(self, conf, channel, topic, callback, tag, **kwargs): + """Init a 'fanout' queue. 
+ + 'channel' is the amqp channel to use + 'topic' is the topic to listen on + 'callback' is the callback to call when messages are received + 'tag' is a unique ID for the consumer on the channel + + Other kombu options may be passed + """ + unique = uuid.uuid4().hex + exchange_name = '%s_fanout' % topic + queue_name = '%s_fanout_%s' % (topic, unique) + + # Default options + options = {'durable': False, + 'queue_arguments': _get_queue_arguments(conf), + 'auto_delete': True, + 'exclusive': False} + options.update(kwargs) + exchange = kombu.entity.Exchange(name=exchange_name, type='fanout', + durable=options['durable'], + auto_delete=options['auto_delete']) + super(FanoutConsumer, self).__init__(channel, callback, tag, + name=queue_name, + exchange=exchange, + routing_key=topic, + **options) + + +class Publisher(object): + """Base Publisher class""" + + def __init__(self, channel, exchange_name, routing_key, **kwargs): + """Init the Publisher class with the exchange_name, routing_key, + and other options + """ + self.exchange_name = exchange_name + self.routing_key = routing_key + self.kwargs = kwargs + self.reconnect(channel) + + def reconnect(self, channel): + """Re-establish the Producer after a rabbit reconnection""" + self.exchange = kombu.entity.Exchange(name=self.exchange_name, + **self.kwargs) + self.producer = kombu.messaging.Producer(exchange=self.exchange, + channel=channel, + routing_key=self.routing_key) + + def send(self, msg, timeout=None): + """Send a message""" + if timeout: + # + # AMQP TTL is in milliseconds when set in the header. + # + self.producer.publish(msg, headers={'ttl': (timeout * 1000)}) + else: + self.producer.publish(msg) + + +class DirectPublisher(Publisher): + """Publisher class for 'direct'""" + def __init__(self, conf, channel, msg_id, **kwargs): + """init a 'direct' publisher. + + Kombu options may be passed as keyword args to override defaults + """ + + options = {'durable': False, + 'auto_delete': True, + 'exclusive': False} + options.update(kwargs) + super(DirectPublisher, self).__init__(channel, msg_id, msg_id, + type='direct', **options) + + +class TopicPublisher(Publisher): + """Publisher class for 'topic'""" + def __init__(self, conf, channel, topic, **kwargs): + """init a 'topic' publisher. + + Kombu options may be passed as keyword args to override defaults + """ + options = {'durable': conf.rabbit_durable_queues, + 'auto_delete': False, + 'exclusive': False} + options.update(kwargs) + exchange_name = rpc_amqp.get_control_exchange(conf) + super(TopicPublisher, self).__init__(channel, + exchange_name, + topic, + type='topic', + **options) + + +class FanoutPublisher(Publisher): + """Publisher class for 'fanout'""" + def __init__(self, conf, channel, topic, **kwargs): + """init a 'fanout' publisher. 
+ + Kombu options may be passed as keyword args to override defaults + """ + options = {'durable': False, + 'auto_delete': True, + 'exclusive': False} + options.update(kwargs) + super(FanoutPublisher, self).__init__(channel, '%s_fanout' % topic, + None, type='fanout', **options) + + +class NotifyPublisher(TopicPublisher): + """Publisher class for 'notify'""" + + def __init__(self, conf, channel, topic, **kwargs): + self.durable = kwargs.pop('durable', conf.rabbit_durable_queues) + self.queue_arguments = _get_queue_arguments(conf) + super(NotifyPublisher, self).__init__(conf, channel, topic, **kwargs) + + def reconnect(self, channel): + super(NotifyPublisher, self).reconnect(channel) + + # NOTE(jerdfelt): Normally the consumer would create the queue, but + # we do this to ensure that messages don't get dropped if the + # consumer is started after we do + queue = kombu.entity.Queue(channel=channel, + exchange=self.exchange, + durable=self.durable, + name=self.routing_key, + routing_key=self.routing_key, + queue_arguments=self.queue_arguments) + queue.declare() + + +class Connection(object): + """Connection object.""" + + pool = None + + def __init__(self, conf, server_params=None): + self.consumers = [] + self.consumer_thread = None + self.proxy_callbacks = [] + self.conf = conf + self.max_retries = self.conf.rabbit_max_retries + # Try forever? + if self.max_retries <= 0: + self.max_retries = None + self.interval_start = self.conf.rabbit_retry_interval + self.interval_stepping = self.conf.rabbit_retry_backoff + # max retry-interval = 30 seconds + self.interval_max = 30 + self.memory_transport = False + + if server_params is None: + server_params = {} + # Keys to translate from server_params to kombu params + server_params_to_kombu_params = {'username': 'userid'} + + ssl_params = self._fetch_ssl_params() + params_list = [] + for adr in self.conf.rabbit_hosts: + hostname, port = network_utils.parse_host_port( + adr, default_port=self.conf.rabbit_port) + + params = { + 'hostname': hostname, + 'port': port, + 'userid': self.conf.rabbit_userid, + 'password': self.conf.rabbit_password, + 'virtual_host': self.conf.rabbit_virtual_host, + } + + for sp_key, value in server_params.iteritems(): + p_key = server_params_to_kombu_params.get(sp_key, sp_key) + params[p_key] = value + + if self.conf.fake_rabbit: + params['transport'] = 'memory' + if self.conf.rabbit_use_ssl: + params['ssl'] = ssl_params + + params_list.append(params) + + self.params_list = params_list + + self.memory_transport = self.conf.fake_rabbit + + self.connection = None + self.reconnect() + + def _fetch_ssl_params(self): + """Handles fetching what ssl params + should be used for the connection (if any)""" + ssl_params = dict() + + # http://docs.python.org/library/ssl.html - ssl.wrap_socket + if self.conf.kombu_ssl_version: + ssl_params['ssl_version'] = self.conf.kombu_ssl_version + if self.conf.kombu_ssl_keyfile: + ssl_params['keyfile'] = self.conf.kombu_ssl_keyfile + if self.conf.kombu_ssl_certfile: + ssl_params['certfile'] = self.conf.kombu_ssl_certfile + if self.conf.kombu_ssl_ca_certs: + ssl_params['ca_certs'] = self.conf.kombu_ssl_ca_certs + # We might want to allow variations in the + # future with this? + ssl_params['cert_reqs'] = ssl.CERT_REQUIRED + + if not ssl_params: + # Just have the default behavior + return True + else: + # Return the extended behavior + return ssl_params + + def _connect(self, params): + """Connect to rabbit. Re-establish any queues that may have + been declared before if we are reconnecting. 
Exceptions should + be handled by the caller. + """ + if self.connection: + LOG.info(_("Reconnecting to AMQP server on " + "%(hostname)s:%(port)d") % params) + try: + self.connection.release() + except self.connection_errors: + pass + # Setting this in case the next statement fails, though + # it shouldn't be doing any network operations, yet. + self.connection = None + self.connection = kombu.connection.BrokerConnection(**params) + self.connection_errors = self.connection.connection_errors + if self.memory_transport: + # Kludge to speed up tests. + self.connection.transport.polling_interval = 0.0 + self.consumer_num = itertools.count(1) + self.connection.connect() + self.channel = self.connection.channel() + # work around 'memory' transport bug in 1.1.3 + if self.memory_transport: + self.channel._new_queue('ae.undeliver') + for consumer in self.consumers: + consumer.reconnect(self.channel) + LOG.info(_('Connected to AMQP server on %(hostname)s:%(port)d') % + params) + + def reconnect(self): + """Handles reconnecting and re-establishing queues. + Will retry up to self.max_retries number of times. + self.max_retries = 0 means to retry forever. + Sleep between tries, starting at self.interval_start + seconds, backing off self.interval_stepping number of seconds + each attempt. + """ + + attempt = 0 + while True: + params = self.params_list[attempt % len(self.params_list)] + attempt += 1 + try: + self._connect(params) + return + except (IOError, self.connection_errors) as e: + pass + except Exception, e: + # NOTE(comstud): Unfortunately it's possible for amqplib + # to return an error not covered by its transport + # connection_errors in the case of a timeout waiting for + # a protocol response. (See paste link in LP888621) + # So, we check all exceptions for 'timeout' in them + # and try to reconnect in this case. + if 'timeout' not in str(e): + raise + + log_info = {} + log_info['err_str'] = str(e) + log_info['max_retries'] = self.max_retries + log_info.update(params) + + if self.max_retries and attempt == self.max_retries: + LOG.error(_('Unable to connect to AMQP server on ' + '%(hostname)s:%(port)d after %(max_retries)d ' + 'tries: %(err_str)s') % log_info) + # NOTE(comstud): Copied from original code. There's + # really no better recourse because if this was a queue we + # need to consume on, we have no way to consume anymore. + sys.exit(1) + + if attempt == 1: + sleep_time = self.interval_start or 1 + elif attempt > 1: + sleep_time += self.interval_stepping + if self.interval_max: + sleep_time = min(sleep_time, self.interval_max) + + log_info['sleep_time'] = sleep_time + LOG.error(_('AMQP server on %(hostname)s:%(port)d is ' + 'unreachable: %(err_str)s. Trying again in ' + '%(sleep_time)d seconds.') % log_info) + time.sleep(sleep_time) + + def ensure(self, error_callback, method, *args, **kwargs): + while True: + try: + return method(*args, **kwargs) + except (self.connection_errors, socket.timeout, IOError), e: + if error_callback: + error_callback(e) + except Exception, e: + # NOTE(comstud): Unfortunately it's possible for amqplib + # to return an error not covered by its transport + # connection_errors in the case of a timeout waiting for + # a protocol response. (See paste link in LP888621) + # So, we check all exceptions for 'timeout' in them + # and try to reconnect in this case. 
+ if 'timeout' not in str(e): + raise + if error_callback: + error_callback(e) + self.reconnect() + + def get_channel(self): + """Convenience call for bin/clear_rabbit_queues""" + return self.channel + + def close(self): + """Close/release this connection""" + self.cancel_consumer_thread() + self.wait_on_proxy_callbacks() + self.connection.release() + self.connection = None + + def reset(self): + """Reset a connection so it can be used again""" + self.cancel_consumer_thread() + self.wait_on_proxy_callbacks() + self.channel.close() + self.channel = self.connection.channel() + # work around 'memory' transport bug in 1.1.3 + if self.memory_transport: + self.channel._new_queue('ae.undeliver') + self.consumers = [] + + def declare_consumer(self, consumer_cls, topic, callback): + """Create a Consumer using the class that was passed in and + add it to our list of consumers + """ + + def _connect_error(exc): + log_info = {'topic': topic, 'err_str': str(exc)} + LOG.error(_("Failed to declare consumer for topic '%(topic)s': " + "%(err_str)s") % log_info) + + def _declare_consumer(): + consumer = consumer_cls(self.conf, self.channel, topic, callback, + self.consumer_num.next()) + self.consumers.append(consumer) + return consumer + + return self.ensure(_connect_error, _declare_consumer) + + def iterconsume(self, limit=None, timeout=None): + """Return an iterator that will consume from all queues/consumers""" + + info = {'do_consume': True} + + def _error_callback(exc): + if isinstance(exc, socket.timeout): + LOG.debug(_('Timed out waiting for RPC response: %s') % + str(exc)) + raise rpc_common.Timeout() + else: + LOG.exception(_('Failed to consume message from queue: %s') % + str(exc)) + info['do_consume'] = True + + def _consume(): + if info['do_consume']: + queues_head = self.consumers[:-1] + queues_tail = self.consumers[-1] + for queue in queues_head: + queue.consume(nowait=True) + queues_tail.consume(nowait=False) + info['do_consume'] = False + return self.connection.drain_events(timeout=timeout) + + for iteration in itertools.count(0): + if limit and iteration >= limit: + raise StopIteration + yield self.ensure(_error_callback, _consume) + + def cancel_consumer_thread(self): + """Cancel a consumer thread""" + if self.consumer_thread is not None: + self.consumer_thread.kill() + try: + self.consumer_thread.wait() + except greenlet.GreenletExit: + pass + self.consumer_thread = None + + def wait_on_proxy_callbacks(self): + """Wait for all proxy callback threads to exit.""" + for proxy_cb in self.proxy_callbacks: + proxy_cb.wait() + + def publisher_send(self, cls, topic, msg, timeout=None, **kwargs): + """Send to a publisher based on the publisher class""" + + def _error_callback(exc): + log_info = {'topic': topic, 'err_str': str(exc)} + LOG.exception(_("Failed to publish message to topic " + "'%(topic)s': %(err_str)s") % log_info) + + def _publish(): + publisher = cls(self.conf, self.channel, topic, **kwargs) + publisher.send(msg, timeout) + + self.ensure(_error_callback, _publish) + + def declare_direct_consumer(self, topic, callback): + """Create a 'direct' queue. 
+ In nova's use, this is generally a msg_id queue used for + responses for call/multicall + """ + self.declare_consumer(DirectConsumer, topic, callback) + + def declare_topic_consumer(self, topic, callback=None, queue_name=None, + exchange_name=None): + """Create a 'topic' consumer.""" + self.declare_consumer(functools.partial(TopicConsumer, + name=queue_name, + exchange_name=exchange_name, + ), + topic, callback) + + def declare_fanout_consumer(self, topic, callback): + """Create a 'fanout' consumer""" + self.declare_consumer(FanoutConsumer, topic, callback) + + def direct_send(self, msg_id, msg): + """Send a 'direct' message""" + self.publisher_send(DirectPublisher, msg_id, msg) + + def topic_send(self, topic, msg, timeout=None): + """Send a 'topic' message""" + self.publisher_send(TopicPublisher, topic, msg, timeout) + + def fanout_send(self, topic, msg): + """Send a 'fanout' message""" + self.publisher_send(FanoutPublisher, topic, msg) + + def notify_send(self, topic, msg, **kwargs): + """Send a notify message on a topic""" + self.publisher_send(NotifyPublisher, topic, msg, None, **kwargs) + + def consume(self, limit=None): + """Consume from all queues/consumers""" + it = self.iterconsume(limit=limit) + while True: + try: + it.next() + except StopIteration: + return + + def consume_in_thread(self): + """Consumer from all queues/consumers in a greenthread""" + def _consumer_thread(): + try: + self.consume() + except greenlet.GreenletExit: + return + if self.consumer_thread is None: + self.consumer_thread = eventlet.spawn(_consumer_thread) + return self.consumer_thread + + def create_consumer(self, topic, proxy, fanout=False): + """Create a consumer that calls a method in a proxy object""" + proxy_cb = rpc_amqp.ProxyCallback( + self.conf, proxy, + rpc_amqp.get_connection_pool(self.conf, Connection)) + self.proxy_callbacks.append(proxy_cb) + + if fanout: + self.declare_fanout_consumer(topic, proxy_cb) + else: + self.declare_topic_consumer(topic, proxy_cb) + + def create_worker(self, topic, proxy, pool_name): + """Create a worker that calls a method in a proxy object""" + proxy_cb = rpc_amqp.ProxyCallback( + self.conf, proxy, + rpc_amqp.get_connection_pool(self.conf, Connection)) + self.proxy_callbacks.append(proxy_cb) + self.declare_topic_consumer(topic, proxy_cb, pool_name) + + def join_consumer_pool(self, callback, pool_name, topic, + exchange_name=None): + """Register as a member of a group of consumers for a given topic from + the specified exchange. + + Exactly one member of a given pool will receive each message. + + A message will be delivered to multiple pools, if more than + one is created. 
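+
+        A minimal usage sketch (the callback and names here are
+        illustrative only, not part of this API)::
+
+            def handle_notification(message_data):
+                print message_data
+
+            conn.join_consumer_pool(callback=handle_notification,
+                                    pool_name='example_pool',
+                                    topic='notifications.info')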
+ """ + callback_wrapper = rpc_amqp.CallbackWrapper( + conf=self.conf, + callback=callback, + connection_pool=rpc_amqp.get_connection_pool(self.conf, + Connection), + ) + self.proxy_callbacks.append(callback_wrapper) + self.declare_topic_consumer( + queue_name=pool_name, + topic=topic, + exchange_name=exchange_name, + callback=callback_wrapper, + ) + + +def create_connection(conf, new=True): + """Create a connection""" + return rpc_amqp.create_connection( + conf, new, + rpc_amqp.get_connection_pool(conf, Connection)) + + +def multicall(conf, context, topic, msg, timeout=None): + """Make a call that returns multiple times.""" + return rpc_amqp.multicall( + conf, context, topic, msg, timeout, + rpc_amqp.get_connection_pool(conf, Connection)) + + +def call(conf, context, topic, msg, timeout=None): + """Sends a message on a topic and wait for a response.""" + return rpc_amqp.call( + conf, context, topic, msg, timeout, + rpc_amqp.get_connection_pool(conf, Connection)) + + +def cast(conf, context, topic, msg): + """Sends a message on a topic without waiting for a response.""" + return rpc_amqp.cast( + conf, context, topic, msg, + rpc_amqp.get_connection_pool(conf, Connection)) + + +def fanout_cast(conf, context, topic, msg): + """Sends a message on a fanout exchange without waiting for a response.""" + return rpc_amqp.fanout_cast( + conf, context, topic, msg, + rpc_amqp.get_connection_pool(conf, Connection)) + + +def cast_to_server(conf, context, server_params, topic, msg): + """Sends a message on a topic to a specific server.""" + return rpc_amqp.cast_to_server( + conf, context, server_params, topic, msg, + rpc_amqp.get_connection_pool(conf, Connection)) + + +def fanout_cast_to_server(conf, context, server_params, topic, msg): + """Sends a message on a fanout exchange to a specific server.""" + return rpc_amqp.fanout_cast_to_server( + conf, context, server_params, topic, msg, + rpc_amqp.get_connection_pool(conf, Connection)) + + +def notify(conf, context, topic, msg, envelope): + """Sends a notification event on a topic.""" + return rpc_amqp.notify( + conf, context, topic, msg, + rpc_amqp.get_connection_pool(conf, Connection), + envelope) + + +def cleanup(): + return rpc_amqp.cleanup(Connection.pool) diff --git a/cinder/openstack/common/rpc/impl_qpid.py b/cinder/openstack/common/rpc/impl_qpid.py new file mode 100644 index 0000000000..24235b1f11 --- /dev/null +++ b/cinder/openstack/common/rpc/impl_qpid.py @@ -0,0 +1,649 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack Foundation +# Copyright 2011 - 2012, Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import functools +import itertools +import time +import uuid + +import eventlet +import greenlet +from oslo.config import cfg + +from cinder.openstack.common.gettextutils import _ +from cinder.openstack.common import importutils +from cinder.openstack.common import jsonutils +from cinder.openstack.common import log as logging +from cinder.openstack.common.rpc import amqp as rpc_amqp +from cinder.openstack.common.rpc import common as rpc_common + +qpid_messaging = importutils.try_import("qpid.messaging") +qpid_exceptions = importutils.try_import("qpid.messaging.exceptions") + +LOG = logging.getLogger(__name__) + +qpid_opts = [ + cfg.StrOpt('qpid_hostname', + default='localhost', + help='Qpid broker hostname'), + cfg.IntOpt('qpid_port', + default=5672, + help='Qpid broker port'), + cfg.ListOpt('qpid_hosts', + default=['$qpid_hostname:$qpid_port'], + help='Qpid HA cluster host:port pairs'), + cfg.StrOpt('qpid_username', + default='', + help='Username for qpid connection'), + cfg.StrOpt('qpid_password', + default='', + help='Password for qpid connection', + secret=True), + cfg.StrOpt('qpid_sasl_mechanisms', + default='', + help='Space separated list of SASL mechanisms to use for auth'), + cfg.IntOpt('qpid_heartbeat', + default=60, + help='Seconds between connection keepalive heartbeats'), + cfg.StrOpt('qpid_protocol', + default='tcp', + help="Transport to use, either 'tcp' or 'ssl'"), + cfg.BoolOpt('qpid_tcp_nodelay', + default=True, + help='Disable Nagle algorithm'), +] + +cfg.CONF.register_opts(qpid_opts) + + +class ConsumerBase(object): + """Consumer base class.""" + + def __init__(self, session, callback, node_name, node_opts, + link_name, link_opts): + """Declare a queue on an amqp session. + + 'session' is the amqp session to use + 'callback' is the callback to call when messages are received + 'node_name' is the first part of the Qpid address string, before ';' + 'node_opts' will be applied to the "x-declare" section of "node" + in the address string. + 'link_name' goes into the "name" field of the "link" in the address + string + 'link_opts' will be applied to the "x-declare" section of "link" + in the address string. + """ + self.callback = callback + self.receiver = None + self.session = None + + addr_opts = { + "create": "always", + "node": { + "type": "topic", + "x-declare": { + "durable": True, + "auto-delete": True, + }, + }, + "link": { + "name": link_name, + "durable": True, + "x-declare": { + "durable": False, + "auto-delete": True, + "exclusive": False, + }, + }, + } + addr_opts["node"]["x-declare"].update(node_opts) + addr_opts["link"]["x-declare"].update(link_opts) + + self.address = "%s ; %s" % (node_name, jsonutils.dumps(addr_opts)) + + self.reconnect(session) + + def reconnect(self, session): + """Re-declare the receiver after a qpid reconnect""" + self.session = session + self.receiver = session.receiver(self.address) + self.receiver.capacity = 1 + + def consume(self): + """Fetch the message and pass it to the callback object""" + message = self.receiver.fetch() + try: + msg = rpc_common.deserialize_msg(message.content) + self.callback(msg) + except Exception: + LOG.exception(_("Failed to process message... skipping it.")) + finally: + self.session.acknowledge(message) + + def get_receiver(self): + return self.receiver + + +class DirectConsumer(ConsumerBase): + """Queue/consumer class for 'direct'""" + + def __init__(self, conf, session, msg_id, callback): + """Init a 'direct' queue. 
+
+        'session' is the amqp session to use
+        'msg_id' is the msg_id to listen on
+        'callback' is the callback to call when messages are received
+        """
+
+        super(DirectConsumer, self).__init__(session, callback,
+                                             "%s/%s" % (msg_id, msg_id),
+                                             {"type": "direct"},
+                                             msg_id,
+                                             {"exclusive": True})
+
+
+class TopicConsumer(ConsumerBase):
+    """Consumer class for 'topic'"""
+
+    def __init__(self, conf, session, topic, callback, name=None,
+                 exchange_name=None):
+        """Init a 'topic' queue.
+
+        :param session: the amqp session to use
+        :param topic: the topic to listen on
+        :paramtype topic: str
+        :param callback: the callback to call when messages are received
+        :param name: optional queue name, defaults to topic
+        """
+
+        exchange_name = exchange_name or rpc_amqp.get_control_exchange(conf)
+        super(TopicConsumer, self).__init__(session, callback,
+                                            "%s/%s" % (exchange_name, topic),
+                                            {}, name or topic, {})
+
+
+class FanoutConsumer(ConsumerBase):
+    """Consumer class for 'fanout'"""
+
+    def __init__(self, conf, session, topic, callback):
+        """Init a 'fanout' queue.
+
+        'session' is the amqp session to use
+        'topic' is the topic to listen on
+        'callback' is the callback to call when messages are received
+        """
+
+        super(FanoutConsumer, self).__init__(
+            session, callback,
+            "%s_fanout" % topic,
+            {"durable": False, "type": "fanout"},
+            "%s_fanout_%s" % (topic, uuid.uuid4().hex),
+            {"exclusive": True})
+
+
+class Publisher(object):
+    """Base Publisher class"""
+
+    def __init__(self, session, node_name, node_opts=None):
+        """Init the Publisher class with the node name and, optionally,
+        extra "x-declare" options for the node
+        """
+        self.sender = None
+        self.session = session
+
+        addr_opts = {
+            "create": "always",
+            "node": {
+                "type": "topic",
+                "x-declare": {
+                    "durable": False,
+                    # auto-delete isn't implemented for exchanges in qpid,
+                    # but put in here anyway
+                    "auto-delete": True,
+                },
+            },
+        }
+        if node_opts:
+            addr_opts["node"]["x-declare"].update(node_opts)
+
+        self.address = "%s ; %s" % (node_name, jsonutils.dumps(addr_opts))
+
+        self.reconnect(session)
+
+    def reconnect(self, session):
+        """Re-establish the Sender after a reconnection"""
+        self.sender = session.sender(self.address)
+
+    def send(self, msg):
+        """Send a message"""
+        self.sender.send(msg)
+
+
+class DirectPublisher(Publisher):
+    """Publisher class for 'direct'"""
+    def __init__(self, conf, session, msg_id):
+        """Init a 'direct' publisher."""
+        super(DirectPublisher, self).__init__(session, msg_id,
+                                              {"type": "direct"})
+
+
+class TopicPublisher(Publisher):
+    """Publisher class for 'topic'"""
+    def __init__(self, conf, session, topic):
+        """Init a 'topic' publisher.
+        """
+        exchange_name = rpc_amqp.get_control_exchange(conf)
+        super(TopicPublisher, self).__init__(session,
+                                             "%s/%s" % (exchange_name, topic))
+
+
+class FanoutPublisher(Publisher):
+    """Publisher class for 'fanout'"""
+    def __init__(self, conf, session, topic):
+        """Init a 'fanout' publisher.
+        """
+        super(FanoutPublisher, self).__init__(
+            session,
+            "%s_fanout" % topic, {"type": "fanout"})
+
+
+class NotifyPublisher(Publisher):
+    """Publisher class for notifications"""
+    def __init__(self, conf, session, topic):
+        """Init a 'topic' publisher.
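+
+        Unlike TopicPublisher, the node is declared durable so that
+        notifications survive a broker restart.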
+ """ + exchange_name = rpc_amqp.get_control_exchange(conf) + super(NotifyPublisher, self).__init__(session, + "%s/%s" % (exchange_name, topic), + {"durable": True}) + + +class Connection(object): + """Connection object.""" + + pool = None + + def __init__(self, conf, server_params=None): + if not qpid_messaging: + raise ImportError("Failed to import qpid.messaging") + + self.session = None + self.consumers = {} + self.consumer_thread = None + self.proxy_callbacks = [] + self.conf = conf + + if server_params and 'hostname' in server_params: + # NOTE(russellb) This enables support for cast_to_server. + server_params['qpid_hosts'] = [ + '%s:%d' % (server_params['hostname'], + server_params.get('port', 5672)) + ] + + params = { + 'qpid_hosts': self.conf.qpid_hosts, + 'username': self.conf.qpid_username, + 'password': self.conf.qpid_password, + } + params.update(server_params or {}) + + self.brokers = params['qpid_hosts'] + self.username = params['username'] + self.password = params['password'] + self.connection_create(self.brokers[0]) + self.reconnect() + + def connection_create(self, broker): + # Create the connection - this does not open the connection + self.connection = qpid_messaging.Connection(broker) + + # Check if flags are set and if so set them for the connection + # before we call open + self.connection.username = self.username + self.connection.password = self.password + + self.connection.sasl_mechanisms = self.conf.qpid_sasl_mechanisms + # Reconnection is done by self.reconnect() + self.connection.reconnect = False + self.connection.heartbeat = self.conf.qpid_heartbeat + self.connection.transport = self.conf.qpid_protocol + self.connection.tcp_nodelay = self.conf.qpid_tcp_nodelay + + def _register_consumer(self, consumer): + self.consumers[str(consumer.get_receiver())] = consumer + + def _lookup_consumer(self, receiver): + return self.consumers[str(receiver)] + + def reconnect(self): + """Handles reconnecting and re-establishing sessions and queues""" + if self.connection.opened(): + try: + self.connection.close() + except qpid_exceptions.ConnectionError: + pass + + attempt = 0 + delay = 1 + while True: + broker = self.brokers[attempt % len(self.brokers)] + attempt += 1 + + try: + self.connection_create(broker) + self.connection.open() + except qpid_exceptions.ConnectionError, e: + msg_dict = dict(e=e, delay=delay) + msg = _("Unable to connect to AMQP server: %(e)s. 
" + "Sleeping %(delay)s seconds") % msg_dict + LOG.error(msg) + time.sleep(delay) + delay = min(2 * delay, 60) + else: + LOG.info(_('Connected to AMQP server on %s'), broker) + break + + self.session = self.connection.session() + + if self.consumers: + consumers = self.consumers + self.consumers = {} + + for consumer in consumers.itervalues(): + consumer.reconnect(self.session) + self._register_consumer(consumer) + + LOG.debug(_("Re-established AMQP queues")) + + def ensure(self, error_callback, method, *args, **kwargs): + while True: + try: + return method(*args, **kwargs) + except (qpid_exceptions.Empty, + qpid_exceptions.ConnectionError), e: + if error_callback: + error_callback(e) + self.reconnect() + + def close(self): + """Close/release this connection""" + self.cancel_consumer_thread() + self.wait_on_proxy_callbacks() + self.connection.close() + self.connection = None + + def reset(self): + """Reset a connection so it can be used again""" + self.cancel_consumer_thread() + self.wait_on_proxy_callbacks() + self.session.close() + self.session = self.connection.session() + self.consumers = {} + + def declare_consumer(self, consumer_cls, topic, callback): + """Create a Consumer using the class that was passed in and + add it to our list of consumers + """ + def _connect_error(exc): + log_info = {'topic': topic, 'err_str': str(exc)} + LOG.error(_("Failed to declare consumer for topic '%(topic)s': " + "%(err_str)s") % log_info) + + def _declare_consumer(): + consumer = consumer_cls(self.conf, self.session, topic, callback) + self._register_consumer(consumer) + return consumer + + return self.ensure(_connect_error, _declare_consumer) + + def iterconsume(self, limit=None, timeout=None): + """Return an iterator that will consume from all queues/consumers""" + + def _error_callback(exc): + if isinstance(exc, qpid_exceptions.Empty): + LOG.debug(_('Timed out waiting for RPC response: %s') % + str(exc)) + raise rpc_common.Timeout() + else: + LOG.exception(_('Failed to consume message from queue: %s') % + str(exc)) + + def _consume(): + nxt_receiver = self.session.next_receiver(timeout=timeout) + try: + self._lookup_consumer(nxt_receiver).consume() + except Exception: + LOG.exception(_("Error processing message. Skipping it.")) + + for iteration in itertools.count(0): + if limit and iteration >= limit: + raise StopIteration + yield self.ensure(_error_callback, _consume) + + def cancel_consumer_thread(self): + """Cancel a consumer thread""" + if self.consumer_thread is not None: + self.consumer_thread.kill() + try: + self.consumer_thread.wait() + except greenlet.GreenletExit: + pass + self.consumer_thread = None + + def wait_on_proxy_callbacks(self): + """Wait for all proxy callback threads to exit.""" + for proxy_cb in self.proxy_callbacks: + proxy_cb.wait() + + def publisher_send(self, cls, topic, msg): + """Send to a publisher based on the publisher class""" + + def _connect_error(exc): + log_info = {'topic': topic, 'err_str': str(exc)} + LOG.exception(_("Failed to publish message to topic " + "'%(topic)s': %(err_str)s") % log_info) + + def _publisher_send(): + publisher = cls(self.conf, self.session, topic) + publisher.send(msg) + + return self.ensure(_connect_error, _publisher_send) + + def declare_direct_consumer(self, topic, callback): + """Create a 'direct' queue. 
+ In nova's use, this is generally a msg_id queue used for + responses for call/multicall + """ + self.declare_consumer(DirectConsumer, topic, callback) + + def declare_topic_consumer(self, topic, callback=None, queue_name=None, + exchange_name=None): + """Create a 'topic' consumer.""" + self.declare_consumer(functools.partial(TopicConsumer, + name=queue_name, + exchange_name=exchange_name, + ), + topic, callback) + + def declare_fanout_consumer(self, topic, callback): + """Create a 'fanout' consumer""" + self.declare_consumer(FanoutConsumer, topic, callback) + + def direct_send(self, msg_id, msg): + """Send a 'direct' message""" + self.publisher_send(DirectPublisher, msg_id, msg) + + def topic_send(self, topic, msg, timeout=None): + """Send a 'topic' message""" + # + # We want to create a message with attributes, e.g. a TTL. We + # don't really need to keep 'msg' in its JSON format any longer + # so let's create an actual qpid message here and get some + # value-add on the go. + # + # WARNING: Request timeout happens to be in the same units as + # qpid's TTL (seconds). If this changes in the future, then this + # will need to be altered accordingly. + # + qpid_message = qpid_messaging.Message(content=msg, ttl=timeout) + self.publisher_send(TopicPublisher, topic, qpid_message) + + def fanout_send(self, topic, msg): + """Send a 'fanout' message""" + self.publisher_send(FanoutPublisher, topic, msg) + + def notify_send(self, topic, msg, **kwargs): + """Send a notify message on a topic""" + self.publisher_send(NotifyPublisher, topic, msg) + + def consume(self, limit=None): + """Consume from all queues/consumers""" + it = self.iterconsume(limit=limit) + while True: + try: + it.next() + except StopIteration: + return + + def consume_in_thread(self): + """Consumer from all queues/consumers in a greenthread""" + def _consumer_thread(): + try: + self.consume() + except greenlet.GreenletExit: + return + if self.consumer_thread is None: + self.consumer_thread = eventlet.spawn(_consumer_thread) + return self.consumer_thread + + def create_consumer(self, topic, proxy, fanout=False): + """Create a consumer that calls a method in a proxy object""" + proxy_cb = rpc_amqp.ProxyCallback( + self.conf, proxy, + rpc_amqp.get_connection_pool(self.conf, Connection)) + self.proxy_callbacks.append(proxy_cb) + + if fanout: + consumer = FanoutConsumer(self.conf, self.session, topic, proxy_cb) + else: + consumer = TopicConsumer(self.conf, self.session, topic, proxy_cb) + + self._register_consumer(consumer) + + return consumer + + def create_worker(self, topic, proxy, pool_name): + """Create a worker that calls a method in a proxy object""" + proxy_cb = rpc_amqp.ProxyCallback( + self.conf, proxy, + rpc_amqp.get_connection_pool(self.conf, Connection)) + self.proxy_callbacks.append(proxy_cb) + + consumer = TopicConsumer(self.conf, self.session, topic, proxy_cb, + name=pool_name) + + self._register_consumer(consumer) + + return consumer + + def join_consumer_pool(self, callback, pool_name, topic, + exchange_name=None): + """Register as a member of a group of consumers for a given topic from + the specified exchange. + + Exactly one member of a given pool will receive each message. + + A message will be delivered to multiple pools, if more than + one is created. 
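+
+        As an illustrative sketch (the callbacks and pool names are
+        hypothetical): if pools 'auditors' and 'billing' both join the
+        same topic, each message is handled once by some member of
+        'auditors' and once by some member of 'billing'::
+
+            conn.join_consumer_pool(callback=audit_cb,
+                                    pool_name='auditors',
+                                    topic='notifications.info')
+            conn.join_consumer_pool(callback=billing_cb,
+                                    pool_name='billing',
+                                    topic='notifications.info')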
+ """ + callback_wrapper = rpc_amqp.CallbackWrapper( + conf=self.conf, + callback=callback, + connection_pool=rpc_amqp.get_connection_pool(self.conf, + Connection), + ) + self.proxy_callbacks.append(callback_wrapper) + + consumer = TopicConsumer(conf=self.conf, + session=self.session, + topic=topic, + callback=callback_wrapper, + name=pool_name, + exchange_name=exchange_name) + + self._register_consumer(consumer) + return consumer + + +def create_connection(conf, new=True): + """Create a connection""" + return rpc_amqp.create_connection( + conf, new, + rpc_amqp.get_connection_pool(conf, Connection)) + + +def multicall(conf, context, topic, msg, timeout=None): + """Make a call that returns multiple times.""" + return rpc_amqp.multicall( + conf, context, topic, msg, timeout, + rpc_amqp.get_connection_pool(conf, Connection)) + + +def call(conf, context, topic, msg, timeout=None): + """Sends a message on a topic and wait for a response.""" + return rpc_amqp.call( + conf, context, topic, msg, timeout, + rpc_amqp.get_connection_pool(conf, Connection)) + + +def cast(conf, context, topic, msg): + """Sends a message on a topic without waiting for a response.""" + return rpc_amqp.cast( + conf, context, topic, msg, + rpc_amqp.get_connection_pool(conf, Connection)) + + +def fanout_cast(conf, context, topic, msg): + """Sends a message on a fanout exchange without waiting for a response.""" + return rpc_amqp.fanout_cast( + conf, context, topic, msg, + rpc_amqp.get_connection_pool(conf, Connection)) + + +def cast_to_server(conf, context, server_params, topic, msg): + """Sends a message on a topic to a specific server.""" + return rpc_amqp.cast_to_server( + conf, context, server_params, topic, msg, + rpc_amqp.get_connection_pool(conf, Connection)) + + +def fanout_cast_to_server(conf, context, server_params, topic, msg): + """Sends a message on a fanout exchange to a specific server.""" + return rpc_amqp.fanout_cast_to_server( + conf, context, server_params, topic, msg, + rpc_amqp.get_connection_pool(conf, Connection)) + + +def notify(conf, context, topic, msg, envelope): + """Sends a notification event on a topic.""" + return rpc_amqp.notify(conf, context, topic, msg, + rpc_amqp.get_connection_pool(conf, Connection), + envelope) + + +def cleanup(): + return rpc_amqp.cleanup(Connection.pool) diff --git a/cinder/openstack/common/rpc/impl_zmq.py b/cinder/openstack/common/rpc/impl_zmq.py new file mode 100644 index 0000000000..d3d3599e8f --- /dev/null +++ b/cinder/openstack/common/rpc/impl_zmq.py @@ -0,0 +1,851 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 Cloudscaling Group, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import os +import pprint +import re +import socket +import sys +import types +import uuid + +import eventlet +import greenlet +from oslo.config import cfg + +from cinder.openstack.common import excutils +from cinder.openstack.common.gettextutils import _ +from cinder.openstack.common import importutils +from cinder.openstack.common import jsonutils +from cinder.openstack.common import processutils as utils +from cinder.openstack.common.rpc import common as rpc_common + +zmq = importutils.try_import('eventlet.green.zmq') + +# for convenience, are not modified. +pformat = pprint.pformat +Timeout = eventlet.timeout.Timeout +LOG = rpc_common.LOG +RemoteError = rpc_common.RemoteError +RPCException = rpc_common.RPCException + +zmq_opts = [ + cfg.StrOpt('rpc_zmq_bind_address', default='*', + help='ZeroMQ bind address. Should be a wildcard (*), ' + 'an ethernet interface, or IP. ' + 'The "host" option should point or resolve to this ' + 'address.'), + + # The module.Class to use for matchmaking. + cfg.StrOpt( + 'rpc_zmq_matchmaker', + default=('cinder.openstack.common.rpc.' + 'matchmaker.MatchMakerLocalhost'), + help='MatchMaker driver', + ), + + # The following port is unassigned by IANA as of 2012-05-21 + cfg.IntOpt('rpc_zmq_port', default=9501, + help='ZeroMQ receiver listening port'), + + cfg.IntOpt('rpc_zmq_contexts', default=1, + help='Number of ZeroMQ contexts, defaults to 1'), + + cfg.IntOpt('rpc_zmq_topic_backlog', default=None, + help='Maximum number of ingress messages to locally buffer ' + 'per topic. Default is unlimited.'), + + cfg.StrOpt('rpc_zmq_ipc_dir', default='/var/run/openstack', + help='Directory for holding IPC sockets'), + + cfg.StrOpt('rpc_zmq_host', default=socket.gethostname(), + help='Name of this node. Must be a valid hostname, FQDN, or ' + 'IP address. Must match "host" option, if running Nova.') +] + + +CONF = cfg.CONF +CONF.register_opts(zmq_opts) + +ZMQ_CTX = None # ZeroMQ Context, must be global. +matchmaker = None # memoized matchmaker object + + +def _serialize(data): + """ + Serialization wrapper + We prefer using JSON, but it cannot encode all types. + Error if a developer passes us bad data. + """ + try: + return jsonutils.dumps(data, ensure_ascii=True) + except TypeError: + with excutils.save_and_reraise_exception(): + LOG.error(_("JSON serialization failed.")) + + +def _deserialize(data): + """ + Deserialization wrapper + """ + LOG.debug(_("Deserializing: %s"), data) + return jsonutils.loads(data) + + +class ZmqSocket(object): + """ + A tiny wrapper around ZeroMQ to simplify the send/recv protocol + and connection management. + + Can be used as a Context (supports the 'with' statement). + """ + + def __init__(self, addr, zmq_type, bind=True, subscribe=None): + self.sock = _get_ctxt().socket(zmq_type) + self.addr = addr + self.type = zmq_type + self.subscriptions = [] + + # Support failures on sending/receiving on wrong socket type. 
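+        # For instance, recv() on a PUSH or PUB socket, or send() on a
+        # PULL or SUB socket, raises RPCException immediately rather than
+        # blocking (see the can_recv/can_send checks in recv()/send()).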
+ self.can_recv = zmq_type in (zmq.PULL, zmq.SUB) + self.can_send = zmq_type in (zmq.PUSH, zmq.PUB) + self.can_sub = zmq_type in (zmq.SUB, ) + + # Support list, str, & None for subscribe arg (cast to list) + do_sub = { + list: subscribe, + str: [subscribe], + type(None): [] + }[type(subscribe)] + + for f in do_sub: + self.subscribe(f) + + str_data = {'addr': addr, 'type': self.socket_s(), + 'subscribe': subscribe, 'bind': bind} + + LOG.debug(_("Connecting to %(addr)s with %(type)s"), str_data) + LOG.debug(_("-> Subscribed to %(subscribe)s"), str_data) + LOG.debug(_("-> bind: %(bind)s"), str_data) + + try: + if bind: + self.sock.bind(addr) + else: + self.sock.connect(addr) + except Exception: + raise RPCException(_("Could not open socket.")) + + def socket_s(self): + """Get socket type as string.""" + t_enum = ('PUSH', 'PULL', 'PUB', 'SUB', 'REP', 'REQ', 'ROUTER', + 'DEALER') + return dict(map(lambda t: (getattr(zmq, t), t), t_enum))[self.type] + + def subscribe(self, msg_filter): + """Subscribe.""" + if not self.can_sub: + raise RPCException("Cannot subscribe on this socket.") + LOG.debug(_("Subscribing to %s"), msg_filter) + + try: + self.sock.setsockopt(zmq.SUBSCRIBE, msg_filter) + except Exception: + return + + self.subscriptions.append(msg_filter) + + def unsubscribe(self, msg_filter): + """Unsubscribe.""" + if msg_filter not in self.subscriptions: + return + self.sock.setsockopt(zmq.UNSUBSCRIBE, msg_filter) + self.subscriptions.remove(msg_filter) + + def close(self): + if self.sock is None or self.sock.closed: + return + + # We must unsubscribe, or we'll leak descriptors. + if len(self.subscriptions) > 0: + for f in self.subscriptions: + try: + self.sock.setsockopt(zmq.UNSUBSCRIBE, f) + except Exception: + pass + self.subscriptions = [] + + try: + # Default is to linger + self.sock.close() + except Exception: + # While this is a bad thing to happen, + # it would be much worse if some of the code calling this + # were to fail. For now, lets log, and later evaluate + # if we can safely raise here. 
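+            # (In practice a failed close() usually means the process is
+            # shutting down and the ZeroMQ context is already gone.)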
+ LOG.error("ZeroMQ socket could not be closed.") + self.sock = None + + def recv(self): + if not self.can_recv: + raise RPCException(_("You cannot recv on this socket.")) + return self.sock.recv_multipart() + + def send(self, data): + if not self.can_send: + raise RPCException(_("You cannot send on this socket.")) + self.sock.send_multipart(data) + + +class ZmqClient(object): + """Client for ZMQ sockets.""" + + def __init__(self, addr, socket_type=None, bind=False): + if socket_type is None: + socket_type = zmq.PUSH + self.outq = ZmqSocket(addr, socket_type, bind=bind) + + def cast(self, msg_id, topic, data, envelope=False): + msg_id = msg_id or 0 + + if not envelope: + self.outq.send(map(bytes, + (msg_id, topic, 'cast', _serialize(data)))) + return + + rpc_envelope = rpc_common.serialize_msg(data[1], envelope) + zmq_msg = reduce(lambda x, y: x + y, rpc_envelope.items()) + self.outq.send(map(bytes, + (msg_id, topic, 'impl_zmq_v2', data[0]) + zmq_msg)) + + def close(self): + self.outq.close() + + +class RpcContext(rpc_common.CommonRpcContext): + """Context that supports replying to a rpc.call.""" + def __init__(self, **kwargs): + self.replies = [] + super(RpcContext, self).__init__(**kwargs) + + def deepcopy(self): + values = self.to_dict() + values['replies'] = self.replies + return self.__class__(**values) + + def reply(self, reply=None, failure=None, ending=False): + if ending: + return + self.replies.append(reply) + + @classmethod + def marshal(self, ctx): + ctx_data = ctx.to_dict() + return _serialize(ctx_data) + + @classmethod + def unmarshal(self, data): + return RpcContext.from_dict(_deserialize(data)) + + +class InternalContext(object): + """Used by ConsumerBase as a private context for - methods.""" + + def __init__(self, proxy): + self.proxy = proxy + self.msg_waiter = None + + def _get_response(self, ctx, proxy, topic, data): + """Process a curried message and cast the result to topic.""" + LOG.debug(_("Running func with context: %s"), ctx.to_dict()) + data.setdefault('version', None) + data.setdefault('args', {}) + + try: + result = proxy.dispatch( + ctx, data['version'], data['method'], + data.get('namespace'), **data['args']) + return ConsumerBase.normalize_reply(result, ctx.replies) + except greenlet.GreenletExit: + # ignore these since they are just from shutdowns + pass + except rpc_common.ClientException, e: + LOG.debug(_("Expected exception during message handling (%s)") % + e._exc_info[1]) + return {'exc': + rpc_common.serialize_remote_exception(e._exc_info, + log_failure=False)} + except Exception: + LOG.error(_("Exception during message handling")) + return {'exc': + rpc_common.serialize_remote_exception(sys.exc_info())} + + def reply(self, ctx, proxy, + msg_id=None, context=None, topic=None, msg=None): + """Reply to a casted call.""" + # NOTE(ewindisch): context kwarg exists for Grizzly compat. + # this may be able to be removed earlier than + # 'I' if ConsumerBase.process were refactored. + if type(msg) is list: + payload = msg[-1] + else: + payload = msg + + response = ConsumerBase.normalize_reply( + self._get_response(ctx, proxy, topic, payload), + ctx.replies) + + LOG.debug(_("Sending reply")) + _multi_send(_cast, ctx, topic, { + 'method': '-process_reply', + 'args': { + 'msg_id': msg_id, # Include for Folsom compat. 
+ 'response': response + } + }, _msg_id=msg_id) + + +class ConsumerBase(object): + """Base Consumer.""" + + def __init__(self): + self.private_ctx = InternalContext(None) + + @classmethod + def normalize_reply(self, result, replies): + #TODO(ewindisch): re-evaluate and document this method. + if isinstance(result, types.GeneratorType): + return list(result) + elif replies: + return replies + else: + return [result] + + def process(self, proxy, ctx, data): + data.setdefault('version', None) + data.setdefault('args', {}) + + # Method starting with - are + # processed internally. (non-valid method name) + method = data.get('method') + if not method: + LOG.error(_("RPC message did not include method.")) + return + + # Internal method + # uses internal context for safety. + if method == '-reply': + self.private_ctx.reply(ctx, proxy, **data['args']) + return + + proxy.dispatch(ctx, data['version'], + data['method'], data.get('namespace'), **data['args']) + + +class ZmqBaseReactor(ConsumerBase): + """ + A consumer class implementing a + centralized casting broker (PULL-PUSH) + for RoundRobin requests. + """ + + def __init__(self, conf): + super(ZmqBaseReactor, self).__init__() + + self.mapping = {} + self.proxies = {} + self.threads = [] + self.sockets = [] + self.subscribe = {} + + self.pool = eventlet.greenpool.GreenPool(conf.rpc_thread_pool_size) + + def register(self, proxy, in_addr, zmq_type_in, out_addr=None, + zmq_type_out=None, in_bind=True, out_bind=True, + subscribe=None): + + LOG.info(_("Registering reactor")) + + if zmq_type_in not in (zmq.PULL, zmq.SUB): + raise RPCException("Bad input socktype") + + # Items push in. + inq = ZmqSocket(in_addr, zmq_type_in, bind=in_bind, + subscribe=subscribe) + + self.proxies[inq] = proxy + self.sockets.append(inq) + + LOG.info(_("In reactor registered")) + + if not out_addr: + return + + if zmq_type_out not in (zmq.PUSH, zmq.PUB): + raise RPCException("Bad output socktype") + + # Items push out. + outq = ZmqSocket(out_addr, zmq_type_out, bind=out_bind) + + self.mapping[inq] = outq + self.mapping[outq] = inq + self.sockets.append(outq) + + LOG.info(_("Out reactor registered")) + + def consume_in_thread(self): + def _consume(sock): + LOG.info(_("Consuming socket")) + while True: + self.consume(sock) + + for k in self.proxies.keys(): + self.threads.append( + self.pool.spawn(_consume, k) + ) + + def wait(self): + for t in self.threads: + t.wait() + + def close(self): + for s in self.sockets: + s.close() + + for t in self.threads: + t.kill() + + +class ZmqProxy(ZmqBaseReactor): + """ + A consumer class implementing a + topic-based proxy, forwarding to + IPC sockets. + """ + + def __init__(self, conf): + super(ZmqProxy, self).__init__(conf) + pathsep = set((os.path.sep or '', os.path.altsep or '', '/', '\\')) + self.badchars = re.compile(r'[%s]' % re.escape(''.join(pathsep))) + + self.topic_proxy = {} + + def consume(self, sock): + ipc_dir = CONF.rpc_zmq_ipc_dir + + #TODO(ewindisch): use zero-copy (i.e. references, not copying) + data = sock.recv() + topic = data[1] + + LOG.debug(_("CONSUMER GOT %s"), ' '.join(map(pformat, data))) + + if topic.startswith('fanout~'): + sock_type = zmq.PUB + topic = topic.split('.', 1)[0] + elif topic.startswith('zmq_replies'): + sock_type = zmq.PUB + else: + sock_type = zmq.PUSH + + if topic not in self.topic_proxy: + def publisher(waiter): + LOG.info(_("Creating proxy for topic: %s"), topic) + + try: + # The topic is received over the network, + # don't trust this input. 
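+                    # For example, a topic like "../../tmp/evil" contains
+                    # path separators and would otherwise escape
+                    # rpc_zmq_ipc_dir via the IPC socket path built below.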
+ if self.badchars.search(topic) is not None: + emsg = _("Topic contained dangerous characters.") + LOG.warn(emsg) + raise RPCException(emsg) + + out_sock = ZmqSocket("ipc://%s/zmq_topic_%s" % + (ipc_dir, topic), + sock_type, bind=True) + except RPCException: + waiter.send_exception(*sys.exc_info()) + return + + self.topic_proxy[topic] = eventlet.queue.LightQueue( + CONF.rpc_zmq_topic_backlog) + self.sockets.append(out_sock) + + # It takes some time for a pub socket to open, + # before we can have any faith in doing a send() to it. + if sock_type == zmq.PUB: + eventlet.sleep(.5) + + waiter.send(True) + + while(True): + data = self.topic_proxy[topic].get() + out_sock.send(data) + LOG.debug(_("ROUTER RELAY-OUT SUCCEEDED %(data)s") % + {'data': data}) + + wait_sock_creation = eventlet.event.Event() + eventlet.spawn(publisher, wait_sock_creation) + + try: + wait_sock_creation.wait() + except RPCException: + LOG.error(_("Topic socket file creation failed.")) + return + + try: + self.topic_proxy[topic].put_nowait(data) + LOG.debug(_("ROUTER RELAY-OUT QUEUED %(data)s") % + {'data': data}) + except eventlet.queue.Full: + LOG.error(_("Local per-topic backlog buffer full for topic " + "%(topic)s. Dropping message.") % {'topic': topic}) + + def consume_in_thread(self): + """Runs the ZmqProxy service""" + ipc_dir = CONF.rpc_zmq_ipc_dir + consume_in = "tcp://%s:%s" % \ + (CONF.rpc_zmq_bind_address, + CONF.rpc_zmq_port) + consumption_proxy = InternalContext(None) + + if not os.path.isdir(ipc_dir): + try: + utils.execute('mkdir', '-p', ipc_dir, run_as_root=True) + utils.execute('chown', "%s:%s" % (os.getuid(), os.getgid()), + ipc_dir, run_as_root=True) + utils.execute('chmod', '750', ipc_dir, run_as_root=True) + except utils.ProcessExecutionError: + with excutils.save_and_reraise_exception(): + LOG.error(_("Could not create IPC directory %s") % + (ipc_dir, )) + + try: + self.register(consumption_proxy, + consume_in, + zmq.PULL, + out_bind=True) + except zmq.ZMQError: + with excutils.save_and_reraise_exception(): + LOG.error(_("Could not create ZeroMQ receiver daemon. " + "Socket may already be in use.")) + + super(ZmqProxy, self).consume_in_thread() + + +def unflatten_envelope(packenv): + """Unflattens the RPC envelope. + Takes a list and returns a dictionary. + i.e. [1,2,3,4] => {1: 2, 3: 4} + """ + i = iter(packenv) + h = {} + try: + while True: + k = i.next() + h[k] = i.next() + except StopIteration: + return h + + +class ZmqReactor(ZmqBaseReactor): + """ + A consumer class implementing a + consumer for messages. Can also be + used as a 1:1 proxy + """ + + def __init__(self, conf): + super(ZmqReactor, self).__init__(conf) + + def consume(self, sock): + #TODO(ewindisch): use zero-copy (i.e. references, not copying) + data = sock.recv() + LOG.debug(_("CONSUMER RECEIVED DATA: %s"), data) + if sock in self.mapping: + LOG.debug(_("ROUTER RELAY-OUT %(data)s") % { + 'data': data}) + self.mapping[sock].send(data) + return + + proxy = self.proxies[sock] + + if data[2] == 'cast': # Legacy protocol + packenv = data[3] + + ctx, msg = _deserialize(packenv) + request = rpc_common.deserialize_msg(msg) + ctx = RpcContext.unmarshal(ctx) + elif data[2] == 'impl_zmq_v2': + packenv = data[4:] + + msg = unflatten_envelope(packenv) + request = rpc_common.deserialize_msg(msg) + + # Unmarshal only after verifying the message. 
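+            # (For the v2 framing, data[3] carries the marshalled context
+            # and data[4:] is the flattened envelope unpacked above.)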
+ ctx = RpcContext.unmarshal(data[3]) + else: + LOG.error(_("ZMQ Envelope version unsupported or unknown.")) + return + + self.pool.spawn_n(self.process, proxy, ctx, request) + + +class Connection(rpc_common.Connection): + """Manages connections and threads.""" + + def __init__(self, conf): + self.topics = [] + self.reactor = ZmqReactor(conf) + + def create_consumer(self, topic, proxy, fanout=False): + # Register with matchmaker. + _get_matchmaker().register(topic, CONF.rpc_zmq_host) + + # Subscription scenarios + if fanout: + sock_type = zmq.SUB + subscribe = ('', fanout)[type(fanout) == str] + topic = 'fanout~' + topic.split('.', 1)[0] + else: + sock_type = zmq.PULL + subscribe = None + topic = '.'.join((topic.split('.', 1)[0], CONF.rpc_zmq_host)) + + if topic in self.topics: + LOG.info(_("Skipping topic registration. Already registered.")) + return + + # Receive messages from (local) proxy + inaddr = "ipc://%s/zmq_topic_%s" % \ + (CONF.rpc_zmq_ipc_dir, topic) + + LOG.debug(_("Consumer is a zmq.%s"), + ['PULL', 'SUB'][sock_type == zmq.SUB]) + + self.reactor.register(proxy, inaddr, sock_type, + subscribe=subscribe, in_bind=False) + self.topics.append(topic) + + def close(self): + _get_matchmaker().stop_heartbeat() + for topic in self.topics: + _get_matchmaker().unregister(topic, CONF.rpc_zmq_host) + + self.reactor.close() + self.topics = [] + + def wait(self): + self.reactor.wait() + + def consume_in_thread(self): + _get_matchmaker().start_heartbeat() + self.reactor.consume_in_thread() + + +def _cast(addr, context, topic, msg, timeout=None, envelope=False, + _msg_id=None): + timeout_cast = timeout or CONF.rpc_cast_timeout + payload = [RpcContext.marshal(context), msg] + + with Timeout(timeout_cast, exception=rpc_common.Timeout): + try: + conn = ZmqClient(addr) + + # assumes cast can't return an exception + conn.cast(_msg_id, topic, payload, envelope) + except zmq.ZMQError: + raise RPCException("Cast failed. ZMQ Socket Exception") + finally: + if 'conn' in vars(): + conn.close() + + +def _call(addr, context, topic, msg, timeout=None, + envelope=False): + # timeout_response is how long we wait for a response + timeout = timeout or CONF.rpc_response_timeout + + # The msg_id is used to track replies. + msg_id = uuid.uuid4().hex + + # Replies always come into the reply service. + reply_topic = "zmq_replies.%s" % CONF.rpc_zmq_host + + LOG.debug(_("Creating payload")) + # Curry the original request into a reply method. + mcontext = RpcContext.marshal(context) + payload = { + 'method': '-reply', + 'args': { + 'msg_id': msg_id, + 'topic': reply_topic, + # TODO(ewindisch): safe to remove mcontext in I. + 'msg': [mcontext, msg] + } + } + + LOG.debug(_("Creating queue socket for reply waiter")) + + # Messages arriving async. 
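+    # NOTE: the reply socket below subscribes using this call's msg_id as
+    # its filter, so only the reply addressed to this call is received.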
+ # TODO(ewindisch): have reply consumer with dynamic subscription mgmt + with Timeout(timeout, exception=rpc_common.Timeout): + try: + msg_waiter = ZmqSocket( + "ipc://%s/zmq_topic_zmq_replies.%s" % + (CONF.rpc_zmq_ipc_dir, + CONF.rpc_zmq_host), + zmq.SUB, subscribe=msg_id, bind=False + ) + + LOG.debug(_("Sending cast")) + _cast(addr, context, topic, payload, envelope) + + LOG.debug(_("Cast sent; Waiting reply")) + # Blocks until receives reply + msg = msg_waiter.recv() + LOG.debug(_("Received message: %s"), msg) + LOG.debug(_("Unpacking response")) + + if msg[2] == 'cast': # Legacy version + raw_msg = _deserialize(msg[-1])[-1] + elif msg[2] == 'impl_zmq_v2': + rpc_envelope = unflatten_envelope(msg[4:]) + raw_msg = rpc_common.deserialize_msg(rpc_envelope) + else: + raise rpc_common.UnsupportedRpcEnvelopeVersion( + _("Unsupported or unknown ZMQ envelope returned.")) + + responses = raw_msg['args']['response'] + # ZMQError trumps the Timeout error. + except zmq.ZMQError: + raise RPCException("ZMQ Socket Error") + except (IndexError, KeyError): + raise RPCException(_("RPC Message Invalid.")) + finally: + if 'msg_waiter' in vars(): + msg_waiter.close() + + # It seems we don't need to do all of the following, + # but perhaps it would be useful for multicall? + # One effect of this is that we're checking all + # responses for Exceptions. + for resp in responses: + if isinstance(resp, types.DictType) and 'exc' in resp: + raise rpc_common.deserialize_remote_exception(CONF, resp['exc']) + + return responses[-1] + + +def _multi_send(method, context, topic, msg, timeout=None, + envelope=False, _msg_id=None): + """ + Wraps the sending of messages, + dispatches to the matchmaker and sends + message to all relevant hosts. + """ + conf = CONF + LOG.debug(_("%(msg)s") % {'msg': ' '.join(map(pformat, (topic, msg)))}) + + queues = _get_matchmaker().queues(topic) + LOG.debug(_("Sending message(s) to: %s"), queues) + + # Don't stack if we have no matchmaker results + if len(queues) == 0: + LOG.warn(_("No matchmaker results. Not casting.")) + # While not strictly a timeout, callers know how to handle + # this exception and a timeout isn't too big a lie. + raise rpc_common.Timeout(_("No match from matchmaker.")) + + # This supports brokerless fanout (addresses > 1) + for queue in queues: + (_topic, ip_addr) = queue + _addr = "tcp://%s:%s" % (ip_addr, conf.rpc_zmq_port) + + if method.__name__ == '_cast': + eventlet.spawn_n(method, _addr, context, + _topic, msg, timeout, envelope, + _msg_id) + return + return method(_addr, context, _topic, msg, timeout, + envelope) + + +def create_connection(conf, new=True): + return Connection(conf) + + +def multicall(conf, *args, **kwargs): + """Multiple calls.""" + return _multi_send(_call, *args, **kwargs) + + +def call(conf, *args, **kwargs): + """Send a message, expect a response.""" + data = _multi_send(_call, *args, **kwargs) + return data[-1] + + +def cast(conf, *args, **kwargs): + """Send a message expecting no reply.""" + _multi_send(_cast, *args, **kwargs) + + +def fanout_cast(conf, context, topic, msg, **kwargs): + """Send a message to all listening and expect no reply.""" + # NOTE(ewindisch): fanout~ is used because it avoid splitting on . + # and acts as a non-subtle hint to the matchmaker and ZmqProxy. + _multi_send(_cast, context, 'fanout~' + str(topic), msg, **kwargs) + + +def notify(conf, context, topic, msg, envelope): + """ + Send notification event. + Notifications are sent to topic-priority. 
+    This differs from the AMQP drivers which send to topic.priority.
+    """
+    # NOTE(ewindisch): dot-priority in rpc notifier does not
+    # work with our assumptions.
+    topic = topic.replace('.', '-')
+    cast(conf, context, topic, msg, envelope=envelope)
+
+
+def cleanup():
+    """Clean up resources in use by implementation."""
+    global ZMQ_CTX
+    if ZMQ_CTX:
+        ZMQ_CTX.term()
+    ZMQ_CTX = None
+
+    global matchmaker
+    matchmaker = None
+
+
+def _get_ctxt():
+    if not zmq:
+        raise ImportError("Failed to import eventlet.green.zmq")
+
+    global ZMQ_CTX
+    if not ZMQ_CTX:
+        ZMQ_CTX = zmq.Context(CONF.rpc_zmq_contexts)
+    return ZMQ_CTX
+
+
+def _get_matchmaker(*args, **kwargs):
+    global matchmaker
+    if not matchmaker:
+        matchmaker = importutils.import_object(
+            CONF.rpc_zmq_matchmaker, *args, **kwargs)
+    return matchmaker
diff --git a/cinder/openstack/common/rpc/matchmaker.py b/cinder/openstack/common/rpc/matchmaker.py
new file mode 100644
index 0000000000..f12c14dfd1
--- /dev/null
+++ b/cinder/openstack/common/rpc/matchmaker.py
@@ -0,0 +1,425 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 Cloudscaling Group, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+The MatchMaker classes should accept a Topic or Fanout exchange key and
+return keys for direct exchanges, per (approximate) AMQP parlance.
+"""
+
+import contextlib
+import itertools
+import json
+
+import eventlet
+from oslo.config import cfg
+
+from cinder.openstack.common.gettextutils import _
+from cinder.openstack.common import log as logging
+
+
+matchmaker_opts = [
+    # Matchmaker ring file
+    cfg.StrOpt('matchmaker_ringfile',
+               default='/etc/nova/matchmaker_ring.json',
+               help='Matchmaker ring file (JSON)'),
+    cfg.IntOpt('matchmaker_heartbeat_freq',
+               default=300,
+               help='Heartbeat frequency'),
+    cfg.IntOpt('matchmaker_heartbeat_ttl',
+               default=600,
+               help='Heartbeat time-to-live.'),
+]
+
+CONF = cfg.CONF
+CONF.register_opts(matchmaker_opts)
+LOG = logging.getLogger(__name__)
+contextmanager = contextlib.contextmanager
+
+
+class MatchMakerException(Exception):
+    """Signifies that a match could not be found."""
+    message = _("Match not found by MatchMaker.")
+
+
+class Exchange(object):
+    """
+    Implements lookups.
+    Subclass this to support hashtables, dns, etc.
+    """
+    def __init__(self):
+        pass
+
+    def run(self, key):
+        raise NotImplementedError()
+
+
+class Binding(object):
+    """
+    A binding on which to perform a lookup.
+    """
+    def __init__(self):
+        pass
+
+    def test(self, key):
+        raise NotImplementedError()
+
+
+class MatchMakerBase(object):
+    """
+    Match Maker Base Class.
+    Build off HeartbeatMatchMakerBase if building a
+    heartbeat-capable MatchMaker.
+    """
+    def __init__(self):
+        # Array of tuples. Index [2] toggles negation, [3] is last-if-true
+        self.bindings = []
+
+        self.no_heartbeat_msg = _('Matchmaker does not implement '
+                                  'registration or heartbeat.')
+
+    def register(self, key, host):
+        """
+        Register a host on a backend.
+        Heartbeats, if applicable, may keepalive registration.
+ """ + pass + + def ack_alive(self, key, host): + """ + Acknowledge that a key.host is alive. + Used internally for updating heartbeats, + but may also be used publically to acknowledge + a system is alive (i.e. rpc message successfully + sent to host) + """ + pass + + def is_alive(self, topic, host): + """ + Checks if a host is alive. + """ + pass + + def expire(self, topic, host): + """ + Explicitly expire a host's registration. + """ + pass + + def send_heartbeats(self): + """ + Send all heartbeats. + Use start_heartbeat to spawn a heartbeat greenthread, + which loops this method. + """ + pass + + def unregister(self, key, host): + """ + Unregister a topic. + """ + pass + + def start_heartbeat(self): + """ + Spawn heartbeat greenthread. + """ + pass + + def stop_heartbeat(self): + """ + Destroys the heartbeat greenthread. + """ + pass + + def add_binding(self, binding, rule, last=True): + self.bindings.append((binding, rule, False, last)) + + #NOTE(ewindisch): kept the following method in case we implement the + # underlying support. + #def add_negate_binding(self, binding, rule, last=True): + # self.bindings.append((binding, rule, True, last)) + + def queues(self, key): + workers = [] + + # bit is for negate bindings - if we choose to implement it. + # last stops processing rules if this matches. + for (binding, exchange, bit, last) in self.bindings: + if binding.test(key): + workers.extend(exchange.run(key)) + + # Support last. + if last: + return workers + return workers + + +class HeartbeatMatchMakerBase(MatchMakerBase): + """ + Base for a heart-beat capable MatchMaker. + Provides common methods for registering, + unregistering, and maintaining heartbeats. + """ + def __init__(self): + self.hosts = set() + self._heart = None + self.host_topic = {} + + super(HeartbeatMatchMakerBase, self).__init__() + + def send_heartbeats(self): + """ + Send all heartbeats. + Use start_heartbeat to spawn a heartbeat greenthread, + which loops this method. + """ + for key, host in self.host_topic: + self.ack_alive(key, host) + + def ack_alive(self, key, host): + """ + Acknowledge that a host.topic is alive. + Used internally for updating heartbeats, + but may also be used publically to acknowledge + a system is alive (i.e. rpc message successfully + sent to host) + """ + raise NotImplementedError("Must implement ack_alive") + + def backend_register(self, key, host): + """ + Implements registration logic. + Called by register(self,key,host) + """ + raise NotImplementedError("Must implement backend_register") + + def backend_unregister(self, key, key_host): + """ + Implements de-registration logic. + Called by unregister(self,key,host) + """ + raise NotImplementedError("Must implement backend_unregister") + + def register(self, key, host): + """ + Register a host on a backend. + Heartbeats, if applicable, may keepalive registration. + """ + self.hosts.add(host) + self.host_topic[(key, host)] = host + key_host = '.'.join((key, host)) + + self.backend_register(key, key_host) + + self.ack_alive(key, host) + + def unregister(self, key, host): + """ + Unregister a topic. + """ + if (key, host) in self.host_topic: + del self.host_topic[(key, host)] + + self.hosts.discard(host) + self.backend_unregister(key, '.'.join((key, host))) + + LOG.info(_("Matchmaker unregistered: %s, %s" % (key, host))) + + def start_heartbeat(self): + """ + Implementation of MatchMakerBase.start_heartbeat + Launches greenthread looping send_heartbeats(), + yielding for CONF.matchmaker_heartbeat_freq seconds + between iterations. 
+ """ + if len(self.hosts) == 0: + raise MatchMakerException( + _("Register before starting heartbeat.")) + + def do_heartbeat(): + while True: + self.send_heartbeats() + eventlet.sleep(CONF.matchmaker_heartbeat_freq) + + self._heart = eventlet.spawn(do_heartbeat) + + def stop_heartbeat(self): + """ + Destroys the heartbeat greenthread. + """ + if self._heart: + self._heart.kill() + + +class DirectBinding(Binding): + """ + Specifies a host in the key via a '.' character + Although dots are used in the key, the behavior here is + that it maps directly to a host, thus direct. + """ + def test(self, key): + if '.' in key: + return True + return False + + +class TopicBinding(Binding): + """ + Where a 'bare' key without dots. + AMQP generally considers topic exchanges to be those *with* dots, + but we deviate here in terminology as the behavior here matches + that of a topic exchange (whereas where there are dots, behavior + matches that of a direct exchange. + """ + def test(self, key): + if '.' not in key: + return True + return False + + +class FanoutBinding(Binding): + """Match on fanout keys, where key starts with 'fanout.' string.""" + def test(self, key): + if key.startswith('fanout~'): + return True + return False + + +class StubExchange(Exchange): + """Exchange that does nothing.""" + def run(self, key): + return [(key, None)] + + +class RingExchange(Exchange): + """ + Match Maker where hosts are loaded from a static file containing + a hashmap (JSON formatted). + + __init__ takes optional ring dictionary argument, otherwise + loads the ringfile from CONF.mathcmaker_ringfile. + """ + def __init__(self, ring=None): + super(RingExchange, self).__init__() + + if ring: + self.ring = ring + else: + fh = open(CONF.matchmaker_ringfile, 'r') + self.ring = json.load(fh) + fh.close() + + self.ring0 = {} + for k in self.ring.keys(): + self.ring0[k] = itertools.cycle(self.ring[k]) + + def _ring_has(self, key): + if key in self.ring0: + return True + return False + + +class RoundRobinRingExchange(RingExchange): + """A Topic Exchange based on a hashmap.""" + def __init__(self, ring=None): + super(RoundRobinRingExchange, self).__init__(ring) + + def run(self, key): + if not self._ring_has(key): + LOG.warn( + _("No key defining hosts for topic '%s', " + "see ringfile") % (key, ) + ) + return [] + host = next(self.ring0[key]) + return [(key + '.' + host, host)] + + +class FanoutRingExchange(RingExchange): + """Fanout Exchange based on a hashmap.""" + def __init__(self, ring=None): + super(FanoutRingExchange, self).__init__(ring) + + def run(self, key): + # Assume starts with "fanout~", strip it for lookup. + nkey = key.split('fanout~')[1:][0] + if not self._ring_has(nkey): + LOG.warn( + _("No key defining hosts for topic '%s', " + "see ringfile") % (nkey, ) + ) + return [] + return map(lambda x: (key + '.' + x, x), self.ring[nkey]) + + +class LocalhostExchange(Exchange): + """Exchange where all direct topics are local.""" + def __init__(self, host='localhost'): + self.host = host + super(Exchange, self).__init__() + + def run(self, key): + return [('.'.join((key.split('.')[0], self.host)), self.host)] + + +class DirectExchange(Exchange): + """ + Exchange where all topic keys are split, sending to second half. + i.e. 
"compute.host" sends a message to "compute.host" running on "host" + """ + def __init__(self): + super(Exchange, self).__init__() + + def run(self, key): + e = key.split('.', 1)[1] + return [(key, e)] + + +class MatchMakerRing(MatchMakerBase): + """ + Match Maker where hosts are loaded from a static hashmap. + """ + def __init__(self, ring=None): + super(MatchMakerRing, self).__init__() + self.add_binding(FanoutBinding(), FanoutRingExchange(ring)) + self.add_binding(DirectBinding(), DirectExchange()) + self.add_binding(TopicBinding(), RoundRobinRingExchange(ring)) + + +class MatchMakerLocalhost(MatchMakerBase): + """ + Match Maker where all bare topics resolve to localhost. + Useful for testing. + """ + def __init__(self, host='localhost'): + super(MatchMakerLocalhost, self).__init__() + self.add_binding(FanoutBinding(), LocalhostExchange(host)) + self.add_binding(DirectBinding(), DirectExchange()) + self.add_binding(TopicBinding(), LocalhostExchange(host)) + + +class MatchMakerStub(MatchMakerBase): + """ + Match Maker where topics are untouched. + Useful for testing, or for AMQP/brokered queues. + Will not work where knowledge of hosts is known (i.e. zeromq) + """ + def __init__(self): + super(MatchMakerLocalhost, self).__init__() + + self.add_binding(FanoutBinding(), StubExchange()) + self.add_binding(DirectBinding(), StubExchange()) + self.add_binding(TopicBinding(), StubExchange()) diff --git a/cinder/openstack/common/rpc/matchmaker_redis.py b/cinder/openstack/common/rpc/matchmaker_redis.py new file mode 100644 index 0000000000..87f9fb2956 --- /dev/null +++ b/cinder/openstack/common/rpc/matchmaker_redis.py @@ -0,0 +1,149 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 Cloudscaling Group, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +The MatchMaker classes should accept a Topic or Fanout exchange key and +return keys for direct exchanges, per (approximate) AMQP parlance. +""" + +from oslo.config import cfg + +from cinder.openstack.common import importutils +from cinder.openstack.common import log as logging +from cinder.openstack.common.rpc import matchmaker as mm_common + +redis = importutils.try_import('redis') + + +matchmaker_redis_opts = [ + cfg.StrOpt('host', + default='127.0.0.1', + help='Host to locate redis'), + cfg.IntOpt('port', + default=6379, + help='Use this port to connect to redis host.'), + cfg.StrOpt('password', + default=None, + help='Password for Redis server. (optional)'), +] + +CONF = cfg.CONF +opt_group = cfg.OptGroup(name='matchmaker_redis', + title='Options for Redis-based MatchMaker') +CONF.register_group(opt_group) +CONF.register_opts(matchmaker_redis_opts, opt_group) +LOG = logging.getLogger(__name__) + + +class RedisExchange(mm_common.Exchange): + def __init__(self, matchmaker): + self.matchmaker = matchmaker + self.redis = matchmaker.redis + super(RedisExchange, self).__init__() + + +class RedisTopicExchange(RedisExchange): + """ + Exchange where all topic keys are split, sending to second half. + i.e. 
"compute.host" sends a message to "compute" running on "host" + """ + def run(self, topic): + while True: + member_name = self.redis.srandmember(topic) + + if not member_name: + # If this happens, there are no + # longer any members. + break + + if not self.matchmaker.is_alive(topic, member_name): + continue + + host = member_name.split('.', 1)[1] + return [(member_name, host)] + return [] + + +class RedisFanoutExchange(RedisExchange): + """ + Return a list of all hosts. + """ + def run(self, topic): + topic = topic.split('~', 1)[1] + hosts = self.redis.smembers(topic) + good_hosts = filter( + lambda host: self.matchmaker.is_alive(topic, host), hosts) + + return [(x, x.split('.', 1)[1]) for x in good_hosts] + + +class MatchMakerRedis(mm_common.HeartbeatMatchMakerBase): + """ + MatchMaker registering and looking-up hosts with a Redis server. + """ + def __init__(self): + super(MatchMakerRedis, self).__init__() + + if not redis: + raise ImportError("Failed to import module redis.") + + self.redis = redis.StrictRedis( + host=CONF.matchmaker_redis.host, + port=CONF.matchmaker_redis.port, + password=CONF.matchmaker_redis.password) + + self.add_binding(mm_common.FanoutBinding(), RedisFanoutExchange(self)) + self.add_binding(mm_common.DirectBinding(), mm_common.DirectExchange()) + self.add_binding(mm_common.TopicBinding(), RedisTopicExchange(self)) + + def ack_alive(self, key, host): + topic = "%s.%s" % (key, host) + if not self.redis.expire(topic, CONF.matchmaker_heartbeat_ttl): + # If we could not update the expiration, the key + # might have been pruned. Re-register, creating a new + # key in Redis. + self.register(self.topic_host[host], host) + + def is_alive(self, topic, host): + if self.redis.ttl(host) == -1: + self.expire(topic, host) + return False + return True + + def expire(self, topic, host): + with self.redis.pipeline() as pipe: + pipe.multi() + pipe.delete(host) + pipe.srem(topic, host) + pipe.execute() + + def backend_register(self, key, key_host): + with self.redis.pipeline() as pipe: + pipe.multi() + pipe.sadd(key, key_host) + + # No value is needed, we just + # care if it exists. Sets aren't viable + # because only keys can expire. + pipe.set(key_host, '') + + pipe.execute() + + def backend_unregister(self, key, key_host): + with self.redis.pipeline() as pipe: + pipe.multi() + pipe.srem(key, key_host) + pipe.delete(key_host) + pipe.execute() diff --git a/cinder/openstack/common/rpc/proxy.py b/cinder/openstack/common/rpc/proxy.py new file mode 100644 index 0000000000..4ddc5c936a --- /dev/null +++ b/cinder/openstack/common/rpc/proxy.py @@ -0,0 +1,179 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +A helper class for proxy objects to remote APIs. + +For more information about rpc API version numbers, see: + rpc/dispatcher.py +""" + + +from cinder.openstack.common import rpc + + +class RpcProxy(object): + """A helper class for rpc clients. + + This class is a wrapper around the RPC client API. 
It allows you to + specify the topic and API version in a single place. This is intended to + be used as a base class for a class that implements the client side of an + rpc API. + """ + + def __init__(self, topic, default_version): + """Initialize an RpcProxy. + + :param topic: The topic to use for all messages. + :param default_version: The default API version to request in all + outgoing messages. This can be overridden on a per-message + basis. + """ + self.topic = topic + self.default_version = default_version + super(RpcProxy, self).__init__() + + def _set_version(self, msg, vers): + """Helper method to set the version in a message. + + :param msg: The message having a version added to it. + :param vers: The version number to add to the message. + """ + msg['version'] = vers if vers else self.default_version + + def _get_topic(self, topic): + """Return the topic to use for a message.""" + return topic if topic else self.topic + + @staticmethod + def make_namespaced_msg(method, namespace, **kwargs): + return {'method': method, 'namespace': namespace, 'args': kwargs} + + @staticmethod + def make_msg(method, **kwargs): + return RpcProxy.make_namespaced_msg(method, None, **kwargs) + + def call(self, context, msg, topic=None, version=None, timeout=None): + """rpc.call() a remote method. + + :param context: The request context + :param msg: The message to send, including the method and args. + :param topic: Override the topic for this message. + :param version: (Optional) Override the requested API version in this + message. + :param timeout: (Optional) A timeout to use when waiting for the + response. If no timeout is specified, a default timeout will be + used that is usually sufficient. + + :returns: The return value from the remote method. + """ + self._set_version(msg, version) + real_topic = self._get_topic(topic) + try: + return rpc.call(context, real_topic, msg, timeout) + except rpc.common.Timeout as exc: + raise rpc.common.Timeout( + exc.info, real_topic, msg.get('method')) + + def multicall(self, context, msg, topic=None, version=None, timeout=None): + """rpc.multicall() a remote method. + + :param context: The request context + :param msg: The message to send, including the method and args. + :param topic: Override the topic for this message. + :param version: (Optional) Override the requested API version in this + message. + :param timeout: (Optional) A timeout to use when waiting for the + response. If no timeout is specified, a default timeout will be + used that is usually sufficient. + + :returns: An iterator that lets you process each of the returned values + from the remote method as they arrive. + """ + self._set_version(msg, version) + real_topic = self._get_topic(topic) + try: + return rpc.multicall(context, real_topic, msg, timeout) + except rpc.common.Timeout as exc: + raise rpc.common.Timeout( + exc.info, real_topic, msg.get('method')) + + def cast(self, context, msg, topic=None, version=None): + """rpc.cast() a remote method. + + :param context: The request context + :param msg: The message to send, including the method and args. + :param topic: Override the topic for this message. + :param version: (Optional) Override the requested API version in this + message. + + :returns: None. rpc.cast() does not wait on any return value from the + remote method. + """ + self._set_version(msg, version) + rpc.cast(context, self._get_topic(topic), msg) + + def fanout_cast(self, context, msg, topic=None, version=None): + """rpc.fanout_cast() a remote method. 
+ + :param context: The request context + :param msg: The message to send, including the method and args. + :param topic: Override the topic for this message. + :param version: (Optional) Override the requested API version in this + message. + + :returns: None. rpc.fanout_cast() does not wait on any return value + from the remote method. + """ + self._set_version(msg, version) + rpc.fanout_cast(context, self._get_topic(topic), msg) + + def cast_to_server(self, context, server_params, msg, topic=None, + version=None): + """rpc.cast_to_server() a remote method. + + :param context: The request context + :param server_params: Server parameters. See rpc.cast_to_server() for + details. + :param msg: The message to send, including the method and args. + :param topic: Override the topic for this message. + :param version: (Optional) Override the requested API version in this + message. + + :returns: None. rpc.cast_to_server() does not wait on any + return values. + """ + self._set_version(msg, version) + rpc.cast_to_server(context, server_params, self._get_topic(topic), msg) + + def fanout_cast_to_server(self, context, server_params, msg, topic=None, + version=None): + """rpc.fanout_cast_to_server() a remote method. + + :param context: The request context + :param server_params: Server parameters. See rpc.cast_to_server() for + details. + :param msg: The message to send, including the method and args. + :param topic: Override the topic for this message. + :param version: (Optional) Override the requested API version in this + message. + + :returns: None. rpc.fanout_cast_to_server() does not wait on any + return values. + """ + self._set_version(msg, version) + rpc.fanout_cast_to_server(context, server_params, + self._get_topic(topic), msg) diff --git a/cinder/openstack/common/rpc/service.py b/cinder/openstack/common/rpc/service.py new file mode 100644 index 0000000000..b1f997d38f --- /dev/null +++ b/cinder/openstack/common/rpc/service.py @@ -0,0 +1,75 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# Copyright 2011 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from cinder.openstack.common.gettextutils import _ +from cinder.openstack.common import log as logging +from cinder.openstack.common import rpc +from cinder.openstack.common.rpc import dispatcher as rpc_dispatcher +from cinder.openstack.common import service + + +LOG = logging.getLogger(__name__) + + +class Service(service.Service): + """Service object for binaries running on hosts. 
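+
+    For example (names hypothetical), a service with topic 'volume' on
+    host 'node1' consumes from 'volume', 'volume.node1', and a 'volume'
+    fanout queue; see start() below.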
+ + A service enables rpc by listening to queues based on topic and host.""" + def __init__(self, host, topic, manager=None): + super(Service, self).__init__() + self.host = host + self.topic = topic + if manager is None: + self.manager = self + else: + self.manager = manager + + def start(self): + super(Service, self).start() + + self.conn = rpc.create_connection(new=True) + LOG.debug(_("Creating Consumer connection for Service %s") % + self.topic) + + dispatcher = rpc_dispatcher.RpcDispatcher([self.manager]) + + # Share this same connection for these Consumers + self.conn.create_consumer(self.topic, dispatcher, fanout=False) + + node_topic = '%s.%s' % (self.topic, self.host) + self.conn.create_consumer(node_topic, dispatcher, fanout=False) + + self.conn.create_consumer(self.topic, dispatcher, fanout=True) + + # Hook to allow the manager to do other initializations after + # the rpc connection is created. + if callable(getattr(self.manager, 'initialize_service_hook', None)): + self.manager.initialize_service_hook(self) + + # Consume from all consumers in a thread + self.conn.consume_in_thread() + + def stop(self): + # Try to shut the connection down, but if we get any sort of + # errors, go ahead and ignore them.. as we're shutting down anyway + try: + self.conn.close() + except Exception: + pass + super(Service, self).stop() diff --git a/cinder/openstack/common/rpc/zmq_receiver.py b/cinder/openstack/common/rpc/zmq_receiver.py new file mode 100755 index 0000000000..e4c6ee30ed --- /dev/null +++ b/cinder/openstack/common/rpc/zmq_receiver.py @@ -0,0 +1,41 @@ +#!/usr/bin/env python +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import eventlet +eventlet.monkey_patch() + +import contextlib +import sys + +from oslo.config import cfg + +from cinder.openstack.common import log as logging +from cinder.openstack.common import rpc +from cinder.openstack.common.rpc import impl_zmq + +CONF = cfg.CONF +CONF.register_opts(rpc.rpc_opts) +CONF.register_opts(impl_zmq.zmq_opts) + + +def main(): + CONF(sys.argv[1:], project='oslo') + logging.setup("oslo") + + with contextlib.closing(impl_zmq.ZmqProxy(CONF)) as reactor: + reactor.consume_in_thread() + reactor.wait() diff --git a/cinder/openstack/common/scheduler/__init__.py b/cinder/openstack/common/scheduler/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/cinder/openstack/common/scheduler/filter.py b/cinder/openstack/common/scheduler/filter.py new file mode 100644 index 0000000000..52c18afa37 --- /dev/null +++ b/cinder/openstack/common/scheduler/filter.py @@ -0,0 +1,71 @@ +# Copyright (c) 2011-2012 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Filter support
+"""
+
+import inspect
+
+from stevedore import extension
+
+
+class BaseFilter(object):
+    """Base class for all filter classes."""
+    def _filter_one(self, obj, filter_properties):
+        """Return True if it passes the filter, False otherwise.
+        Override this in a subclass.
+        """
+        return True
+
+    def filter_all(self, filter_obj_list, filter_properties):
+        """Yield objects that pass the filter.
+
+        Can be overridden in a subclass, if you need to base filtering
+        decisions on all objects. Otherwise, one can just override
+        _filter_one() to filter a single object.
+        """
+        for obj in filter_obj_list:
+            if self._filter_one(obj, filter_properties):
+                yield obj
+
+
+class BaseFilterHandler(object):
+    """Base class to handle loading filter classes.
+
+    This class should be subclassed where one needs to use filters.
+    """
+    def __init__(self, filter_class_type, filter_namespace):
+        self.namespace = filter_namespace
+        self.filter_class_type = filter_class_type
+        self.filter_manager = extension.ExtensionManager(filter_namespace)
+
+    def _is_correct_class(self, obj):
+        """Return whether an object is a class of the correct type and
+        is not prefixed with an underscore.
+        """
+        return (inspect.isclass(obj) and
+                not obj.__name__.startswith('_') and
+                issubclass(obj, self.filter_class_type))
+
+    def get_all_classes(self):
+        return [x.plugin for x in self.filter_manager
+                if self._is_correct_class(x.plugin)]
+
+    def get_filtered_objects(self, filter_classes, objs,
+                             filter_properties):
+        for filter_cls in filter_classes:
+            objs = filter_cls().filter_all(objs, filter_properties)
+        return list(objs)
diff --git a/cinder/openstack/common/scheduler/filters/__init__.py b/cinder/openstack/common/scheduler/filters/__init__.py
new file mode 100644
index 0000000000..40bf096a41
--- /dev/null
+++ b/cinder/openstack/common/scheduler/filters/__init__.py
@@ -0,0 +1,41 @@
+# Copyright (c) 2011 OpenStack Foundation.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Scheduler host filters
+"""
+
+from cinder.openstack.common import log as logging
+from cinder.openstack.common.scheduler import filter
+
+LOG = logging.getLogger(__name__)
+
+
+class BaseHostFilter(filter.BaseFilter):
+    """Base class for host filters."""
+    def _filter_one(self, obj, filter_properties):
+        """Return True if the object passes the filter, otherwise False."""
+        return self.host_passes(obj, filter_properties)
+
+    def host_passes(self, host_state, filter_properties):
+        """Return True if the HostState passes the filter, otherwise False.
+        Override this in a subclass.
+ """ + raise NotImplementedError() + + +class HostFilterHandler(filter.BaseFilterHandler): + def __init__(self, namespace): + super(HostFilterHandler, self).__init__(BaseHostFilter, namespace) diff --git a/cinder/openstack/common/scheduler/filters/availability_zone_filter.py b/cinder/openstack/common/scheduler/filters/availability_zone_filter.py new file mode 100644 index 0000000000..0c3ca1ef74 --- /dev/null +++ b/cinder/openstack/common/scheduler/filters/availability_zone_filter.py @@ -0,0 +1,30 @@ +# Copyright (c) 2011-2012 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +from cinder.openstack.common.scheduler import filters + + +class AvailabilityZoneFilter(filters.BaseHostFilter): + """Filters Hosts by availability zone.""" + + def host_passes(self, host_state, filter_properties): + spec = filter_properties.get('request_spec', {}) + props = spec.get('resource_properties', []) + availability_zone = props.get('availability_zone') + + if availability_zone: + return availability_zone == host_state.service['availability_zone'] + return True diff --git a/cinder/openstack/common/scheduler/filters/capabilities_filter.py b/cinder/openstack/common/scheduler/filters/capabilities_filter.py new file mode 100644 index 0000000000..ae859e927a --- /dev/null +++ b/cinder/openstack/common/scheduler/filters/capabilities_filter.py @@ -0,0 +1,63 @@ +# Copyright (c) 2011 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from cinder.openstack.common import log as logging +from cinder.openstack.common.scheduler import filters +from cinder.openstack.common.scheduler.filters import extra_specs_ops + + +LOG = logging.getLogger(__name__) + + +class CapabilitiesFilter(filters.BaseHostFilter): + """HostFilter to work with resource (instance & volume) type records.""" + + def _satisfies_extra_specs(self, capabilities, resource_type): + """Check that the capabilities provided by the services + satisfy the extra specs associated with the resource type.""" + extra_specs = resource_type.get('extra_specs', []) + if not extra_specs: + return True + + for key, req in extra_specs.iteritems(): + # Either not scope format, or in capabilities scope + scope = key.split(':') + if len(scope) > 1 and scope[0] != "capabilities": + continue + elif scope[0] == "capabilities": + del scope[0] + + cap = capabilities + for index in range(0, len(scope)): + try: + cap = cap.get(scope[index], None) + except AttributeError: + return False + if cap is None: + return False + if not extra_specs_ops.match(cap, req): + return False + return True + + def host_passes(self, host_state, filter_properties): + """Return True if host_state satisfies the resource type's + extra specs.""" + # Note(zhiteng) Currently only Cinder and Nova are using + # this filter, so the resource type is either instance or + # volume. + resource_type = filter_properties.get('resource_type') + if not self._satisfies_extra_specs(host_state.capabilities, + resource_type): + return False + return True diff --git a/cinder/openstack/common/scheduler/filters/extra_specs_ops.py b/cinder/openstack/common/scheduler/filters/extra_specs_ops.py new file mode 100644 index 0000000000..b3730f5af4 --- /dev/null +++ b/cinder/openstack/common/scheduler/filters/extra_specs_ops.py @@ -0,0 +1,72 @@ +# Copyright (c) 2011 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import operator + +from cinder.openstack.common import strutils + +# 1. The following operations are supported: +# =, s==, s!=, s>=, s>, s<=, s<, <in>, <is>, <or>, ==, !=, >=, <= +# 2. Note that <or> is handled in a different way below. +# 3. If the first word in the extra_specs is not one of the operators, +# it is ignored.
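+# For example (a sketch; the values are illustrative only): +# match("600", ">= 400") -> True (numeric comparison) +# match("ssd", "s== ssd") -> True (string comparison) +# match("fc", "<or> iscsi <or> fc") -> True (alternation) +# match("true", "<is> True") -> True (boolean comparison) +# match("striped extent", "<in> striped") -> True (substring test) +# Note that plain '=' is implemented below as >= on floats, not equality.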
+_op_methods = {'=': lambda x, y: float(x) >= float(y), + '<in>': lambda x, y: y in x, + '<is>': lambda x, y: (strutils.bool_from_string(x) is + strutils.bool_from_string(y)), + '==': lambda x, y: float(x) == float(y), + '!=': lambda x, y: float(x) != float(y), + '>=': lambda x, y: float(x) >= float(y), + '<=': lambda x, y: float(x) <= float(y), + 's==': operator.eq, + 's!=': operator.ne, + 's<': operator.lt, + 's<=': operator.le, + 's>': operator.gt, + 's>=': operator.ge} + + +def match(value, req): + words = req.split() + + op = method = None + if words: + op = words.pop(0) + method = _op_methods.get(op) + + if op != '<or>' and not method: + return value == req + + if value is None: + return False + + if op == '<or>': # Ex: <or> v1 <or> v2 <or> v3 + while True: + if words.pop(0) == value: + return True + if not words: + break + op = words.pop(0) # remove a keyword <or> + if not words: + break + return False + + try: + if words and method(value, words[0]): + return True + except ValueError: + pass + + return False diff --git a/cinder/openstack/common/scheduler/filters/json_filter.py b/cinder/openstack/common/scheduler/filters/json_filter.py new file mode 100644 index 0000000000..370f23b2ae --- /dev/null +++ b/cinder/openstack/common/scheduler/filters/json_filter.py @@ -0,0 +1,150 @@ +# Copyright (c) 2011 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +import operator + +from cinder.openstack.common import jsonutils +from cinder.openstack.common.scheduler import filters + + +class JsonFilter(filters.BaseHostFilter): + """Host Filter to allow simple JSON-based grammar for + selecting hosts. + """ + def _op_compare(self, args, op): + """Returns True if the specified operator can successfully + compare the first item in the args with all the rest. Will + return False if only one item is in the list.
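+ For example, _less_than([1, 2, 3]) checks 1 < 2 and 1 < 3.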
+ """ + if len(args) < 2: + return False + if op is operator.contains: + bad = args[0] not in args[1:] + else: + bad = [arg for arg in args[1:] + if not op(args[0], arg)] + return not bool(bad) + + def _equals(self, args): + """First term is == all the other terms.""" + return self._op_compare(args, operator.eq) + + def _less_than(self, args): + """First term is < all the other terms.""" + return self._op_compare(args, operator.lt) + + def _greater_than(self, args): + """First term is > all the other terms.""" + return self._op_compare(args, operator.gt) + + def _in(self, args): + """First term is in set of remaining terms""" + return self._op_compare(args, operator.contains) + + def _less_than_equal(self, args): + """First term is <= all the other terms.""" + return self._op_compare(args, operator.le) + + def _greater_than_equal(self, args): + """First term is >= all the other terms.""" + return self._op_compare(args, operator.ge) + + def _not(self, args): + """Flip each of the arguments.""" + return [not arg for arg in args] + + def _or(self, args): + """True if any arg is True.""" + return any(args) + + def _and(self, args): + """True if all args are True.""" + return all(args) + + commands = { + '=': _equals, + '<': _less_than, + '>': _greater_than, + 'in': _in, + '<=': _less_than_equal, + '>=': _greater_than_equal, + 'not': _not, + 'or': _or, + 'and': _and, + } + + def _parse_string(self, string, host_state): + """Strings prefixed with $ are capability lookups in the + form '$variable' where 'variable' is an attribute in the + HostState class. If $variable is a dictionary, you may + use: $variable.dictkey + """ + if not string: + return None + if not string.startswith("$"): + return string + + path = string[1:].split(".") + obj = getattr(host_state, path[0], None) + if obj is None: + return None + for item in path[1:]: + obj = obj.get(item, None) + if obj is None: + return None + return obj + + def _process_filter(self, query, host_state): + """Recursively parse the query structure.""" + if not query: + return True + cmd = query[0] + method = self.commands[cmd] + cooked_args = [] + for arg in query[1:]: + if isinstance(arg, list): + arg = self._process_filter(arg, host_state) + elif isinstance(arg, basestring): + arg = self._parse_string(arg, host_state) + if arg is not None: + cooked_args.append(arg) + result = method(self, cooked_args) + return result + + def host_passes(self, host_state, filter_properties): + """Return a list of hosts that can fulfill the requirements + specified in the query. + """ + # TODO(zhiteng) Add description for filter_properties structure + # and scheduler_hints. + try: + query = filter_properties['scheduler_hints']['query'] + except KeyError: + query = None + if not query: + return True + + # NOTE(comstud): Not checking capabilities or service for + # enabled/disabled so that a provided json filter can decide + + result = self._process_filter(jsonutils.loads(query), host_state) + if isinstance(result, list): + # If any succeeded, include the host + result = any(result) + if result: + # Filter it out. + return True + return False diff --git a/cinder/openstack/common/scheduler/weight.py b/cinder/openstack/common/scheduler/weight.py new file mode 100644 index 0000000000..82f1d25ee3 --- /dev/null +++ b/cinder/openstack/common/scheduler/weight.py @@ -0,0 +1,91 @@ +# Copyright (c) 2011-2012 OpenStack Foundation. +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Pluggable Weighing support +""" + +import inspect + +from stevedore import extension + + +class WeighedObject(object): + """Object with weight information.""" + def __init__(self, obj, weight): + self.obj = obj + self.weight = weight + + def __repr__(self): + return "<WeighedObject '%s': %s>" % (self.obj, self.weight) + + +class BaseWeigher(object): + """Base class for pluggable weighers.""" + def _weight_multiplier(self): + """How weighted this weigher should be. Normally this would + be overridden in a subclass based on a config value. + """ + return 1.0 + + def _weigh_object(self, obj, weight_properties): + """Override in a subclass to specify a weight for a specific + object. + """ + return 0.0 + + def weigh_objects(self, weighed_obj_list, weight_properties): + """Weigh multiple objects. Override in a subclass if you need + access to all objects in order to manipulate weights. + """ + constant = self._weight_multiplier() + for obj in weighed_obj_list: + obj.weight += (constant * + self._weigh_object(obj.obj, weight_properties)) + + +class BaseWeightHandler(object): + object_class = WeighedObject + + def __init__(self, weighed_object_type, weight_namespace): + self.namespace = weight_namespace + self.weighed_object_type = weighed_object_type + self.weight_manager = extension.ExtensionManager(weight_namespace) + + def _is_correct_class(self, obj): + """Return whether an object is a class of the correct type and + is not prefixed with an underscore. + """ + return (inspect.isclass(obj) and + not obj.__name__.startswith('_') and + issubclass(obj, self.weighed_object_type)) + + def get_all_classes(self): + return [x.plugin for x in self.weight_manager + if self._is_correct_class(x.plugin)] + + def get_weighed_objects(self, weigher_classes, obj_list, + weighing_properties): + """Return a sorted (highest score first) list of WeighedObjects.""" + + if not obj_list: + return [] + + weighed_objs = [self.object_class(obj, 0.0) for obj in obj_list] + for weigher_cls in weigher_classes: + weigher = weigher_cls() + weigher.weigh_objects(weighed_objs, weighing_properties) + + return sorted(weighed_objs, key=lambda x: x.weight, reverse=True) diff --git a/cinder/openstack/common/scheduler/weights/__init__.py b/cinder/openstack/common/scheduler/weights/__init__.py new file mode 100644 index 0000000000..a2743577d7 --- /dev/null +++ b/cinder/openstack/common/scheduler/weights/__init__.py @@ -0,0 +1,45 @@ +# Copyright (c) 2011 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Scheduler host weights +""" + + +from cinder.openstack.common.scheduler import weight + + +class WeighedHost(weight.WeighedObject): + def to_dict(self): + return { + 'weight': self.weight, + 'host': self.obj.host, + } + + def __repr__(self): + return ("WeighedHost [host: %s, weight: %s]" % + (self.obj.host, self.weight)) + + +class BaseHostWeigher(weight.BaseWeigher): + """Base class for host weights.""" + pass + + +class HostWeightHandler(weight.BaseWeightHandler): + object_class = WeighedHost + + def __init__(self, namespace): + super(HostWeightHandler, self).__init__(BaseHostWeigher, namespace) diff --git a/cinder/openstack/common/service.py b/cinder/openstack/common/service.py new file mode 100644 index 0000000000..8600a0b087 --- /dev/null +++ b/cinder/openstack/common/service.py @@ -0,0 +1,332 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# Copyright 2011 Justin Santa Barbara +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Generic Node base class for all workers that run on hosts.""" + +import errno +import os +import random +import signal +import sys +import time + +import eventlet +import logging as std_logging +from oslo.config import cfg + +from cinder.openstack.common import eventlet_backdoor +from cinder.openstack.common.gettextutils import _ +from cinder.openstack.common import importutils +from cinder.openstack.common import log as logging +from cinder.openstack.common import threadgroup + + +rpc = importutils.try_import('cinder.openstack.common.rpc') +CONF = cfg.CONF +LOG = logging.getLogger(__name__) + + +class Launcher(object): + """Launch one or more services and wait for them to complete.""" + + def __init__(self): + """Initialize the service launcher. + + :returns: None + + """ + self._services = threadgroup.ThreadGroup() + eventlet_backdoor.initialize_if_enabled() + + @staticmethod + def run_service(service): + """Start and wait for a service to finish. + + :param service: service to run and wait for. + :returns: None + + """ + service.start() + service.wait() + + def launch_service(self, service): + """Load and start the given service. + + :param service: The service you would like to start. + :returns: None + + """ + self._services.add_thread(self.run_service, service) + + def stop(self): + """Stop all services which are currently running. + + :returns: None + + """ + self._services.stop() + + def wait(self): + """Waits until all services have been stopped, and then returns. 
+ + :returns: None + + """ + self._services.wait() + + +class SignalExit(SystemExit): + def __init__(self, signo, exccode=1): + super(SignalExit, self).__init__(exccode) + self.signo = signo + + +class ServiceLauncher(Launcher): + def _handle_signal(self, signo, frame): + # Allow the process to be killed again and die from natural causes + signal.signal(signal.SIGTERM, signal.SIG_DFL) + signal.signal(signal.SIGINT, signal.SIG_DFL) + + raise SignalExit(signo) + + def wait(self): + signal.signal(signal.SIGTERM, self._handle_signal) + signal.signal(signal.SIGINT, self._handle_signal) + + LOG.debug(_('Full set of CONF:')) + CONF.log_opt_values(LOG, std_logging.DEBUG) + + status = None + try: + super(ServiceLauncher, self).wait() + except SignalExit as exc: + signame = {signal.SIGTERM: 'SIGTERM', + signal.SIGINT: 'SIGINT'}[exc.signo] + LOG.info(_('Caught %s, exiting'), signame) + status = exc.code + except SystemExit as exc: + status = exc.code + finally: + if rpc: + rpc.cleanup() + self.stop() + return status + + +class ServiceWrapper(object): + def __init__(self, service, workers): + self.service = service + self.workers = workers + self.children = set() + self.forktimes = [] + + +class ProcessLauncher(object): + def __init__(self): + self.children = {} + self.sigcaught = None + self.running = True + rfd, self.writepipe = os.pipe() + self.readpipe = eventlet.greenio.GreenPipe(rfd, 'r') + + signal.signal(signal.SIGTERM, self._handle_signal) + signal.signal(signal.SIGINT, self._handle_signal) + + def _handle_signal(self, signo, frame): + self.sigcaught = signo + self.running = False + + # Allow the process to be killed again and die from natural causes + signal.signal(signal.SIGTERM, signal.SIG_DFL) + signal.signal(signal.SIGINT, signal.SIG_DFL) + + def _pipe_watcher(self): + # This will block until the write end is closed when the parent + # dies unexpectedly + self.readpipe.read() + + LOG.info(_('Parent process has died unexpectedly, exiting')) + + sys.exit(1) + + def _child_process(self, service): + # Setup child signal handlers differently + def _sigterm(*args): + signal.signal(signal.SIGTERM, signal.SIG_DFL) + raise SignalExit(signal.SIGTERM) + + signal.signal(signal.SIGTERM, _sigterm) + # Block SIGINT and let the parent send us a SIGTERM + signal.signal(signal.SIGINT, signal.SIG_IGN) + + # Reopen the eventlet hub to make sure we don't share an epoll + # fd with parent and/or siblings, which would be bad + eventlet.hubs.use_hub() + + # Close write to ensure only parent has it open + os.close(self.writepipe) + # Create greenthread to watch for parent to close pipe + eventlet.spawn_n(self._pipe_watcher) + + # Reseed random number generator + random.seed() + + launcher = Launcher() + launcher.run_service(service) + + def _start_child(self, wrap): + if len(wrap.forktimes) > wrap.workers: + # Limit ourselves to one process a second (over the period of + # number of workers * 1 second). This will allow workers to + # start up quickly but ensure we don't fork off children that + # die instantly too quickly. + if time.time() - wrap.forktimes[0] < wrap.workers: + LOG.info(_('Forking too fast, sleeping')) + time.sleep(1) + + wrap.forktimes.pop(0) + + wrap.forktimes.append(time.time()) + + pid = os.fork() + if pid == 0: + # NOTE(johannes): All exceptions are caught to ensure this + # doesn't fallback into the loop spawning children. It would + # be bad for a child to spawn more children. 
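+ # The child reports back through os._exit(status) below: 0 for a + # clean stop, the SystemExit code for SignalExit/SystemExit, and + # 2 for an unhandled exception.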
+ status = 0 + try: + self._child_process(wrap.service) + except SignalExit as exc: + signame = {signal.SIGTERM: 'SIGTERM', + signal.SIGINT: 'SIGINT'}[exc.signo] + LOG.info(_('Caught %s, exiting'), signame) + status = exc.code + except SystemExit as exc: + status = exc.code + except BaseException: + LOG.exception(_('Unhandled exception')) + status = 2 + finally: + wrap.service.stop() + + os._exit(status) + + LOG.info(_('Started child %d'), pid) + + wrap.children.add(pid) + self.children[pid] = wrap + + return pid + + def launch_service(self, service, workers=1): + wrap = ServiceWrapper(service, workers) + + LOG.info(_('Starting %d workers'), wrap.workers) + while self.running and len(wrap.children) < wrap.workers: + self._start_child(wrap) + + def _wait_child(self): + try: + # Don't block if no child processes have exited + pid, status = os.waitpid(0, os.WNOHANG) + if not pid: + return None + except OSError as exc: + if exc.errno not in (errno.EINTR, errno.ECHILD): + raise + return None + + if os.WIFSIGNALED(status): + sig = os.WTERMSIG(status) + LOG.info(_('Child %(pid)d killed by signal %(sig)d'), + dict(pid=pid, sig=sig)) + else: + code = os.WEXITSTATUS(status) + LOG.info(_('Child %(pid)s exited with status %(code)d'), + dict(pid=pid, code=code)) + + if pid not in self.children: + LOG.warning(_('pid %d not in child list'), pid) + return None + + wrap = self.children.pop(pid) + wrap.children.remove(pid) + return wrap + + def wait(self): + """Loop waiting on children to die and respawning as necessary""" + + LOG.debug(_('Full set of CONF:')) + CONF.log_opt_values(LOG, std_logging.DEBUG) + + while self.running: + wrap = self._wait_child() + if not wrap: + # Yield to other threads if no children have exited + # Sleep for a short time to avoid excessive CPU usage + # (see bug #1095346) + eventlet.greenthread.sleep(.01) + continue + + while self.running and len(wrap.children) < wrap.workers: + self._start_child(wrap) + + if self.sigcaught: + signame = {signal.SIGTERM: 'SIGTERM', + signal.SIGINT: 'SIGINT'}[self.sigcaught] + LOG.info(_('Caught %s, stopping children'), signame) + + for pid in self.children: + try: + os.kill(pid, signal.SIGTERM) + except OSError as exc: + if exc.errno != errno.ESRCH: + raise + + # Wait for children to die + if self.children: + LOG.info(_('Waiting on %d children to exit'), len(self.children)) + while self.children: + self._wait_child() + + +class Service(object): + """Service object for binaries running on hosts.""" + + def __init__(self, threads=1000): + self.tg = threadgroup.ThreadGroup(threads) + + def start(self): + pass + + def stop(self): + self.tg.stop() + + def wait(self): + self.tg.wait() + + +def launch(service, workers=None): + if workers: + launcher = ProcessLauncher() + launcher.launch_service(service, workers=workers) + else: + launcher = ServiceLauncher() + launcher.launch_service(service) + return launcher diff --git a/cinder/openstack/common/strutils.py b/cinder/openstack/common/strutils.py new file mode 100644 index 0000000000..a4e3899725 --- /dev/null +++ b/cinder/openstack/common/strutils.py @@ -0,0 +1,150 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +System-level utilities and helper functions. +""" + +import sys + +from cinder.openstack.common.gettextutils import _ + + +TRUE_STRINGS = ('1', 't', 'true', 'on', 'y', 'yes') +FALSE_STRINGS = ('0', 'f', 'false', 'off', 'n', 'no') + + +def int_from_bool_as_string(subject): + """ + Interpret a string as a boolean and return either 1 or 0. + + Any string value in: + + ('True', 'true', 'On', 'on', '1') + + is interpreted as a boolean True. + + Useful for JSON-decoded stuff and config file parsing. + """ + return bool_from_string(subject) and 1 or 0 + + +def bool_from_string(subject, strict=False): + """ + Interpret a string as a boolean. + + A case-insensitive match is performed such that strings matching 't', + 'true', 'on', 'y', 'yes', or '1' are considered True and, when + `strict=False`, anything else is considered False. + + Useful for JSON-decoded stuff and config file parsing. + + If `strict=True`, unrecognized values, including None, will raise a + ValueError which is useful when parsing values passed in from an API call. + Strings yielding False are 'f', 'false', 'off', 'n', 'no', or '0'. + """ + if not isinstance(subject, basestring): + subject = str(subject) + + lowered = subject.strip().lower() + + if lowered in TRUE_STRINGS: + return True + elif lowered in FALSE_STRINGS: + return False + elif strict: + acceptable = ', '.join( + "'%s'" % s for s in sorted(TRUE_STRINGS + FALSE_STRINGS)) + msg = _("Unrecognized value '%(val)s', acceptable values are:" + " %(acceptable)s") % {'val': subject, + 'acceptable': acceptable} + raise ValueError(msg) + else: + return False + + +def safe_decode(text, incoming=None, errors='strict'): + """ + Decodes incoming str using `incoming` if it's + not already unicode. + + :param incoming: Text's current encoding + :param errors: Errors handling policy. See here for valid + values http://docs.python.org/2/library/codecs.html + :returns: text or a unicode `incoming` encoded + representation of it. + :raises TypeError: If text is not an instance of basestring + """ + if not isinstance(text, basestring): + raise TypeError("%s can't be decoded" % type(text)) + + if isinstance(text, unicode): + return text + + if not incoming: + incoming = (sys.stdin.encoding or + sys.getdefaultencoding()) + + try: + return text.decode(incoming, errors) + except UnicodeDecodeError: + # Note(flaper87) If we get here, it means that + # sys.stdin.encoding / sys.getdefaultencoding + # didn't return a suitable encoding to decode + # text. This happens mostly when global LANG + # var is not set correctly and there's no + # default encoding. In this case, most likely + # python will use ASCII or ANSI encoders as + # default encodings but they won't be capable + # of decoding non-ASCII characters. + # + # Also, UTF-8 is being used since it's an ASCII + # extension. + return text.decode('utf-8', errors) + + +def safe_encode(text, incoming=None, + encoding='utf-8', errors='strict'): + """ + Encodes incoming str/unicode using `encoding`. If + incoming is not specified, text is expected to + be encoded with current python's default encoding.
+ (`sys.getdefaultencoding`) + + :param incoming: Text's current encoding + :param encoding: Expected encoding for text (Default UTF-8) + :param errors: Errors handling policy. See here for valid + values http://docs.python.org/2/library/codecs.html + :returns: text or a bytestring `encoding` encoded + representation of it. + :raises TypeError: If text is not an instance of basestring + """ + if not isinstance(text, basestring): + raise TypeError("%s can't be encoded" % type(text)) + + if not incoming: + incoming = (sys.stdin.encoding or + sys.getdefaultencoding()) + + if isinstance(text, unicode): + return text.encode(encoding, errors) + elif text and encoding != incoming: + # Decode text before encoding it with `encoding` + text = safe_decode(text, incoming, errors) + return text.encode(encoding, errors) + + return text diff --git a/cinder/openstack/common/threadgroup.py b/cinder/openstack/common/threadgroup.py new file mode 100644 index 0000000000..5d6ec006b9 --- /dev/null +++ b/cinder/openstack/common/threadgroup.py @@ -0,0 +1,114 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from eventlet import greenlet +from eventlet import greenpool +from eventlet import greenthread + +from cinder.openstack.common import log as logging +from cinder.openstack.common import loopingcall + + +LOG = logging.getLogger(__name__) + + +def _thread_done(gt, *args, **kwargs): + """Callback function to be passed to GreenThread.link() when we spawn(). + Calls the :class:`ThreadGroup` to notify it. + + """ + kwargs['group'].thread_done(kwargs['thread']) + + +class Thread(object): + """Wrapper around a greenthread that holds a reference to the + :class:`ThreadGroup`. The Thread will notify the :class:`ThreadGroup` when + it has finished so it can be removed from the threads list. + """ + def __init__(self, thread, group): + self.thread = thread + self.thread.link(_thread_done, group=group, thread=self) + + def stop(self): + self.thread.kill() + + def wait(self): + return self.thread.wait() + + +class ThreadGroup(object): + """The point of the ThreadGroup class is to: + + * keep track of timers and greenthreads (making it easier to stop them + when need be). + * provide an easy API to add timers. + """ + def __init__(self, thread_pool_size=10): + self.pool = greenpool.GreenPool(thread_pool_size) + self.threads = [] + self.timers = [] + + def add_timer(self, interval, callback, initial_delay=None, + *args, **kwargs): + pulse = loopingcall.FixedIntervalLoopingCall(callback, *args, **kwargs) + pulse.start(interval=interval, + initial_delay=initial_delay) + self.timers.append(pulse) + + def add_thread(self, callback, *args, **kwargs): + gt = self.pool.spawn(callback, *args, **kwargs) + th = Thread(gt, self) + self.threads.append(th) + + def thread_done(self, thread): + self.threads.remove(thread) + + def stop(self): + current = greenthread.getcurrent() + for x in self.threads: + if x is current: + # don't kill the current thread.
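+ # (stop() itself may be running on one of this group's threads)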
+ continue + try: + x.stop() + except Exception as ex: + LOG.exception(ex) + + for x in self.timers: + try: + x.stop() + except Exception as ex: + LOG.exception(ex) + self.timers = [] + + def wait(self): + for x in self.timers: + try: + x.wait() + except greenlet.GreenletExit: + pass + except Exception as ex: + LOG.exception(ex) + current = greenthread.getcurrent() + for x in self.threads: + if x is current: + continue + try: + x.wait() + except greenlet.GreenletExit: + pass + except Exception as ex: + LOG.exception(ex) diff --git a/cinder/openstack/common/timeutils.py b/cinder/openstack/common/timeutils.py new file mode 100644 index 0000000000..6094365907 --- /dev/null +++ b/cinder/openstack/common/timeutils.py @@ -0,0 +1,186 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Time related utilities and helper functions. +""" + +import calendar +import datetime + +import iso8601 + + +# ISO 8601 extended time format with microseconds +_ISO8601_TIME_FORMAT_SUBSECOND = '%Y-%m-%dT%H:%M:%S.%f' +_ISO8601_TIME_FORMAT = '%Y-%m-%dT%H:%M:%S' +PERFECT_TIME_FORMAT = _ISO8601_TIME_FORMAT_SUBSECOND + + +def isotime(at=None, subsecond=False): + """Stringify time in ISO 8601 format""" + if not at: + at = utcnow() + st = at.strftime(_ISO8601_TIME_FORMAT + if not subsecond + else _ISO8601_TIME_FORMAT_SUBSECOND) + tz = at.tzinfo.tzname(None) if at.tzinfo else 'UTC' + st += ('Z' if tz == 'UTC' else tz) + return st + + +def parse_isotime(timestr): + """Parse time from ISO 8601 format""" + try: + return iso8601.parse_date(timestr) + except iso8601.ParseError as e: + raise ValueError(e.message) + except TypeError as e: + raise ValueError(e.message) + + +def strtime(at=None, fmt=PERFECT_TIME_FORMAT): + """Returns formatted utcnow.""" + if not at: + at = utcnow() + return at.strftime(fmt) + + +def parse_strtime(timestr, fmt=PERFECT_TIME_FORMAT): + """Turn a formatted time back into a datetime.""" + return datetime.datetime.strptime(timestr, fmt) + + +def normalize_time(timestamp): + """Normalize time in arbitrary timezone to UTC naive object""" + offset = timestamp.utcoffset() + if offset is None: + return timestamp + return timestamp.replace(tzinfo=None) - offset + + +def is_older_than(before, seconds): + """Return True if before is older than seconds.""" + if isinstance(before, basestring): + before = parse_strtime(before).replace(tzinfo=None) + return utcnow() - before > datetime.timedelta(seconds=seconds) + + +def is_newer_than(after, seconds): + """Return True if after is newer than seconds.""" + if isinstance(after, basestring): + after = parse_strtime(after).replace(tzinfo=None) + return after - utcnow() > datetime.timedelta(seconds=seconds) + + +def utcnow_ts(): + """Timestamp version of our utcnow function.""" + return calendar.timegm(utcnow().timetuple()) + + +def utcnow(): + """Overridable version of utils.utcnow.""" + if utcnow.override_time: + try: + return utcnow.override_time.pop(0) + 
except AttributeError: + return utcnow.override_time + return datetime.datetime.utcnow() + + +def iso8601_from_timestamp(timestamp): + """Returns an ISO 8601 formatted date from a timestamp.""" + return isotime(datetime.datetime.utcfromtimestamp(timestamp)) + + +utcnow.override_time = None + + +def set_time_override(override_time=None): + """ + Override utils.utcnow to return a constant time or a list thereof, + one at a time. Defaults to the current time, evaluated at call time + rather than at import time. + """ + utcnow.override_time = override_time or datetime.datetime.utcnow() + + +def advance_time_delta(timedelta): + """Advance overridden time using a datetime.timedelta.""" + assert utcnow.override_time is not None + try: + # datetime objects are immutable, so build a new list with each + # entry advanced by the delta. + utcnow.override_time = [dt + timedelta + for dt in utcnow.override_time] + except TypeError: + utcnow.override_time += timedelta + + +def advance_time_seconds(seconds): + """Advance overridden time by seconds.""" + advance_time_delta(datetime.timedelta(0, seconds)) + + +def clear_time_override(): + """Remove the overridden time.""" + utcnow.override_time = None + + +def marshall_now(now=None): + """Make an rpc-safe datetime with microseconds. + + Note: tzinfo is stripped, but not required for relative times.""" + if not now: + now = utcnow() + return dict(day=now.day, month=now.month, year=now.year, hour=now.hour, + minute=now.minute, second=now.second, + microsecond=now.microsecond) + + +def unmarshall_time(tyme): + """Unmarshall a datetime dict.""" + return datetime.datetime(day=tyme['day'], + month=tyme['month'], + year=tyme['year'], + hour=tyme['hour'], + minute=tyme['minute'], + second=tyme['second'], + microsecond=tyme['microsecond']) + + +def delta_seconds(before, after): + """ + Compute the difference in seconds between two date, time, or + datetime objects (as a float, to microsecond resolution). + """ + delta = after - before + try: + return delta.total_seconds() + except AttributeError: + return ((delta.days * 24 * 3600) + delta.seconds + + float(delta.microseconds) / (10 ** 6)) + + +def is_soon(dt, window): + """ + Determines if the given time is going to happen within the next + window seconds. + + :param dt: the time + :param window: minimum seconds to remain to consider the time not soon + + :return: True if expiration is within the given duration + """ + soon = (utcnow() + datetime.timedelta(seconds=window)) + return normalize_time(dt) <= soon diff --git a/cinder/openstack/common/uuidutils.py b/cinder/openstack/common/uuidutils.py new file mode 100644 index 0000000000..7608acb942 --- /dev/null +++ b/cinder/openstack/common/uuidutils.py @@ -0,0 +1,39 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2012 Intel Corporation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +UUID related utilities and helper functions. +""" + +import uuid + + +def generate_uuid(): + return str(uuid.uuid4()) + + +def is_uuid_like(val): + """Returns True if the given value looks like a UUID.
+ + For our purposes, a UUID is a canonical form string: + aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa + + """ + try: + return str(uuid.UUID(val)) == val + except (TypeError, ValueError, AttributeError): + return False diff --git a/cinder/policy.py b/cinder/policy.py new file mode 100644 index 0000000000..9b2b4defd1 --- /dev/null +++ b/cinder/policy.py @@ -0,0 +1,105 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 OpenStack, LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Policy Engine For Cinder""" + +from oslo.config import cfg + +from cinder import exception +from cinder import flags +from cinder.openstack.common import policy +from cinder import utils + +policy_opts = [ + cfg.StrOpt('policy_file', + default='policy.json', + help=_('JSON file representing policy')), + cfg.StrOpt('policy_default_rule', + default='default', + help=_('Rule checked when requested rule is not found')), ] + +FLAGS = flags.FLAGS +FLAGS.register_opts(policy_opts) + +_POLICY_PATH = None +_POLICY_CACHE = {} + + +def reset(): + global _POLICY_PATH + global _POLICY_CACHE + _POLICY_PATH = None + _POLICY_CACHE = {} + policy.reset() + + +def init(): + global _POLICY_PATH + global _POLICY_CACHE + if not _POLICY_PATH: + _POLICY_PATH = utils.find_config(FLAGS.policy_file) + utils.read_cached_file(_POLICY_PATH, _POLICY_CACHE, + reload_func=_set_brain) + + +def _set_brain(data): + default_rule = FLAGS.policy_default_rule + policy.set_brain(policy.HttpBrain.load_json(data, default_rule)) + + +def enforce(context, action, target): + """Verifies that the action is valid on the target in this context. + + :param context: cinder context + :param action: string representing the action to be checked + this should be colon separated for clarity. + i.e. ``compute:create_instance``, + ``compute:attach_volume``, + ``volume:attach_volume`` + + :param target: dictionary representing the object of the action + for object creation this should be a dictionary representing the + location of the object e.g. ``{'project_id': context.project_id}`` + + :raises cinder.exception.PolicyNotAuthorized: if verification fails. + + """ + init() + + match_list = ('rule:%s' % action,) + credentials = context.to_dict() + + policy.enforce(match_list, target, credentials, + exception.PolicyNotAuthorized, action=action) + + +def check_is_admin(roles): + """Returns whether the given roles contain the 'admin' role, according + to policy settings. + + """ + init() + + action = 'context_is_admin' + match_list = ('rule:%s' % action,) + # include project_id on target to avoid KeyError if context_is_admin + # policy definition is missing, and default admin_or_owner rule + # attempts to apply. Since our credentials dict does not include a + # project_id, this target can never match as a generic rule.
+ target = {'project_id': ''} + credentials = {'roles': roles} + + return policy.enforce(match_list, target, credentials) diff --git a/cinder/quota.py b/cinder/quota.py new file mode 100644 index 0000000000..59868d33d2 --- /dev/null +++ b/cinder/quota.py @@ -0,0 +1,813 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Quotas for volumes.""" + +import datetime + +from oslo.config import cfg + +from cinder import db +from cinder import exception +from cinder import flags +from cinder.openstack.common import importutils +from cinder.openstack.common import log as logging +from cinder.openstack.common import timeutils + +LOG = logging.getLogger(__name__) + +quota_opts = [ + cfg.IntOpt('quota_volumes', + default=10, + help='number of volumes allowed per project'), + cfg.IntOpt('quota_snapshots', + default=10, + help='number of volume snapshots allowed per project'), + cfg.IntOpt('quota_gigabytes', + default=1000, + help='number of volume gigabytes (snapshots are also included) ' + 'allowed per project'), + cfg.IntOpt('reservation_expire', + default=86400, + help='number of seconds until a reservation expires'), + cfg.IntOpt('until_refresh', + default=0, + help='count of reservations until usage is refreshed'), + cfg.IntOpt('max_age', + default=0, + help='number of seconds between subsequent usage refreshes'), + cfg.StrOpt('quota_driver', + default='cinder.quota.DbQuotaDriver', + help='default driver to use for quota checks'), ] + +FLAGS = flags.FLAGS +FLAGS.register_opts(quota_opts) + + +class DbQuotaDriver(object): + """ + Driver to perform necessary checks to enforce quotas and obtain + quota information. The default driver utilizes the local + database. + """ + + def get_by_project(self, context, project_id, resource): + """Get a specific quota by project.""" + + return db.quota_get(context, project_id, resource) + + def get_by_class(self, context, quota_class, resource): + """Get a specific quota by quota class.""" + + return db.quota_class_get(context, quota_class, resource) + + def get_defaults(self, context, resources): + """Given a list of resources, retrieve the default quotas. + + :param context: The request context, for access checks. + :param resources: A dictionary of the registered resources. + """ + + quotas = {} + for resource in resources.values(): + quotas[resource.name] = resource.default + + return quotas + + def get_class_quotas(self, context, resources, quota_class, + defaults=True): + """ + Given a list of resources, retrieve the quotas for the given + quota class. + + :param context: The request context, for access checks. + :param resources: A dictionary of the registered resources. + :param quota_class: The name of the quota class to return + quotas for. 
+ :param defaults: If True, the default value will be reported + if there is no specific value for the + resource. + """ + + quotas = {} + class_quotas = db.quota_class_get_all_by_name(context, quota_class) + for resource in resources.values(): + if defaults or resource.name in class_quotas: + quotas[resource.name] = class_quotas.get(resource.name, + resource.default) + + return quotas + + def get_project_quotas(self, context, resources, project_id, + quota_class=None, defaults=True, + usages=True): + """ + Given a list of resources, retrieve the quotas for the given + project. + + :param context: The request context, for access checks. + :param resources: A dictionary of the registered resources. + :param project_id: The ID of the project to return quotas for. + :param quota_class: If project_id != context.project_id, the + quota class cannot be determined. This + parameter allows it to be specified. It + will be ignored if project_id == + context.project_id. + :param defaults: If True, the quota class value (or the + default value, if there is no value from the + quota class) will be reported if there is no + specific value for the resource. + :param usages: If True, the current in_use and reserved counts + will also be returned. + """ + + quotas = {} + project_quotas = db.quota_get_all_by_project(context, project_id) + if usages: + project_usages = db.quota_usage_get_all_by_project(context, + project_id) + + # Get the quotas for the appropriate class. If the project ID + # matches the one in the context, we use the quota_class from + # the context, otherwise, we use the provided quota_class (if + # any) + if project_id == context.project_id: + quota_class = context.quota_class + if quota_class: + class_quotas = db.quota_class_get_all_by_name(context, quota_class) + else: + class_quotas = {} + + for resource in resources.values(): + # Omit default/quota class values + if not defaults and resource.name not in project_quotas: + continue + + quotas[resource.name] = dict( + limit=project_quotas.get(resource.name, + class_quotas.get(resource.name, + resource.default)), ) + + # Include usages if desired. This is optional because one + # internal consumer of this interface wants to access the + # usages directly from inside a transaction. + if usages: + usage = project_usages.get(resource.name, {}) + quotas[resource.name].update( + in_use=usage.get('in_use', 0), + reserved=usage.get('reserved', 0), ) + + return quotas + + def _get_quotas(self, context, resources, keys, has_sync, project_id=None): + """ + A helper method which retrieves the quotas for the specific + resources identified by keys, and which apply to the current + context. + + :param context: The request context, for access checks. + :param resources: A dictionary of the registered resources. + :param keys: A list of the desired quotas to retrieve. + :param has_sync: If True, indicates that the resource must + have a sync attribute; if False, indicates + that the resource must NOT have a sync + attribute. + :param project_id: Specify the project_id if current context + is admin and admin wants to impact on + common user's tenant. + """ + + # Filter resources + if has_sync: + sync_filt = lambda x: hasattr(x, 'sync') + else: + sync_filt = lambda x: not hasattr(x, 'sync') + desired = set(keys) + sub_resources = dict((k, v) for k, v in resources.items() + if k in desired and sync_filt(v)) + + # Make sure we accounted for all of them... 
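+ # (anything filtered out here is either unregistered or has the + # wrong sync type for this call, so report it as unknown below)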
+ if len(keys) != len(sub_resources): + unknown = desired - set(sub_resources.keys()) + raise exception.QuotaResourceUnknown(unknown=sorted(unknown)) + + # Grab and return the quotas (without usages) + quotas = self.get_project_quotas(context, sub_resources, + project_id, + context.quota_class, usages=False) + + return dict((k, v['limit']) for k, v in quotas.items()) + + def limit_check(self, context, resources, values, project_id=None): + """Check simple quota limits. + + For limits--those quotas for which there is no usage + synchronization function--this method checks that a set of + proposed values are permitted by the limit restriction. + + This method will raise a QuotaResourceUnknown exception if a + given resource is unknown or if it is not a simple limit + resource. + + If any of the proposed values is over the defined quota, an + OverQuota exception will be raised with the sorted list of the + resources which are too high. Otherwise, the method returns + nothing. + + :param context: The request context, for access checks. + :param resources: A dictionary of the registered resources. + :param values: A dictionary of the values to check against the + quota. + :param project_id: Specify the project_id if current context + is admin and admin wants to impact on + common user's tenant. + """ + + # Ensure no value is less than zero + unders = [key for key, val in values.items() if val < 0] + if unders: + raise exception.InvalidQuotaValue(unders=sorted(unders)) + + # If project_id is None, then we use the project_id in context + if project_id is None: + project_id = context.project_id + + # Get the applicable quotas + quotas = self._get_quotas(context, resources, values.keys(), + has_sync=False, project_id=project_id) + # Check the quotas and construct a list of the resources that + # would be put over limit by the desired values + overs = [key for key, val in values.items() + if quotas[key] >= 0 and quotas[key] < val] + if overs: + raise exception.OverQuota(overs=sorted(overs), quotas=quotas, + usages={}) + + def reserve(self, context, resources, deltas, expire=None, + project_id=None): + """Check quotas and reserve resources. + + For counting quotas--those quotas for which there is a usage + synchronization function--this method checks quotas against + current usage and the desired deltas. + + This method will raise a QuotaResourceUnknown exception if a + given resource is unknown or if it does not have a usage + synchronization function. + + If any of the proposed values is over the defined quota, an + OverQuota exception will be raised with the sorted list of the + resources which are too high. Otherwise, the method returns a + list of reservation UUIDs which were created. + + :param context: The request context, for access checks. + :param resources: A dictionary of the registered resources. + :param deltas: A dictionary of the proposed delta changes. + :param expire: An optional parameter specifying an expiration + time for the reservations. If it is a simple + number, it is interpreted as a number of + seconds and added to the current time; if it is + a datetime.timedelta object, it will also be + added to the current time. A datetime.datetime + object will be interpreted as the absolute + expiration time. If None is specified, the + default expiration time set by + --default-reservation-expire will be used (this + value will be treated as a number of seconds). 
+ :param project_id: Specify the project_id if current context + is admin and admin wants to impact on + common user's tenant. + """ + + # Set up the reservation expiration + if expire is None: + expire = FLAGS.reservation_expire + if isinstance(expire, (int, long)): + expire = datetime.timedelta(seconds=expire) + if isinstance(expire, datetime.timedelta): + expire = timeutils.utcnow() + expire + if not isinstance(expire, datetime.datetime): + raise exception.InvalidReservationExpiration(expire=expire) + + # If project_id is None, then we use the project_id in context + if project_id is None: + project_id = context.project_id + + # Get the applicable quotas. + # NOTE(Vek): We're not worried about races at this point. + # Yes, the admin may be in the process of reducing + # quotas, but that's a pretty rare thing. + quotas = self._get_quotas(context, resources, deltas.keys(), + has_sync=True, project_id=project_id) + + # NOTE(Vek): Most of the work here has to be done in the DB + # API, because we have to do it in a transaction, + # which means access to the session. Since the + # session isn't available outside the DBAPI, we + # have to do the work there. + return db.quota_reserve(context, resources, quotas, deltas, expire, + FLAGS.until_refresh, FLAGS.max_age, + project_id=project_id) + + def commit(self, context, reservations, project_id=None): + """Commit reservations. + + :param context: The request context, for access checks. + :param reservations: A list of the reservation UUIDs, as + returned by the reserve() method. + :param project_id: Specify the project_id if current context + is admin and admin wants to impact on + common user's tenant. + """ + # If project_id is None, then we use the project_id in context + if project_id is None: + project_id = context.project_id + + db.reservation_commit(context, reservations, project_id=project_id) + + def rollback(self, context, reservations, project_id=None): + """Roll back reservations. + + :param context: The request context, for access checks. + :param reservations: A list of the reservation UUIDs, as + returned by the reserve() method. + :param project_id: Specify the project_id if current context + is admin and admin wants to impact on + common user's tenant. + """ + # If project_id is None, then we use the project_id in context + if project_id is None: + project_id = context.project_id + + db.reservation_rollback(context, reservations, project_id=project_id) + + def destroy_all_by_project(self, context, project_id): + """ + Destroy all quotas, usages, and reservations associated with a + project. + + :param context: The request context, for access checks. + :param project_id: The ID of the project being deleted. + """ + + db.quota_destroy_all_by_project(context, project_id) + + def expire(self, context): + """Expire reservations. + + Explores all currently existing reservations and rolls back + any that have expired. + + :param context: The request context, for access checks. + """ + + db.reservation_expire(context) + + +class BaseResource(object): + """Describe a single resource for quota checking.""" + + def __init__(self, name, flag=None): + """ + Initializes a Resource. + + :param name: The name of the resource, i.e., "volumes". + :param flag: The name of the flag or configuration option + which specifies the default value of the quota + for this resource. + """ + + self.name = name + self.flag = flag + + def quota(self, driver, context, **kwargs): + """ + Given a driver and context, obtain the quota for this + resource. 
+ + :param driver: A quota driver. + :param context: The request context. + :param project_id: The project to obtain the quota value for. + If not provided, it is taken from the + context. If it is given as None, no + project-specific quota will be searched + for. + :param quota_class: The quota class corresponding to the + project, or for which the quota is to be + looked up. If not provided, it is taken + from the context. If it is given as None, + no quota class-specific quota will be + searched for. Note that the quota class + defaults to the value in the context, + which may not correspond to the project if + project_id is not the same as the one in + the context. + """ + + # Get the project ID + project_id = kwargs.get('project_id', context.project_id) + + # Ditto for the quota class + quota_class = kwargs.get('quota_class', context.quota_class) + + # Look up the quota for the project + if project_id: + try: + return driver.get_by_project(context, project_id, self.name) + except exception.ProjectQuotaNotFound: + pass + + # Try for the quota class + if quota_class: + try: + return driver.get_by_class(context, quota_class, self.name) + except exception.QuotaClassNotFound: + pass + + # OK, return the default + return self.default + + @property + def default(self): + """Return the default value of the quota.""" + + return FLAGS[self.flag] if self.flag else -1 + + +class ReservableResource(BaseResource): + """Describe a reservable resource.""" + + def __init__(self, name, sync, flag=None): + """ + Initializes a ReservableResource. + + Reservable resources are those resources which directly + correspond to objects in the database, i.e., volumes, gigabytes, + etc. A ReservableResource must be constructed with a usage + synchronization function, which will be called to determine the + current counts of one or more resources. + + The usage synchronization function will be passed three + arguments: an admin context, the project ID, and an opaque + session object, which should in turn be passed to the + underlying database function. Synchronization functions + should return a dictionary mapping resource names to the + current in_use count for those resources; more than one + resource and resource count may be returned. Note that + synchronization functions may be associated with more than one + ReservableResource. + + :param name: The name of the resource, i.e., "volumes". + :param sync: A callable which returns a dictionary to + resynchronize the in_use count for one or more + resources, as described above. + :param flag: The name of the flag or configuration option + which specifies the default value of the quota + for this resource. + """ + + super(ReservableResource, self).__init__(name, flag=flag) + self.sync = sync + + +class AbsoluteResource(BaseResource): + """Describe a non-reservable resource.""" + + pass + + +class CountableResource(AbsoluteResource): + """ + Describe a resource where the counts aren't based solely on the + project ID. + """ + + def __init__(self, name, count, flag=None): + """ + Initializes a CountableResource. + + Countable resources are those resources which directly + correspond to objects in the database, i.e., volumes, gigabytes, + etc., but for which a count by project ID is inappropriate. A + CountableResource must be constructed with a counting + function, which will be called to determine the current counts + of the resource. 
+ + The counting function will be passed the context, along with + the extra positional and keyword arguments that are passed to + Quota.count(). It should return an integer specifying the + count. + + Note that this counting is not performed in a transaction-safe + manner. This resource class is a temporary measure to provide + required functionality, until a better approach to solving + this problem can be evolved. + + :param name: The name of the resource, i.e., "volumes". + :param count: A callable which returns the count of the + resource. The arguments passed are as described + above. + :param flag: The name of the flag or configuration option + which specifies the default value of the quota + for this resource. + """ + + super(CountableResource, self).__init__(name, flag=flag) + self.count = count + + +class QuotaEngine(object): + """Represent the set of recognized quotas.""" + + def __init__(self, quota_driver_class=None): + """Initialize a Quota object.""" + + if not quota_driver_class: + quota_driver_class = FLAGS.quota_driver + + if isinstance(quota_driver_class, basestring): + quota_driver_class = importutils.import_object(quota_driver_class) + + self._resources = {} + self._driver = quota_driver_class + + def __contains__(self, resource): + return resource in self._resources + + def register_resource(self, resource): + """Register a resource.""" + + self._resources[resource.name] = resource + + def register_resources(self, resources): + """Register a list of resources.""" + + for resource in resources: + self.register_resource(resource) + + def get_by_project(self, context, project_id, resource): + """Get a specific quota by project.""" + + return self._driver.get_by_project(context, project_id, resource) + + def get_by_class(self, context, quota_class, resource): + """Get a specific quota by quota class.""" + + return self._driver.get_by_class(context, quota_class, resource) + + def get_defaults(self, context): + """Retrieve the default quotas. + + :param context: The request context, for access checks. + """ + + return self._driver.get_defaults(context, self._resources) + + def get_class_quotas(self, context, quota_class, defaults=True): + """Retrieve the quotas for the given quota class. + + :param context: The request context, for access checks. + :param quota_class: The name of the quota class to return + quotas for. + :param defaults: If True, the default value will be reported + if there is no specific value for the + resource. + """ + + return self._driver.get_class_quotas(context, self._resources, + quota_class, defaults=defaults) + + def get_project_quotas(self, context, project_id, quota_class=None, + defaults=True, usages=True): + """Retrieve the quotas for the given project. + + :param context: The request context, for access checks. + :param project_id: The ID of the project to return quotas for. + :param quota_class: If project_id != context.project_id, the + quota class cannot be determined. This + parameter allows it to be specified. + :param defaults: If True, the quota class value (or the + default value, if there is no value from the + quota class) will be reported if there is no + specific value for the resource. + :param usages: If True, the current in_use and reserved counts + will also be returned. + """ + + return self._driver.get_project_quotas(context, self._resources, + project_id, + quota_class=quota_class, + defaults=defaults, + usages=usages) + + def count(self, context, resource, *args, **kwargs): + """Count a resource. 
+ + For countable resources, invokes the count() function and + returns its result. Arguments following the context and + resource are passed directly to the count function declared by + the resource. + + :param context: The request context, for access checks. + :param resource: The name of the resource, as a string. + """ + + # Get the resource + res = self._resources.get(resource) + if not res or not hasattr(res, 'count'): + raise exception.QuotaResourceUnknown(unknown=[resource]) + + return res.count(context, *args, **kwargs) + + def limit_check(self, context, project_id=None, **values): + """Check simple quota limits. + + For limits--those quotas for which there is no usage + synchronization function--this method checks that a set of + proposed values are permitted by the limit restriction. The + values to check are given as keyword arguments, where the key + identifies the specific quota limit to check, and the value is + the proposed value. + + This method will raise a QuotaResourceUnknown exception if a + given resource is unknown or if it is not a simple limit + resource. + + If any of the proposed values is over the defined quota, an + OverQuota exception will be raised with the sorted list of the + resources which are too high. Otherwise, the method returns + nothing. + + :param context: The request context, for access checks. + :param project_id: Specify the project_id if current context + is admin and admin wants to impact on + common user's tenant. + """ + + return self._driver.limit_check(context, self._resources, values, + project_id=project_id) + + def reserve(self, context, expire=None, project_id=None, **deltas): + """Check quotas and reserve resources. + + For counting quotas--those quotas for which there is a usage + synchronization function--this method checks quotas against + current usage and the desired deltas. The deltas are given as + keyword arguments, and current usage and other reservations + are factored into the quota check. + + This method will raise a QuotaResourceUnknown exception if a + given resource is unknown or if it does not have a usage + synchronization function. + + If any of the proposed values is over the defined quota, an + OverQuota exception will be raised with the sorted list of the + resources which are too high. Otherwise, the method returns a + list of reservation UUIDs which were created. + + :param context: The request context, for access checks. + :param expire: An optional parameter specifying an expiration + time for the reservations. If it is a simple + number, it is interpreted as a number of + seconds and added to the current time; if it is + a datetime.timedelta object, it will also be + added to the current time. A datetime.datetime + object will be interpreted as the absolute + expiration time. If None is specified, the + default expiration time set by + --default-reservation-expire will be used (this + value will be treated as a number of seconds). + :param project_id: Specify the project_id if current context + is admin and admin wants to impact on + common user's tenant. + """ + + reservations = self._driver.reserve(context, self._resources, deltas, + expire=expire, + project_id=project_id) + + LOG.debug(_("Created reservations %(reservations)s") % locals()) + + return reservations + + def commit(self, context, reservations, project_id=None): + """Commit reservations. + + :param context: The request context, for access checks. + :param reservations: A list of the reservation UUIDs, as + returned by the reserve() method. 
+ :param project_id: Specify the project_id if current context + is admin and admin wants to impact on + common user's tenant. + """ + + try: + self._driver.commit(context, reservations, project_id=project_id) + except Exception: + # NOTE(Vek): Ignoring exceptions here is safe, because the + # usage resynchronization and the reservation expiration + # mechanisms will resolve the issue. The exception is + # logged, however, because this is less than optimal. + LOG.exception(_("Failed to commit reservations " + "%(reservations)s") % locals()) + + def rollback(self, context, reservations, project_id=None): + """Roll back reservations. + + :param context: The request context, for access checks. + :param reservations: A list of the reservation UUIDs, as + returned by the reserve() method. + :param project_id: Specify the project_id if current context + is admin and admin wants to impact on + common user's tenant. + """ + + try: + self._driver.rollback(context, reservations, project_id=project_id) + except Exception: + # NOTE(Vek): Ignoring exceptions here is safe, because the + # usage resynchronization and the reservation expiration + # mechanisms will resolve the issue. The exception is + # logged, however, because this is less than optimal. + LOG.exception(_("Failed to roll back reservations " + "%(reservations)s") % locals()) + + def destroy_all_by_project(self, context, project_id): + """ + Destroy all quotas, usages, and reservations associated with a + project. + + :param context: The request context, for access checks. + :param project_id: The ID of the project being deleted. + """ + + self._driver.destroy_all_by_project(context, project_id) + + def expire(self, context): + """Expire reservations. + + Explores all currently existing reservations and rolls back + any that have expired. + + :param context: The request context, for access checks. + """ + + self._driver.expire(context) + + @property + def resources(self): + return sorted(self._resources.keys()) + + +def _sync_volumes(context, project_id, session): + (volumes, gigs) = db.volume_data_get_for_project(context, + project_id, + session=session) + return {'volumes': volumes} + + +def _sync_snapshots(context, project_id, session): + (snapshots, gigs) = db.snapshot_data_get_for_project(context, + project_id, + session=session) + return {'snapshots': snapshots} + + +def _sync_gigabytes(context, project_id, session): + (_junk, vol_gigs) = db.volume_data_get_for_project(context, + project_id, + session=session) + if FLAGS.no_snapshot_gb_quota: + return {'gigabytes': vol_gigs} + + (_junk, snap_gigs) = db.snapshot_data_get_for_project(context, + project_id, + session=session) + return {'gigabytes': vol_gigs + snap_gigs} + + +QUOTAS = QuotaEngine() + + +resources = [ + ReservableResource('volumes', _sync_volumes, 'quota_volumes'), + ReservableResource('snapshots', _sync_snapshots, 'quota_snapshots'), + ReservableResource('gigabytes', _sync_gigabytes, 'quota_gigabytes'), ] + + +QUOTAS.register_resources(resources) diff --git a/cinder/scheduler/__init__.py b/cinder/scheduler/__init__.py new file mode 100644 index 0000000000..727de580eb --- /dev/null +++ b/cinder/scheduler/__init__.py @@ -0,0 +1,27 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2010 OpenStack, LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +:mod:`cinder.scheduler` -- Scheduler Nodes +===================================================== + +.. automodule:: cinder.scheduler + :platform: Unix + :synopsis: Module that picks a volume node to create a volume. +.. moduleauthor:: Sandy Walsh +.. moduleauthor:: Ed Leafe +.. moduleauthor:: Chris Behrens +""" diff --git a/cinder/scheduler/chance.py b/cinder/scheduler/chance.py new file mode 100644 index 0000000000..0393afca53 --- /dev/null +++ b/cinder/scheduler/chance.py @@ -0,0 +1,86 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2010 OpenStack, LLC. +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Chance (Random) Scheduler implementation +""" + +import random + +from cinder import exception +from cinder import flags +from cinder.scheduler import driver + + +FLAGS = flags.FLAGS + + +class ChanceScheduler(driver.Scheduler): + """Implements Scheduler as a random node selector.""" + + def _filter_hosts(self, request_spec, hosts, **kwargs): + """Filter a list of hosts based on request_spec.""" + + filter_properties = kwargs.get('filter_properties', {}) + ignore_hosts = filter_properties.get('ignore_hosts', []) + hosts = [host for host in hosts if host not in ignore_hosts] + return hosts + + def _schedule(self, context, topic, request_spec, **kwargs): + """Picks a host that is up at random.""" + + elevated = context.elevated() + hosts = self.hosts_up(elevated, topic) + if not hosts: + msg = _("Is the appropriate service running?") + raise exception.NoValidHost(reason=msg) + + hosts = self._filter_hosts(request_spec, hosts, **kwargs) + if not hosts: + msg = _("Could not find another host") + raise exception.NoValidHost(reason=msg) + + return hosts[int(random.random() * len(hosts))] + + def schedule_create_volume(self, context, request_spec, filter_properties): + """Picks a host that is up at random.""" + topic = FLAGS.volume_topic + host = self._schedule(context, topic, request_spec, + filter_properties=filter_properties) + volume_id = request_spec['volume_id'] + snapshot_id = request_spec['snapshot_id'] + image_id = request_spec['image_id'] + + updated_volume = driver.volume_update_db(context, volume_id, host) + self.volume_rpcapi.create_volume(context, updated_volume, host, + snapshot_id, image_id) + + def schedule_create_share(self, context, request_spec, filter_properties): + """Picks a host that is up at random.""" + topic = FLAGS.share_topic + host = self._schedule(context, topic, 
request_spec,
+                              filter_properties=filter_properties)
+        share_id = request_spec['share_id']
+        snapshot_id = request_spec['snapshot_id']
+
+        updated_share = driver.share_update_db(context, share_id, host)
+        self.share_rpcapi.create_share(context, updated_share, host,
+                                       request_spec,
+                                       filter_properties,
+                                       snapshot_id)
diff --git a/cinder/scheduler/driver.py b/cinder/scheduler/driver.py
new file mode 100644
index 0000000000..b3aa7bf177
--- /dev/null
+++ b/cinder/scheduler/driver.py
@@ -0,0 +1,109 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2010 OpenStack, LLC.
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Scheduler base class that all Schedulers should inherit from
+"""
+
+from oslo.config import cfg
+
+from cinder import db
+from cinder import flags
+from cinder.openstack.common import importutils
+from cinder.openstack.common import timeutils
+from cinder.share import rpcapi as share_rpcapi
+from cinder import utils
+from cinder.volume import rpcapi as volume_rpcapi
+
+scheduler_driver_opts = [
+    cfg.StrOpt('scheduler_host_manager',
+               default='cinder.scheduler.host_manager.HostManager',
+               help='The scheduler host manager class to use'),
+    cfg.IntOpt('scheduler_max_attempts',
+               default=3,
+               help='Maximum number of attempts to schedule a volume'),
+]

+FLAGS = flags.FLAGS
+FLAGS.register_opts(scheduler_driver_opts)
+
+
+def share_update_db(context, share_id, host):
+    '''Set the host and the scheduled_at field of a share.
+
+    :returns: A Share with the updated fields set properly.
+    '''
+    now = timeutils.utcnow()
+    values = {'host': host, 'scheduled_at': now}
+    return db.share_update(context, share_id, values)
+
+
+def volume_update_db(context, volume_id, host):
+    '''Set the host and the scheduled_at field of a volume.
+
+    :returns: A Volume with the updated fields set properly.
+    '''
+    now = timeutils.utcnow()
+    values = {'host': host, 'scheduled_at': now}
+    return db.volume_update(context, volume_id, values)
+
+
+class Scheduler(object):
+    """The base class that all Scheduler classes should inherit from."""
+
+    def __init__(self):
+        self.host_manager = importutils.import_object(
+            FLAGS.scheduler_host_manager)
+        self.share_rpcapi = share_rpcapi.ShareAPI()
+        self.volume_rpcapi = volume_rpcapi.VolumeAPI()
+
+    def get_host_list(self):
+        """Get a list of hosts from the HostManager."""
+        return self.host_manager.get_host_list()
+
+    def get_service_capabilities(self):
+        """Get the normalized set of capabilities for the services.
+
+        """
+        return self.host_manager.get_service_capabilities()
+
+    def update_service_capabilities(self, service_name, host, capabilities):
+        """Process a capability update from a service node."""
+        self.host_manager.update_service_capabilities(service_name,
+                                                      host,
+                                                      capabilities)
+
+    def hosts_up(self, context, topic):
+        """Return the list of hosts that have a running service for topic."""
+
+        services = db.service_get_all_by_topic(context, topic)
+        return [service['host']
+                for service in services
+                if utils.service_is_up(service)]
+
+    def schedule(self, context, topic, method, *_args, **_kwargs):
+        """Must override schedule method for scheduler to work."""
+        raise NotImplementedError(_("Must implement a fallback schedule"))
+
+    def schedule_create_volume(self, context, request_spec, filter_properties):
+        """Must override schedule_create_volume for scheduler to work."""
+        raise NotImplementedError(_("Must implement schedule_create_volume"))
+
+    def schedule_create_share(self, context, request_spec, filter_properties):
+        """Must override schedule_create_share for scheduler to work."""
+        raise NotImplementedError(_("Must implement schedule_create_share"))
diff --git a/cinder/scheduler/filter_scheduler.py b/cinder/scheduler/filter_scheduler.py
new file mode 100644
index 0000000000..ef87f40e8e
--- /dev/null
+++ b/cinder/scheduler/filter_scheduler.py
@@ -0,0 +1,354 @@
+# Copyright (c) 2011 Intel Corporation
+# Copyright (c) 2011 OpenStack, LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+The FilterScheduler is for creating volumes.
+You can customize this scheduler by specifying your own volume Filters and
+Weighing Functions.
+"""
+
+import operator
+
+from cinder import exception
+from cinder import flags
+from cinder.openstack.common import importutils
+from cinder.openstack.common import log as logging
+from cinder.scheduler import driver
+from cinder.scheduler import scheduler_options
+
+
+FLAGS = flags.FLAGS
+LOG = logging.getLogger(__name__)
+
+
+class FilterScheduler(driver.Scheduler):
+    """Scheduler that can be used for filtering and weighing."""
+    def __init__(self, *args, **kwargs):
+        super(FilterScheduler, self).__init__(*args, **kwargs)
+        self.cost_function_cache = None
+        self.options = scheduler_options.SchedulerOptions()
+        self.max_attempts = self._max_attempts()
+
+    def schedule(self, context, topic, method, *args, **kwargs):
+        """The schedule() contract requires we return the one
+        best-suited host for this request.
+        """
+        return self._schedule(context, topic, *args, **kwargs)
+
+    def _get_configuration_options(self):
+        """Fetch options dictionary. Broken out for testing."""
+        return self.options.get_configuration()
+
+    def populate_filter_properties(self, request_spec, filter_properties):
+        """Stuff things into filter_properties. Can be overridden in a
+        subclass to add more data.
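+
+        A minimal sketch of such a subclass (hypothetical, for
+        illustration only):
+
+            class HintingScheduler(FilterScheduler):
+                def populate_filter_properties(self, request_spec,
+                                               filter_properties):
+                    super(HintingScheduler,
+                          self).populate_filter_properties(request_spec,
+                                                           filter_properties)
+                    # Hypothetical extra key consumed by a custom filter.
+                    vol = request_spec['volume_properties']
+                    filter_properties['bootable'] = vol.get('bootable')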
+ """ + vol = request_spec['volume_properties'] + filter_properties['size'] = vol['size'] + filter_properties['availability_zone'] = vol.get('availability_zone') + filter_properties['user_id'] = vol.get('user_id') + filter_properties['metadata'] = vol.get('metadata') + + def schedule_create_volume(self, context, request_spec, filter_properties): + weighed_host = self._schedule(context, request_spec, + filter_properties) + + if not weighed_host: + raise exception.NoValidHost(reason="") + + host = weighed_host.obj.host + volume_id = request_spec['volume_id'] + snapshot_id = request_spec['snapshot_id'] + image_id = request_spec['image_id'] + + updated_volume = driver.volume_update_db(context, volume_id, host) + self._post_select_populate_filter_properties(filter_properties, + weighed_host.obj) + + # context is not serializable + filter_properties.pop('context', None) + + self.volume_rpcapi.create_volume(context, updated_volume, host, + request_spec=request_spec, + filter_properties=filter_properties, + allow_reschedule=True, + snapshot_id=snapshot_id, + image_id=image_id) + + def _post_select_populate_filter_properties(self, filter_properties, + host_state): + """Add additional information to the filter properties after a host has + been selected by the scheduling process. + """ + # Add a retry entry for the selected volume backend: + self._add_retry_host(filter_properties, host_state.host) + + def _add_retry_host(self, filter_properties, host): + """Add a retry entry for the selected volume backend. In the event that + the request gets re-scheduled, this entry will signal that the given + backend has already been tried. + """ + retry = filter_properties.get('retry', None) + if not retry: + return + hosts = retry['hosts'] + hosts.append(host) + + def _max_attempts(self): + max_attempts = FLAGS.scheduler_max_attempts + if max_attempts < 1: + msg = _("Invalid value for 'scheduler_max_attempts', " + "must be >=1") + raise exception.InvalidParameterValue(err=msg) + return max_attempts + + def _log_volume_error(self, volume_id, retry): + """If the request contained an exception from a previous volume + create operation, log it to aid debugging. + """ + exc = retry.pop('exc', None) # string-ified exception from volume + if not exc: + return # no exception info from a previous attempt, skip + + hosts = retry.get('hosts', None) + if not hosts: + return # no previously attempted hosts, skip + + last_host = hosts[-1] + msg = _("Error scheduling %(volume_id)s from last vol-service: " + "%(last_host)s : %(exc)s") % locals() + LOG.error(msg) + + def _populate_retry(self, filter_properties, properties): + """Populate filter properties with history of retries for this + request. If maximum retries is exceeded, raise NoValidHost. + """ + max_attempts = self.max_attempts + retry = filter_properties.pop('retry', {}) + + if max_attempts == 1: + # re-scheduling is disabled. 
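+            # (a scheduler_max_attempts value of 1 means never re-schedule,
+            # so there is no retry history to record)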
+            return
+
+        # retry is enabled, update attempt count:
+        if retry:
+            retry['num_attempts'] += 1
+        else:
+            retry = {
+                'num_attempts': 1,
+                'hosts': []  # list of volume service hosts tried
+            }
+        filter_properties['retry'] = retry
+
+        volume_id = properties.get('volume_id')
+        self._log_volume_error(volume_id, retry)
+
+        if retry['num_attempts'] > max_attempts:
+            msg = _("Exceeded max scheduling attempts %(max_attempts)d for "
+                    "volume %(volume_id)s") % locals()
+            raise exception.NoValidHost(reason=msg)
+
+    def _schedule(self, context, request_spec, filter_properties=None):
+        """Return the best-weighed host that meets the required specs,
+        or None if no host passes the filters.
+        """
+        elevated = context.elevated()
+
+        volume_properties = request_spec['volume_properties']
+        # Since Cinder uses mixed filters from Oslo and its own, which
+        # take 'resource_XX' and 'volume_XX' as input respectively, copying
+        # 'volume_XX' to 'resource_XX' will make both filters happy.
+        resource_properties = volume_properties.copy()
+        volume_type = request_spec.get("volume_type", None)
+        resource_type = request_spec.get("volume_type", None)
+        request_spec.update({'resource_properties': resource_properties})
+
+        config_options = self._get_configuration_options()
+
+        if filter_properties is None:
+            filter_properties = {}
+        self._populate_retry(filter_properties, resource_properties)
+
+        filter_properties.update({'context': context,
+                                  'request_spec': request_spec,
+                                  'config_options': config_options,
+                                  'volume_type': volume_type,
+                                  'resource_type': resource_type})
+
+        self.populate_filter_properties(request_spec,
+                                        filter_properties)
+
+        # Find our local list of acceptable hosts by filtering and
+        # weighing our options. We virtually consume resources on
+        # the selected host so subsequent selections can adjust
+        # accordingly.
+
+        # Note: remember, we are using an iterator here, so only
+        # traverse this list once.
+        hosts = self.host_manager.get_all_host_states(elevated)
+
+        # Filter local hosts based on requirements ...
+        hosts = self.host_manager.get_filtered_hosts(hosts,
+                                                     filter_properties)
+        if not hosts:
+            return None
+
+        LOG.debug(_("Filtered %(hosts)s") % locals())
+        # weighed_hosts are sorted so the first entry is the best
+        # host for the job.
+        weighed_hosts = self.host_manager.get_weighed_hosts(hosts,
+                                                            filter_properties)
+        best_host = weighed_hosts[0]
+        LOG.debug(_("Choosing %(best_host)s") % locals())
+        best_host.obj.consume_from_volume(volume_properties)
+        return best_host
+
+    #NOTE(rushiagr): Methods for scheduling shares
+
+    def schedule_create_share(self, context, request_spec, filter_properties):
+        weighed_host = self._schedule_share(context,
+                                            request_spec,
+                                            filter_properties)
+
+        if not weighed_host:
+            raise exception.NoValidHost(reason="")
+
+        host = weighed_host.obj.host
+        share_id = request_spec['share_id']
+        snapshot_id = request_spec['snapshot_id']
+
+        updated_share = driver.share_update_db(context, share_id, host)
+        self._post_select_populate_filter_properties(filter_properties,
+                                                     weighed_host.obj)
+
+        # context is not serializable
+        filter_properties.pop('context', None)
+
+        self.share_rpcapi.create_share(context, updated_share, host,
+                                       request_spec=request_spec,
+                                       filter_properties=filter_properties,
+                                       snapshot_id=snapshot_id)
+
+    def _schedule_share(self, context, request_spec, filter_properties=None):
+        """Return the best-weighed host that meets the required specs,
+        or None if no host passes the filters.
+        """
+        elevated = context.elevated()
+
+        share_properties = request_spec['share_properties']
+        # Since Cinder uses mixed filters from Oslo and its own, which
+        # take 'resource_XX' and 'share_XX' as input respectively, copying
+        # 'share_XX' to 'resource_XX' will make both filters happy.
+        resource_properties = share_properties.copy()
+        share_type = request_spec.get("share_type", {})
+        resource_type = request_spec.get("share_type", {})
+        request_spec.update({'resource_properties': resource_properties})
+
+        config_options = self._get_configuration_options()
+
+        if filter_properties is None:
+            filter_properties = {}
+        self._populate_retry_share(filter_properties, resource_properties)
+
+        filter_properties.update({'context': context,
+                                  'request_spec': request_spec,
+                                  'config_options': config_options,
+                                  'share_type': share_type,
+                                  'resource_type': resource_type
+                                  })
+
+        self.populate_filter_properties_share(request_spec, filter_properties)
+
+        # Find our local list of acceptable hosts by filtering and
+        # weighing our options. We virtually consume resources on
+        # the selected host so subsequent selections can adjust
+        # accordingly.
+
+        # Note: remember, we are using an iterator here, so only
+        # traverse this list once.
+        hosts = self.host_manager.get_all_host_states_share(elevated)
+
+        # Filter local hosts based on requirements ...
+        hosts = self.host_manager.get_filtered_hosts(hosts,
+                                                     filter_properties)
+        if not hosts:
+            return None
+
+        LOG.debug(_("Filtered share %(hosts)s") % locals())
+        # weighed_hosts are sorted so the first entry is the best
+        # host for the job.
+        weighed_hosts = self.host_manager.get_weighed_hosts(hosts,
+                                                            filter_properties)
+        best_host = weighed_hosts[0]
+        LOG.debug(_("Choosing for share: %(best_host)s") % locals())
+        #NOTE(rushiagr): update the available space parameters as for volumes
+        best_host.obj.consume_from_volume(share_properties)
+        return best_host
+
+    def _populate_retry_share(self, filter_properties, properties):
+        """Populate filter properties with history of retries for this
+        request. If maximum retries is exceeded, raise NoValidHost.
+        """
+        max_attempts = self.max_attempts
+        retry = filter_properties.pop('retry', {})
+
+        if max_attempts == 1:
+            # re-scheduling is disabled.
+            return
+
+        # retry is enabled, update attempt count:
+        if retry:
+            retry['num_attempts'] += 1
+        else:
+            retry = {
+                'num_attempts': 1,
+                'hosts': []  # list of share service hosts tried
+            }
+        filter_properties['retry'] = retry
+
+        share_id = properties.get('share_id')
+        self._log_share_error(share_id, retry)
+
+        if retry['num_attempts'] > max_attempts:
+            msg = _("Exceeded max scheduling attempts %(max_attempts)d for "
+                    "share %(share_id)s") % locals()
+            raise exception.NoValidHost(reason=msg)
+
+    def _log_share_error(self, share_id, retry):
+        """If the request contained an exception from a previous share
+        create operation, log it to aid debugging.
+        """
+        exc = retry.pop('exc', None)  # string-ified exception from share
+        if not exc:
+            return  # no exception info from a previous attempt, skip
+
+        hosts = retry.get('hosts', None)
+        if not hosts:
+            return  # no previously attempted hosts, skip
+
+        last_host = hosts[-1]
+        msg = _("Error scheduling %(share_id)s from last share-service: "
+                "%(last_host)s : %(exc)s") % locals()
+        LOG.error(msg)
+
+    def populate_filter_properties_share(self, request_spec,
+                                         filter_properties):
+        """Stuff things into filter_properties. Can be overridden in a
+        subclass to add more data.
+        """
+        shr = request_spec['share_properties']
+        filter_properties['size'] = shr['size']
+        filter_properties['availability_zone'] = shr.get('availability_zone')
+        filter_properties['user_id'] = shr.get('user_id')
+        filter_properties['metadata'] = shr.get('metadata')
diff --git a/cinder/scheduler/filters/__init__.py b/cinder/scheduler/filters/__init__.py
new file mode 100644
index 0000000000..ce4951de97
--- /dev/null
+++ b/cinder/scheduler/filters/__init__.py
@@ -0,0 +1,14 @@
+# Copyright (c) 2013 OpenStack, LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
diff --git a/cinder/scheduler/filters/capacity_filter.py b/cinder/scheduler/filters/capacity_filter.py
new file mode 100644
index 0000000000..1f5ec624e2
--- /dev/null
+++ b/cinder/scheduler/filters/capacity_filter.py
@@ -0,0 +1,57 @@
+# Copyright (c) 2012 Intel
+# Copyright (c) 2012 OpenStack, LLC.
+#
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+import math
+
+from cinder.openstack.common import log as logging
+from cinder.openstack.common.scheduler import filters
+
+
+LOG = logging.getLogger(__name__)
+
+
+class CapacityFilter(filters.BaseHostFilter):
+    """CapacityFilter filters based on volume host's capacity utilization."""
+
+    def host_passes(self, host_state, filter_properties):
+        """Return True if host has sufficient capacity."""
+        volume_size = filter_properties.get('size')
+
+        if host_state.free_capacity_gb is None:
+            # Fail Safe
+            LOG.error(_("Free capacity not set: "
+                        "volume node info collection broken."))
+            return False
+
+        free_space = host_state.free_capacity_gb
+        if free_space == 'infinite' or free_space == 'unknown':
+            # NOTE(zhiteng) for those back-ends that cannot report actual
+            # available capacity, we assume they are able to serve the
+            # request. Even if they cannot, the retry mechanism is
+            # able to handle the failure by rescheduling
+            return True
+        reserved = float(host_state.reserved_percentage) / 100
+        free = math.floor(free_space * (1 - reserved))
+        if free < volume_size:
+            LOG.warning(_("Insufficient free space for volume creation "
+                          "(requested / avail): "
+                          "%(requested)s/%(available)s")
+                        % {'requested': volume_size,
+                           'available': free})
+
+        return free >= volume_size
diff --git a/cinder/scheduler/filters/retry_filter.py b/cinder/scheduler/filters/retry_filter.py
new file mode 100644
index 0000000000..ae84a4e277
--- /dev/null
+++ b/cinder/scheduler/filters/retry_filter.py
@@ -0,0 +1,45 @@
+# Copyright (c) 2012 OpenStack, LLC.
+# All Rights Reserved.
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from cinder.openstack.common import log as logging +from cinder.openstack.common.scheduler import filters + +LOG = logging.getLogger(__name__) + + +class RetryFilter(filters.BaseHostFilter): + """Filter out nodes that have already been attempted for scheduling + purposes + """ + + def host_passes(self, host_state, filter_properties): + """Skip nodes that have already been attempted.""" + retry = filter_properties.get('retry', None) + if not retry: + # Re-scheduling is disabled + LOG.debug("Re-scheduling is disabled") + return True + + hosts = retry.get('hosts', []) + host = host_state.host + + passes = host not in hosts + pass_msg = "passes" if passes else "fails" + + LOG.debug(_("Host %(host)s %(pass_msg)s. Previously tried hosts: " + "%(hosts)s") % locals()) + + # Host passes if it's not in the list of previously attempted hosts: + return passes diff --git a/cinder/scheduler/host_manager.py b/cinder/scheduler/host_manager.py new file mode 100644 index 0000000000..45039902bf --- /dev/null +++ b/cinder/scheduler/host_manager.py @@ -0,0 +1,337 @@ +# Copyright (c) 2011 OpenStack, LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Manage hosts in the current zone. 
+""" + +import UserDict + +from oslo.config import cfg + +from cinder import db +from cinder import exception +from cinder import flags +from cinder.openstack.common import log as logging +from cinder.openstack.common.scheduler import filters +from cinder.openstack.common.scheduler import weights +from cinder.openstack.common import timeutils +from cinder import utils + +host_manager_opts = [ + cfg.ListOpt('scheduler_default_filters', + default=[ + 'AvailabilityZoneFilter', + 'CapacityFilter', + 'CapabilitiesFilter' + ], + help='Which filter class names to use for filtering hosts ' + 'when not specified in the request.'), + cfg.ListOpt('scheduler_default_weighers', + default=[ + 'CapacityWeigher' + ], + help='Which weigher class names to use for weighing hosts.') +] + +FLAGS = flags.FLAGS +FLAGS.register_opts(host_manager_opts) + +LOG = logging.getLogger(__name__) + + +class ReadOnlyDict(UserDict.IterableUserDict): + """A read-only dict.""" + def __init__(self, source=None): + self.data = {} + self.update(source) + + def __setitem__(self, key, item): + raise TypeError + + def __delitem__(self, key): + raise TypeError + + def clear(self): + raise TypeError + + def pop(self, key, *args): + raise TypeError + + def popitem(self): + raise TypeError + + def update(self, source=None): + if source is None: + return + elif isinstance(source, UserDict.UserDict): + self.data = source.data + elif isinstance(source, type({})): + self.data = source + else: + raise TypeError + + +class HostState(object): + """Mutable and immutable information tracked for a host.""" + + def __init__(self, host, capabilities=None, service=None): + self.host = host + self.update_capabilities(capabilities, service) + + self.volume_backend_name = None + self.share_backend_name = None + self.vendor_name = None + self.driver_version = 0 + self.storage_protocol = None + self.QoS_support = False + # Mutable available resources. + # These will change as resources are virtually "consumed". 
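+        # (see consume_from_volume() below)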
+        self.total_capacity_gb = 0
+        self.free_capacity_gb = None
+        self.reserved_percentage = 0
+
+        self.updated = None
+
+    def update_capabilities(self, capabilities=None, service=None):
+        # Read-only capability dicts
+
+        if capabilities is None:
+            capabilities = {}
+        self.capabilities = ReadOnlyDict(capabilities)
+        if service is None:
+            service = {}
+        self.service = ReadOnlyDict(service)
+
+    def update_from_volume_capability(self, capability):
+        """Update information about a host from its volume_node info."""
+        if capability:
+            if self.updated and self.updated > capability['timestamp']:
+                return
+
+            self.volume_backend_name = capability.get('volume_backend_name',
+                                                      None)
+            self.vendor_name = capability.get('vendor_name', None)
+            self.driver_version = capability.get('driver_version', None)
+            self.storage_protocol = capability.get('storage_protocol', None)
+            self.QoS_support = capability.get('QoS_support', False)
+
+            self.total_capacity_gb = capability['total_capacity_gb']
+            self.free_capacity_gb = capability['free_capacity_gb']
+            self.reserved_percentage = capability['reserved_percentage']
+
+            self.updated = capability['timestamp']
+
+    def update_from_share_capability(self, capability):
+        """Update information about a host from its share_node info."""
+        if capability:
+            if self.updated and self.updated > capability['timestamp']:
+                return
+
+            self.share_backend_name = capability.get('share_backend_name',
+                                                     None)
+            self.vendor_name = capability.get('vendor_name', None)
+            self.driver_version = capability.get('driver_version', None)
+            self.storage_protocol = capability.get('storage_protocol', None)
+            self.QoS_support = capability.get('QoS_support', False)
+
+            self.total_capacity_gb = capability['total_capacity_gb']
+            self.free_capacity_gb = capability['free_capacity_gb']
+            self.reserved_percentage = capability['reserved_percentage']
+
+            self.updated = capability['timestamp']
+
+    def consume_from_volume(self, volume):
+        """Incrementally update host state from a volume."""
+        volume_gb = volume['size']
+        if self.free_capacity_gb == 'infinite':
+            # There's virtually infinite space on back-end
+            pass
+        elif self.free_capacity_gb == 'unknown':
+            # Unable to determine the actual free space on back-end
+            pass
+        else:
+            self.free_capacity_gb -= volume_gb
+        self.updated = timeutils.utcnow()
+
+    def __repr__(self):
+        return ("host '%s': free_capacity_gb: %s" %
+                (self.host, self.free_capacity_gb))
+
+
+class HostManager(object):
+    """Base HostManager class."""
+
+    host_state_cls = HostState
+
+    def __init__(self):
+        self.service_states = {}  # { <host> : { <service> : { cap k : v }}}
+        self.host_state_map = {}
+        self.filter_handler = filters.HostFilterHandler('cinder.scheduler.'
+                                                        'filters')
+        self.filter_classes = self.filter_handler.get_all_classes()
+        self.weight_handler = weights.HostWeightHandler('cinder.scheduler.'
+                                                        'weights')
+        self.weight_classes = self.weight_handler.get_all_classes()
+
+    def _choose_host_filters(self, filter_cls_names):
+        """Since the caller may specify which filters to use, we need
+        to have an authoritative list of what is permissible. This
+        function checks the filter names against a predefined set
+        of acceptable filters.
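+
+        For example, the default filter set could be overridden in
+        cinder.conf (illustrative value only):
+
+            scheduler_default_filters=AvailabilityZoneFilter,CapacityFilter,RetryFilter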
+ """ + if filter_cls_names is None: + filter_cls_names = FLAGS.scheduler_default_filters + if not isinstance(filter_cls_names, (list, tuple)): + filter_cls_names = [filter_cls_names] + good_filters = [] + bad_filters = [] + for filter_name in filter_cls_names: + found_class = False + for cls in self.filter_classes: + if cls.__name__ == filter_name: + found_class = True + good_filters.append(cls) + break + if not found_class: + bad_filters.append(filter_name) + if bad_filters: + msg = ", ".join(bad_filters) + raise exception.SchedulerHostFilterNotFound(filter_name=msg) + return good_filters + + def _choose_host_weighers(self, weight_cls_names): + """Since the caller may specify which weighers to use, we need + to have an authoritative list of what is permissible. This + function checks the weigher names against a predefined set + of acceptable weighers. + """ + if weight_cls_names is None: + weight_cls_names = FLAGS.scheduler_default_weighers + if not isinstance(weight_cls_names, (list, tuple)): + weight_cls_names = [weight_cls_names] + + good_weighers = [] + bad_weighers = [] + for weigher_name in weight_cls_names: + found_class = False + for cls in self.weight_classes: + if cls.__name__ == weigher_name: + good_weighers.append(cls) + found_class = True + break + if not found_class: + bad_weighers.append(weigher_name) + if bad_weighers: + msg = ", ".join(bad_weighers) + raise exception.SchedulerHostWeigherNotFound(weigher_name=msg) + return good_weighers + + def get_filtered_hosts(self, hosts, filter_properties, + filter_class_names=None): + """Filter hosts and return only ones passing all filters""" + filter_classes = self._choose_host_filters(filter_class_names) + return self.filter_handler.get_filtered_objects(filter_classes, + hosts, + filter_properties) + + def get_weighed_hosts(self, hosts, weight_properties, + weigher_class_names=None): + """Weigh the hosts""" + weigher_classes = self._choose_host_weighers(weigher_class_names) + return self.weight_handler.get_weighed_objects(weigher_classes, + hosts, + weight_properties) + + def update_service_capabilities(self, service_name, host, capabilities): + """Update the per-service capabilities based on this notification.""" + if service_name not in ('volume', 'share'): + LOG.debug(_('Ignoring %(service_name)s service update ' + 'from %(host)s'), locals()) + return + + LOG.debug(_("Received %(service_name)s service update from " + "%(host)s.") % locals()) + + # Copy the capabilities, so we don't modify the original dict + capab_copy = dict(capabilities) + capab_copy["timestamp"] = timeutils.utcnow() # Reported time + self.service_states[host] = capab_copy + + def get_all_host_states(self, context): + """Returns a dict of all the hosts the HostManager + knows about. Also, each of the consumable resources in HostState + are pre-populated and adjusted based on data in the db. 
+ + For example: + {'192.168.1.100': HostState(), ...} + """ + + # Get resource usage across the available volume nodes: + topic = FLAGS.volume_topic + volume_services = db.service_get_all_by_topic(context, topic) + for service in volume_services: + if not utils.service_is_up(service) or service['disabled']: + LOG.warn(_("service is down or disabled.")) + continue + host = service['host'] + capabilities = self.service_states.get(host, None) + host_state = self.host_state_map.get(host) + if host_state: + # copy capabilities to host_state.capabilities + host_state.update_capabilities(capabilities, + dict(service.iteritems())) + else: + host_state = self.host_state_cls(host, + capabilities=capabilities, + service= + dict(service.iteritems())) + self.host_state_map[host] = host_state + # update host_state + host_state.update_from_volume_capability(capabilities) + + return self.host_state_map.itervalues() + + def get_all_host_states_share(self, context): + """Returns a dict of all the hosts the HostManager + knows about. Also, each of the consumable resources in HostState + are pre-populated and adjusted based on data in the db. + + For example: + {'192.168.1.100': HostState(), ...} + """ + + # Get resource usage across the available share nodes: + topic = FLAGS.share_topic + share_services = db.service_get_all_by_topic(context, topic) + for service in share_services: + if not utils.service_is_up(service) or service['disabled']: + LOG.warn(_("service is down or disabled.")) + continue + host = service['host'] + capabilities = self.service_states.get(host, None) + host_state = self.host_state_map.get(host) + if host_state: + # copy capabilities to host_state.capabilities + host_state.update_capabilities(capabilities, + dict(service.iteritems())) + else: + host_state = self.host_state_cls(host, + capabilities=capabilities, + service= + dict(service.iteritems())) + self.host_state_map[host] = host_state + # update host_state + host_state.update_from_share_capability(capabilities) + + return self.host_state_map.itervalues() diff --git a/cinder/scheduler/manager.py b/cinder/scheduler/manager.py new file mode 100644 index 0000000000..f41fb0b3b7 --- /dev/null +++ b/cinder/scheduler/manager.py @@ -0,0 +1,178 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2010 OpenStack, LLC. +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+
+"""
+Scheduler Service
+"""
+
+from oslo.config import cfg
+
+from cinder import context
+from cinder import db
+from cinder import exception
+from cinder import flags
+from cinder import manager
+from cinder.openstack.common import excutils
+from cinder.openstack.common import importutils
+from cinder.openstack.common import log as logging
+from cinder.openstack.common.notifier import api as notifier
+from cinder.share import rpcapi as share_rpcapi
+from cinder.volume import rpcapi as volume_rpcapi
+
+LOG = logging.getLogger(__name__)
+
+scheduler_driver_opt = cfg.StrOpt('scheduler_driver',
+                                  default='cinder.scheduler.filter_scheduler.'
+                                          'FilterScheduler',
+                                  help='Default scheduler driver to use')
+
+FLAGS = flags.FLAGS
+FLAGS.register_opt(scheduler_driver_opt)
+
+
+class SchedulerManager(manager.Manager):
+    """Chooses a host to create volumes."""
+
+    RPC_API_VERSION = '1.3'
+
+    def __init__(self, scheduler_driver=None, service_name=None,
+                 *args, **kwargs):
+        if not scheduler_driver:
+            scheduler_driver = FLAGS.scheduler_driver
+        self.driver = importutils.import_object(scheduler_driver)
+        super(SchedulerManager, self).__init__(*args, **kwargs)
+
+    def init_host(self):
+        ctxt = context.get_admin_context()
+        self.request_service_capabilities(ctxt)
+
+    def get_host_list(self, context):
+        """Get a list of hosts from the HostManager."""
+        return self.driver.get_host_list()
+
+    def get_service_capabilities(self, context):
+        """Get the normalized set of capabilities for this zone."""
+        return self.driver.get_service_capabilities()
+
+    def update_service_capabilities(self, context, service_name=None,
+                                    host=None, capabilities=None, **kwargs):
+        """Process a capability update from a service node."""
+        if capabilities is None:
+            capabilities = {}
+        self.driver.update_service_capabilities(service_name,
+                                                host,
+                                                capabilities)
+
+    def create_volume(self, context, topic, volume_id, snapshot_id=None,
+                      image_id=None, request_spec=None,
+                      filter_properties=None):
+        try:
+            if request_spec is None:
+                # For RPC version < 1.2 backward compatibility
+                request_spec = {}
+                volume_ref = db.volume_get(context, volume_id)
+                size = volume_ref.get('size')
+                availability_zone = volume_ref.get('availability_zone')
+                volume_type_id = volume_ref.get('volume_type_id')
+                vol_type = db.volume_type_get(context, volume_type_id)
+                volume_properties = {'size': size,
+                                     'availability_zone': availability_zone,
+                                     'volume_type_id': volume_type_id}
+                request_spec.update(
+                    {'volume_id': volume_id,
+                     'snapshot_id': snapshot_id,
+                     'image_id': image_id,
+                     'volume_properties': volume_properties,
+                     'volume_type': dict(vol_type)})
+
+            self.driver.schedule_create_volume(context, request_spec,
+                                               filter_properties)
+        except exception.NoValidHost as ex:
+            volume_state = {'volume_state': {'status': 'error'}}
+            self._set_volume_state_and_notify('create_volume',
+                                              volume_state,
+                                              context, ex, request_spec)
+        except Exception as ex:
+            with excutils.save_and_reraise_exception():
+                volume_state = {'volume_state': {'status': 'error'}}
+                self._set_volume_state_and_notify('create_volume',
+                                                  volume_state,
+                                                  context, ex, request_spec)
+
+    def create_share(self, context, topic, share_id, snapshot_id=None,
+                     request_spec=None, filter_properties=None):
+        try:
+            self.driver.schedule_create_share(context, request_spec,
+                                              filter_properties)
+        except exception.NoValidHost as ex:
+            self._set_share_error_state_and_notify('create_share',
+                                                   context, ex, request_spec)
+        except Exception as ex:
+            with excutils.save_and_reraise_exception():
+                
self._set_share_error_state_and_notify('create_share', + context, ex, + request_spec) + + def _set_share_error_state_and_notify(self, method, context, ex, + request_spec): + LOG.warning(_("Failed to schedule_%(method)s: %(ex)s") % locals()) + + share_state = {'status': 'error'} + properties = request_spec.get('share_properties', {}) + + share_id = request_spec.get('share_id', None) + + if share_id: + db.share_update(context, share_id, share_state) + + payload = dict(request_spec=request_spec, + share_properties=properties, + share_id=share_id, + state=share_state, + method=method, + reason=ex) + + notifier.notify(context, notifier.publisher_id("scheduler"), + 'scheduler.' + method, notifier.ERROR, payload) + + def _set_volume_state_and_notify(self, method, updates, context, ex, + request_spec): + LOG.error(_("Failed to schedule_%(method)s: %(ex)s") % locals()) + + volume_state = updates['volume_state'] + properties = request_spec.get('volume_properties', {}) + + volume_id = request_spec.get('volume_id', None) + + if volume_id: + db.volume_update(context, volume_id, volume_state) + + payload = dict(request_spec=request_spec, + volume_properties=properties, + volume_id=volume_id, + state=volume_state, + method=method, + reason=ex) + + notifier.notify(context, notifier.publisher_id("scheduler"), + 'scheduler.' + method, notifier.ERROR, payload) + + def request_service_capabilities(self, context): + volume_rpcapi.VolumeAPI().publish_service_capabilities(context) + share_rpcapi.ShareAPI().publish_service_capabilities(context) diff --git a/cinder/scheduler/rpcapi.py b/cinder/scheduler/rpcapi.py new file mode 100644 index 0000000000..4a9bc0a2db --- /dev/null +++ b/cinder/scheduler/rpcapi.py @@ -0,0 +1,79 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012, Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Client side of the scheduler manager RPC API. +""" + +from cinder import flags +from cinder.openstack.common import jsonutils +import cinder.openstack.common.rpc.proxy + + +FLAGS = flags.FLAGS + + +class SchedulerAPI(cinder.openstack.common.rpc.proxy.RpcProxy): + '''Client side of the scheduler rpc API. + + API version history: + + 1.0 - Initial version. 
+ 1.1 - Add create_volume() method + 1.2 - Add request_spec, filter_properties arguments + to create_volume() + 1.3 - Add create_share() method + ''' + + RPC_API_VERSION = '1.0' + + def __init__(self): + super(SchedulerAPI, self).__init__( + topic=FLAGS.scheduler_topic, + default_version=self.RPC_API_VERSION) + + def create_volume(self, ctxt, topic, volume_id, snapshot_id=None, + image_id=None, request_spec=None, + filter_properties=None): + request_spec_p = jsonutils.to_primitive(request_spec) + return self.cast(ctxt, self.make_msg( + 'create_volume', + topic=topic, + volume_id=volume_id, + snapshot_id=snapshot_id, + image_id=image_id, + request_spec=request_spec_p, + filter_properties=filter_properties), + version='1.2') + + def create_share(self, ctxt, topic, share_id, snapshot_id=None, + request_spec=None, filter_properties=None): + request_spec_p = jsonutils.to_primitive(request_spec) + return self.cast(ctxt, self.make_msg( + 'create_share', + topic=topic, + share_id=share_id, + snapshot_id=snapshot_id, + request_spec=request_spec_p, + filter_properties=filter_properties), + version='1.3') + + def update_service_capabilities(self, ctxt, + service_name, host, + capabilities): + self.fanout_cast(ctxt, self.make_msg('update_service_capabilities', + service_name=service_name, host=host, + capabilities=capabilities)) diff --git a/cinder/scheduler/scheduler_options.py b/cinder/scheduler/scheduler_options.py new file mode 100644 index 0000000000..9b05bb7178 --- /dev/null +++ b/cinder/scheduler/scheduler_options.py @@ -0,0 +1,105 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 OpenStack, LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +SchedulerOptions monitors a local .json file for changes and loads +it if needed. This file is converted to a data structure and passed +into the filtering and weighing functions which can use it for +dynamic configuration. +""" + +import datetime +import json +import os + +from oslo.config import cfg + +from cinder import flags +from cinder.openstack.common import log as logging +from cinder.openstack.common import timeutils + +scheduler_json_config_location_opt = cfg.StrOpt( + 'scheduler_json_config_location', + default='', + help='Absolute path to scheduler configuration JSON file.') + +FLAGS = flags.FLAGS +FLAGS.register_opt(scheduler_json_config_location_opt) + +LOG = logging.getLogger(__name__) + + +class SchedulerOptions(object): + """ + SchedulerOptions monitors a local .json file for changes and loads it + if needed. This file is converted to a data structure and passed into + the filtering and weighing functions which can use it for dynamic + configuration. + """ + + def __init__(self): + super(SchedulerOptions, self).__init__() + self.data = {} + self.last_modified = None + self.last_checked = None + + def _get_file_handle(self, filename): + """Get file handle. Broken out for testing.""" + return open(filename) + + def _get_file_timestamp(self, filename): + """Get the last modified datetime. 
Broken out for testing.""" + try: + return os.path.getmtime(filename) + except os.error, e: + LOG.exception(_("Could not stat scheduler options file " + "%(filename)s: '%(e)s'"), locals()) + raise + + def _load_file(self, handle): + """Decode the JSON file. Broken out for testing.""" + try: + return json.load(handle) + except ValueError, e: + LOG.exception(_("Could not decode scheduler options: " + "'%(e)s'") % locals()) + return {} + + def _get_time_now(self): + """Get current UTC. Broken out for testing.""" + return timeutils.utcnow() + + def get_configuration(self, filename=None): + """Check the json file for changes and load it if needed.""" + if not filename: + filename = FLAGS.scheduler_json_config_location + if not filename: + return self.data + if self.last_checked: + now = self._get_time_now() + if now - self.last_checked < datetime.timedelta(minutes=5): + return self.data + + last_modified = self._get_file_timestamp(filename) + if (not last_modified or not self.last_modified or + last_modified > self.last_modified): + self.data = self._load_file(self._get_file_handle(filename)) + self.last_modified = last_modified + if not self.data: + self.data = {} + + return self.data diff --git a/cinder/scheduler/simple.py b/cinder/scheduler/simple.py new file mode 100644 index 0000000000..c088a72f6f --- /dev/null +++ b/cinder/scheduler/simple.py @@ -0,0 +1,137 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2010 OpenStack, LLC. +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +""" +Simple Scheduler +""" + +from oslo.config import cfg + +from cinder import db +from cinder import exception +from cinder import flags +from cinder.scheduler import chance +from cinder.scheduler import driver +from cinder import utils + +simple_scheduler_opts = [ + cfg.IntOpt("max_gigabytes", + default=10000, + help="maximum number of volume gigabytes to allow per host"), ] + +FLAGS = flags.FLAGS +FLAGS.register_opts(simple_scheduler_opts) + + +class SimpleScheduler(chance.ChanceScheduler): + """Implements Naive Scheduler that tries to find least loaded host.""" + + def schedule_create_volume(self, context, request_spec, filter_properties): + """Picks a host that is up and has the fewest volumes.""" + elevated = context.elevated() + + volume_id = request_spec.get('volume_id') + snapshot_id = request_spec.get('snapshot_id') + image_id = request_spec.get('image_id') + volume_properties = request_spec.get('volume_properties') + volume_size = volume_properties.get('size') + availability_zone = volume_properties.get('availability_zone') + + zone, host = None, None + if availability_zone: + zone, _x, host = availability_zone.partition(':') + if host and context.is_admin: + topic = FLAGS.volume_topic + service = db.service_get_by_args(elevated, host, topic) + if not utils.service_is_up(service): + raise exception.WillNotSchedule(host=host) + updated_volume = driver.volume_update_db(context, volume_id, host) + self.volume_rpcapi.create_volume(context, + updated_volume, + host, + snapshot_id, + image_id) + return None + + results = db.service_get_all_volume_sorted(elevated) + if zone: + results = [(service, gigs) for (service, gigs) in results + if service['availability_zone'] == zone] + for result in results: + (service, volume_gigabytes) = result + if volume_gigabytes + volume_size > FLAGS.max_gigabytes: + msg = _("Not enough allocatable volume gigabytes remaining") + raise exception.NoValidHost(reason=msg) + if utils.service_is_up(service) and not service['disabled']: + updated_volume = driver.volume_update_db(context, volume_id, + service['host']) + self.volume_rpcapi.create_volume(context, + updated_volume, + service['host'], + snapshot_id, + image_id) + return None + msg = _("Is the appropriate service running?") + raise exception.NoValidHost(reason=msg) + + def schedule_create_share(self, context, request_spec, filter_properties): + """Picks a host that is up and has the fewest shares.""" + #TODO(rushiagr) - pick only hosts that run shares + elevated = context.elevated() + + share_id = request_spec.get('share_id') + snapshot_id = request_spec.get('snapshot_id') + share_properties = request_spec.get('share_properties') + share_size = share_properties.get('size') + availability_zone = share_properties.get('availability_zone') + + zone, host = None, None + if availability_zone: + zone, _x, host = availability_zone.partition(':') + if host and context.is_admin: + service = db.service_get_by_args(elevated, host, FLAGS.share_topic) + if not utils.service_is_up(service): + raise exception.WillNotSchedule(host=host) + updated_share = driver.share_update_db(context, share_id, host) + self.share_rpcapi.create_share(context, + updated_share, + host, + snapshot_id, + None) + return None + + results = db.service_get_all_share_sorted(elevated) + if zone: + results = [(service, gigs) for (service, gigs) in results + if service['availability_zone'] == zone] + for result in results: + (service, share_gigabytes) = result + if share_gigabytes + share_size > FLAGS.max_gigabytes: + msg = _("Not enough 
allocatable share gigabytes remaining") + raise exception.NoValidHost(reason=msg) + if utils.service_is_up(service) and not service['disabled']: + updated_share = driver.share_update_db(context, share_id, + service['host']) + self.share_rpcapi.create_share(context, + updated_share, + service['host'], + snapshot_id, None) + return None + msg = _("Is the appropriate service running?") + raise exception.NoValidHost(reason=msg) diff --git a/cinder/scheduler/weights/__init__.py b/cinder/scheduler/weights/__init__.py new file mode 100644 index 0000000000..ce4951de97 --- /dev/null +++ b/cinder/scheduler/weights/__init__.py @@ -0,0 +1,14 @@ +# Copyright (c) 2013 OpenStack, LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/cinder/scheduler/weights/capacity.py b/cinder/scheduler/weights/capacity.py new file mode 100644 index 0000000000..e2042e9cd7 --- /dev/null +++ b/cinder/scheduler/weights/capacity.py @@ -0,0 +1,56 @@ +# Copyright (c) 2012 OpenStack, LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Capacity Weigher. Weigh hosts by their available capacity. + +The default is to spread volumes across all hosts evenly. If you prefer +stacking, you can set the 'capacity_weight_multiplier' option to a negative +number and the weighing has the opposite effect of the default. +""" + +import math + +from oslo.config import cfg + +from cinder import flags +from cinder.openstack.common.scheduler import weights + +capacity_weight_opts = [ + cfg.FloatOpt('capacity_weight_multiplier', + default=1.0, + help='Multiplier used for weighing volume capacity. ' + 'Negative numbers mean to stack vs spread.'), +] + +FLAGS = flags.FLAGS +FLAGS.register_opts(capacity_weight_opts) + + +class CapacityWeigher(weights.BaseHostWeigher): + def _weight_multiplier(self): + """Override the weight multiplier.""" + return FLAGS.capacity_weight_multiplier + + def _weigh_object(self, host_state, weight_properties): + """Higher weights win. We want spreading to be the default.""" + reserved = float(host_state.reserved_percentage) / 100 + free_space = host_state.free_capacity_gb + if free_space == 'infinite' or free_space == 'unknown': + #(zhiteng) 'infinite' and 'unknown' are treated the same + # here, for sorting purpose. 
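Stepping back to cinder/scheduler/simple.py above for a moment: one detail that is easy to miss is that the availability zone string may carry an optional `zone:host` suffix, which lets an admin pin a request to a specific host. A sketch of just that parse (the helper name and asserts are illustrative):

```python
def parse_availability_zone(availability_zone):
    """Return (zone, host); host is '' unless a 'zone:host' pin was given."""
    zone, _sep, host = (availability_zone or '').partition(':')
    return zone, host


assert parse_availability_zone('nova') == ('nova', '')
assert parse_availability_zone('nova:volhost1') == ('nova', 'volhost1')
assert parse_availability_zone(None) == ('', '')
```

In the patch the host pin is honored only when `context.is_admin`, and a pinned host that fails `utils.service_is_up()` raises `WillNotSchedule` instead of falling through to the least-loaded scan.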
+ free = float('inf') + else: + free = math.floor(host_state.free_capacity_gb * (1 - reserved)) + return free diff --git a/cinder/service.py b/cinder/service.py new file mode 100644 index 0000000000..4e7c549770 --- /dev/null +++ b/cinder/service.py @@ -0,0 +1,622 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# Copyright 2011 Justin Santa Barbara +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Generic Node base class for all workers that run on hosts.""" + +import errno +import inspect +import os +import random +import signal +import sys +import time + +import eventlet +import greenlet +from oslo.config import cfg + +from cinder import context +from cinder import db +from cinder import exception +from cinder import flags +from cinder.openstack.common import importutils +from cinder.openstack.common import log as logging +from cinder.openstack.common import rpc +from cinder import utils +from cinder import version +from cinder import wsgi + +LOG = logging.getLogger(__name__) + +service_opts = [ + cfg.IntOpt('report_interval', + default=10, + help='seconds between nodes reporting state to datastore'), + cfg.IntOpt('periodic_interval', + default=60, + help='seconds between running periodic tasks'), + cfg.IntOpt('periodic_fuzzy_delay', + default=60, + help='range of seconds to randomly delay when starting the' + ' periodic task scheduler to reduce stampeding.' + ' (Disable by setting to 0)'), + cfg.StrOpt('osapi_volume_listen', + default="0.0.0.0", + help='IP address for OpenStack Volume API to listen'), + cfg.IntOpt('osapi_volume_listen_port', + default=8776, + help='port for os volume api to listen'), ] + +FLAGS = flags.FLAGS +FLAGS.register_opts(service_opts) + + +class SignalExit(SystemExit): + def __init__(self, signo, exccode=1): + super(SignalExit, self).__init__(exccode) + self.signo = signo + + +class Launcher(object): + """Launch one or more services and wait for them to complete.""" + + def __init__(self): + """Initialize the service launcher. + + :returns: None + + """ + self._services = [] + + @staticmethod + def run_server(server): + """Start and wait for a server to finish. + + :param service: Server to run and wait for. + :returns: None + + """ + server.start() + server.wait() + + def launch_server(self, server): + """Load and start the given server. + + :param server: The server you would like to start. + :returns: None + + """ + gt = eventlet.spawn(self.run_server, server) + self._services.append(gt) + + def stop(self): + """Stop all services which are currently running. + + :returns: None + + """ + for service in self._services: + service.kill() + + def wait(self): + """Waits until all services have been stopped, and then returns. + + :returns: None + + """ + def sigterm(sig, frame): + LOG.audit(_("SIGTERM received")) + # NOTE(jk0): Raise a ^C which is caught by the caller and cleanly + # shuts down the service. 
This does not yet handle eventlet + # threads. + raise KeyboardInterrupt + + signal.signal(signal.SIGTERM, sigterm) + + for service in self._services: + try: + service.wait() + except greenlet.GreenletExit: + pass + + +class ServerWrapper(object): + def __init__(self, server, workers): + self.server = server + self.workers = workers + self.children = set() + self.forktimes = [] + self.failed = False + + +class ProcessLauncher(object): + def __init__(self): + self.children = {} + self.sigcaught = None + self.totalwrap = 0 + self.failedwrap = 0 + self.running = True + rfd, self.writepipe = os.pipe() + self.readpipe = eventlet.greenio.GreenPipe(rfd, 'r') + + signal.signal(signal.SIGTERM, self._handle_signal) + signal.signal(signal.SIGINT, self._handle_signal) + + def _handle_signal(self, signo, frame): + self.sigcaught = signo + self.running = False + + # Allow the process to be killed again and die from natural causes + signal.signal(signal.SIGTERM, signal.SIG_DFL) + signal.signal(signal.SIGINT, signal.SIG_DFL) + + def _pipe_watcher(self): + # This will block until the write end is closed when the parent + # dies unexpectedly + self.readpipe.read() + + LOG.info(_('Parent process has died unexpectedly, exiting')) + + sys.exit(1) + + def _child_process(self, server): + # Setup child signal handlers differently + def _sigterm(*args): + signal.signal(signal.SIGTERM, signal.SIG_DFL) + raise SignalExit(signal.SIGTERM) + + signal.signal(signal.SIGTERM, _sigterm) + # Block SIGINT and let the parent send us a SIGTERM + # signal.signal(signal.SIGINT, signal.SIG_IGN) + # This differs from the behavior in nova in that we dont ignore this + # It allows the non-wsgi services to be terminated properly + signal.signal(signal.SIGINT, _sigterm) + + # Reopen the eventlet hub to make sure we don't share an epoll + # fd with parent and/or siblings, which would be bad + eventlet.hubs.use_hub() + + # Close write to ensure only parent has it open + os.close(self.writepipe) + # Create greenthread to watch for parent to close pipe + eventlet.spawn(self._pipe_watcher) + + # Reseed random number generator + random.seed() + + launcher = Launcher() + launcher.run_server(server) + + def _start_child(self, wrap): + if len(wrap.forktimes) > wrap.workers: + # Limit ourselves to one process a second (over the period of + # number of workers * 1 second). This will allow workers to + # start up quickly but ensure we don't fork off children that + # die instantly too quickly. + if time.time() - wrap.forktimes[0] < wrap.workers: + LOG.info(_('Forking too fast, sleeping')) + time.sleep(1) + + wrap.forktimes.pop(0) + + wrap.forktimes.append(time.time()) + + pid = os.fork() + if pid == 0: + # NOTE(johannes): All exceptions are caught to ensure this + # doesn't fallback into the loop spawning children. It would + # be bad for a child to spawn more children. 
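The child branch that resumes below maps a caught signal or `SystemExit` onto a numeric exit status before calling `os._exit()`; status 2 is what the parent's `_wait_child()` later counts as a failure. The same translation, condensed into a self-contained sketch (the `serve` and `logger` parameters are illustrative plumbing):

```python
import signal


class SignalExit(SystemExit):
    def __init__(self, signo, exccode=1):
        super(SignalExit, self).__init__(exccode)
        self.signo = signo


def run_child(serve, logger):
    """Run `serve` and translate its outcome into a process exit status."""
    status = 0
    try:
        serve()
    except SignalExit as exc:
        names = {signal.SIGTERM: 'SIGTERM', signal.SIGINT: 'SIGINT'}
        logger('Caught %s, exiting' % names[exc.signo])
        status = exc.code
    except SystemExit as exc:
        status = exc.code
    except BaseException:
        logger('Unhandled exception')
        status = 2  # parent treats this wrapper as failed
    return status
```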
+ status = 0 + try: + self._child_process(wrap.server) + except SignalExit as exc: + signame = {signal.SIGTERM: 'SIGTERM', + signal.SIGINT: 'SIGINT'}[exc.signo] + LOG.info(_('Caught %s, exiting'), signame) + status = exc.code + except SystemExit as exc: + status = exc.code + except BaseException: + LOG.exception(_('Unhandled exception')) + status = 2 + finally: + wrap.server.stop() + + os._exit(status) + + LOG.info(_('Started child %d'), pid) + + wrap.children.add(pid) + self.children[pid] = wrap + + return pid + + def launch_server(self, server, workers=1): + wrap = ServerWrapper(server, workers) + self.totalwrap = self.totalwrap + 1 + LOG.info(_('Starting %d workers'), wrap.workers) + while (self.running and len(wrap.children) < wrap.workers + and not wrap.failed): + self._start_child(wrap) + + def _wait_child(self): + try: + # Don't block if no child processes have exited + pid, status = os.waitpid(0, os.WNOHANG) + if not pid: + return None + except OSError as exc: + if exc.errno not in (errno.EINTR, errno.ECHILD): + raise + return None + + code = 0 + if os.WIFSIGNALED(status): + sig = os.WTERMSIG(status) + LOG.info(_('Child %(pid)d killed by signal %(sig)d'), locals()) + else: + code = os.WEXITSTATUS(status) + LOG.info(_('Child %(pid)d exited with status %(code)d'), locals()) + + if pid not in self.children: + LOG.warning(_('pid %d not in child list'), pid) + return None + + wrap = self.children.pop(pid) + wrap.children.remove(pid) + if 2 == code: + wrap.failed = True + self.failedwrap = self.failedwrap + 1 + LOG.info(_('_wait_child %d'), self.failedwrap) + if self.failedwrap == self.totalwrap: + self.running = False + return wrap + + def wait(self): + """Loop waiting on children to die and respawning as necessary.""" + while self.running: + wrap = self._wait_child() + if not wrap: + # Yield to other threads if no children have exited + # Sleep for a short time to avoid excessive CPU usage + # (see bug #1095346) + eventlet.greenthread.sleep(.01) + continue + + LOG.info(_('wait wrap.failed %s'), wrap.failed) + while (self.running and len(wrap.children) < wrap.workers + and not wrap.failed): + self._start_child(wrap) + + if self.sigcaught: + signame = {signal.SIGTERM: 'SIGTERM', + signal.SIGINT: 'SIGINT'}[self.sigcaught] + LOG.info(_('Caught %s, stopping children'), signame) + + for pid in self.children: + try: + os.kill(pid, signal.SIGTERM) + except OSError as exc: + if exc.errno != errno.ESRCH: + raise + + # Wait for children to die + if self.children: + LOG.info(_('Waiting on %d children to exit'), len(self.children)) + while self.children: + self._wait_child() + + +class Service(object): + """Service object for binaries running on hosts. + + A service takes a manager and enables rpc by listening to queues based + on topic. 
It also periodically runs tasks on the manager and reports + it state to the database services table.""" + + def __init__(self, host, binary, topic, manager, report_interval=None, + periodic_interval=None, periodic_fuzzy_delay=None, + service_name=None, *args, **kwargs): + self.host = host + self.binary = binary + self.topic = topic + self.manager_class_name = manager + manager_class = importutils.import_class(self.manager_class_name) + self.manager = manager_class(host=self.host, + service_name=service_name, + *args, **kwargs) + self.report_interval = report_interval + self.periodic_interval = periodic_interval + self.periodic_fuzzy_delay = periodic_fuzzy_delay + super(Service, self).__init__(*args, **kwargs) + self.saved_args, self.saved_kwargs = args, kwargs + self.timers = [] + + def start(self): + version_string = version.version_string() + LOG.audit(_('Starting %(topic)s node (version %(version_string)s)'), + {'topic': self.topic, 'version_string': version_string}) + self.manager.init_host() + self.model_disconnected = False + ctxt = context.get_admin_context() + try: + service_ref = db.service_get_by_args(ctxt, + self.host, + self.binary) + self.service_id = service_ref['id'] + except exception.NotFound: + self._create_service_ref(ctxt) + + self.conn = rpc.create_connection(new=True) + LOG.debug(_("Creating Consumer connection for Service %s") % + self.topic) + + rpc_dispatcher = self.manager.create_rpc_dispatcher() + + # Share this same connection for these Consumers + self.conn.create_consumer(self.topic, rpc_dispatcher, fanout=False) + + node_topic = '%s.%s' % (self.topic, self.host) + self.conn.create_consumer(node_topic, rpc_dispatcher, fanout=False) + + self.conn.create_consumer(self.topic, rpc_dispatcher, fanout=True) + + # Consume from all consumers in a thread + self.conn.consume_in_thread() + + if self.report_interval: + pulse = utils.LoopingCall(self.report_state) + pulse.start(interval=self.report_interval, + initial_delay=self.report_interval) + self.timers.append(pulse) + + if self.periodic_interval: + if self.periodic_fuzzy_delay: + initial_delay = random.randint(0, self.periodic_fuzzy_delay) + else: + initial_delay = None + + periodic = utils.LoopingCall(self.periodic_tasks) + periodic.start(interval=self.periodic_interval, + initial_delay=initial_delay) + self.timers.append(periodic) + + def _create_service_ref(self, context): + zone = FLAGS.storage_availability_zone + service_ref = db.service_create(context, + {'host': self.host, + 'binary': self.binary, + 'topic': self.topic, + 'report_count': 0, + 'availability_zone': zone}) + self.service_id = service_ref['id'] + + def __getattr__(self, key): + manager = self.__dict__.get('manager', None) + return getattr(manager, key) + + @classmethod + def create(cls, host=None, binary=None, topic=None, manager=None, + report_interval=None, periodic_interval=None, + periodic_fuzzy_delay=None, service_name=None): + """Instantiates class and passes back application object. 
+ + :param host: defaults to FLAGS.host + :param binary: defaults to basename of executable + :param topic: defaults to bin_name - 'cinder-' part + :param manager: defaults to FLAGS._manager + :param report_interval: defaults to FLAGS.report_interval + :param periodic_interval: defaults to FLAGS.periodic_interval + :param periodic_fuzzy_delay: defaults to FLAGS.periodic_fuzzy_delay + + """ + if not host: + host = FLAGS.host + if not binary: + binary = os.path.basename(inspect.stack()[-1][1]) + if not topic: + topic = binary + if not manager: + subtopic = topic.rpartition('cinder-')[2] + manager = FLAGS.get('%s_manager' % subtopic, None) + if report_interval is None: + report_interval = FLAGS.report_interval + if periodic_interval is None: + periodic_interval = FLAGS.periodic_interval + if periodic_fuzzy_delay is None: + periodic_fuzzy_delay = FLAGS.periodic_fuzzy_delay + service_obj = cls(host, binary, topic, manager, + report_interval=report_interval, + periodic_interval=periodic_interval, + periodic_fuzzy_delay=periodic_fuzzy_delay, + service_name=service_name) + + return service_obj + + def kill(self): + """Destroy the service object in the datastore.""" + self.stop() + try: + db.service_destroy(context.get_admin_context(), self.service_id) + except exception.NotFound: + LOG.warn(_('Service killed that has no database entry')) + + def stop(self): + # Try to shut the connection down, but if we get any sort of + # errors, go ahead and ignore them.. as we're shutting down anyway + try: + self.conn.close() + except Exception: + pass + for x in self.timers: + try: + x.stop() + except Exception: + pass + self.timers = [] + + def wait(self): + for x in self.timers: + try: + x.wait() + except Exception: + pass + + def periodic_tasks(self, raise_on_error=False): + """Tasks to be run at a periodic interval.""" + ctxt = context.get_admin_context() + self.manager.periodic_tasks(ctxt, raise_on_error=raise_on_error) + + def report_state(self): + """Update the state of this service in the datastore.""" + ctxt = context.get_admin_context() + zone = FLAGS.storage_availability_zone + state_catalog = {} + try: + try: + service_ref = db.service_get(ctxt, self.service_id) + except exception.NotFound: + LOG.debug(_('The service database object disappeared, ' + 'Recreating it.')) + self._create_service_ref(ctxt) + service_ref = db.service_get(ctxt, self.service_id) + + state_catalog['report_count'] = service_ref['report_count'] + 1 + if zone != service_ref['availability_zone']: + state_catalog['availability_zone'] = zone + + db.service_update(ctxt, + self.service_id, state_catalog) + + # TODO(termie): make this pattern be more elegant. + if getattr(self, 'model_disconnected', False): + self.model_disconnected = False + LOG.error(_('Recovered model server connection!')) + + # TODO(vish): this should probably only catch connection errors + except Exception: # pylint: disable=W0702 + if not getattr(self, 'model_disconnected', False): + self.model_disconnected = True + LOG.exception(_('model server went away')) + + +class WSGIService(object): + """Provides ability to launch API from a 'paste' configuration.""" + + def __init__(self, name, loader=None): + """Initialize, but do not start the WSGI server. + + :param name: The name of the WSGI server given to the loader. + :param loader: Loads the WSGI application using the given name. 
+ :returns: None + + """ + self.name = name + self.manager = self._get_manager() + self.loader = loader or wsgi.Loader() + self.app = self.loader.load_app(name) + self.host = getattr(FLAGS, '%s_listen' % name, "0.0.0.0") + self.port = getattr(FLAGS, '%s_listen_port' % name, 0) + self.server = wsgi.Server(name, + self.app, + host=self.host, + port=self.port) + + def _get_manager(self): + """Initialize a Manager object appropriate for this service. + + Use the service name to look up a Manager subclass from the + configuration and initialize an instance. If no class name + is configured, just return None. + + :returns: a Manager instance, or None. + + """ + fl = '%s_manager' % self.name + if fl not in FLAGS: + return None + + manager_class_name = FLAGS.get(fl, None) + if not manager_class_name: + return None + + manager_class = importutils.import_class(manager_class_name) + return manager_class() + + def start(self): + """Start serving this service using loaded configuration. + + Also, retrieve updated port number in case '0' was passed in, which + indicates a random port should be used. + + :returns: None + + """ + if self.manager: + self.manager.init_host() + self.server.start() + self.port = self.server.port + + def stop(self): + """Stop serving this API. + + :returns: None + + """ + self.server.stop() + + def wait(self): + """Wait for the service to stop serving this API. + + :returns: None + + """ + self.server.wait() + + +# NOTE(vish): the global launcher is to maintain the existing +# functionality of calling service.serve + +# service.wait +_launcher = None + + +def serve(*servers): + global _launcher + if not _launcher: + _launcher = Launcher() + for server in servers: + _launcher.launch_server(server) + + +def wait(): + LOG.debug(_('Full set of FLAGS:')) + for flag in FLAGS: + flag_get = FLAGS.get(flag, None) + # hide flag contents from log if contains a password + # should use secret flag when switch over to openstack-common + if ("_password" in flag or "_key" in flag or + (flag == "sql_connection" and "mysql:" in flag_get)): + LOG.debug(_('%(flag)s : FLAG SET ') % locals()) + else: + LOG.debug('%(flag)s : %(flag_get)s' % locals()) + try: + _launcher.wait() + except KeyboardInterrupt: + _launcher.stop() + rpc.cleanup() diff --git a/cinder/share/__init__.py b/cinder/share/__init__.py new file mode 100644 index 0000000000..da1ef5addc --- /dev/null +++ b/cinder/share/__init__.py @@ -0,0 +1,25 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +# Importing full names to not pollute the namespace and cause possible +# collisions with use of 'from cinder.share import ' elsewhere. 
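Taken together, `Service.create()`, `serve()` and `wait()` above leave a service binary with a very small main program. A minimal sketch, assuming flags and logging have already been configured (the argument value is illustrative; every omitted argument falls back to FLAGS as documented in `Service.create`):

```python
from cinder import service

if __name__ == '__main__':
    # topic defaults to the binary name, and the manager class is then
    # looked up from FLAGS.<subtopic>_manager (here: FLAGS.volume_manager).
    server = service.Service.create(binary='cinder-volume')
    service.serve(server)
    service.wait()
```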
+import cinder.flags +import cinder.openstack.common.importutils + +API = cinder.openstack.common.importutils.import_class( + cinder.flags.FLAGS.share_api_class) diff --git a/cinder/share/api.py b/cinder/share/api.py new file mode 100644 index 0000000000..633ea4fcec --- /dev/null +++ b/cinder/share/api.py @@ -0,0 +1,325 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Handles all requests relating to shares. +""" + +import functools + +from cinder.db import base +from cinder import exception +from cinder import flags +from cinder.image import glance +from cinder.openstack.common import log as logging +from cinder.openstack.common import rpc +from cinder.openstack.common import timeutils +import cinder.policy +from cinder import quota +from cinder.scheduler import rpcapi as scheduler_rpcapi +from cinder.share import rpcapi as share_rpcapi + +from oslo.config import cfg + + +FLAGS = flags.FLAGS + +LOG = logging.getLogger(__name__) +GB = 1048576 * 1024 + + +def wrap_check_policy(func): + """Check policy corresponding to the wrapped methods prior to execution. + + This decorator requires the first 3 args of the wrapped function + to be (self, context, share). 
+ """ + @functools.wraps(func) + def wrapped(self, context, target_obj, *args, **kwargs): + check_policy(context, func.__name__, target_obj) + return func(self, context, target_obj, *args, **kwargs) + + return wrapped + + +def check_policy(context, action, target_obj=None): + target = { + 'project_id': context.project_id, + 'user_id': context.user_id, + } + target.update(target_obj or {}) + _action = 'share:%s' % action + cinder.policy.enforce(context, _action, target) + + +class API(base.Base): + """API for interacting with the share manager.""" + + def __init__(self, db_driver=None): + self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI() + self.share_rpcapi = share_rpcapi.ShareAPI() + super(API, self).__init__(db_driver) + + def create(self, context, share_proto, size, name, description, + snapshot=None, availability_zone=None): + """Create new share.""" + + if snapshot is not None: + if snapshot['status'] != 'available': + msg = _('status must be available') + raise exception.InvalidShareSnapshot(reason=msg) + if not size: + size = snapshot['share_size'] + + snapshot_id = snapshot['id'] + else: + snapshot_id = None + + def as_int(s): + try: + return int(s) + except (ValueError, TypeError): + return s + + # tolerate size as stringified int + size = as_int(size) + + if not isinstance(size, int) or size <= 0: + msg = (_("Share size '%s' must be an integer and greater than 0") + % size) + raise exception.InvalidInput(reason=msg) + + #TODO(rushiagr): Find a suitable place to keep all the allowed + # share types so that it becomes easier to add one + if share_proto.lower() not in ['nfs', 'cifs']: + msg = (_("Invalid share type provided: %s") % share_proto) + raise exception.InvalidInput(reason=msg) + + if availability_zone is None: + availability_zone = FLAGS.storage_availability_zone + + options = {'size': size, + 'user_id': context.user_id, + 'project_id': context.project_id, + 'snapshot_id': snapshot_id, + 'availability_zone': availability_zone, + 'status': "creating", + 'scheduled_at': timeutils.utcnow(), + 'display_name': name, + 'display_description': description, + 'share_proto': share_proto, + } + + share = self.db.share_create(context, options) + + request_spec = {'share_properties': options, + 'share_proto': share_proto, + 'share_id': share['id'], + 'snapshot_id': share['snapshot_id'], + } + + filter_properties = {} + + self.scheduler_rpcapi.create_share( + context, + FLAGS.share_topic, + share['id'], + snapshot_id, + request_spec=request_spec, + filter_properties=filter_properties) + + return share + + def delete(self, context, share): + """Delete share.""" + share_id = share['id'] + if not share['host']: + # NOTE(rushiagr): scheduling failed, delete + self.db.share_delete(context, share_id) + return + + if share['status'] not in ["available", "error"]: + msg = _("Share status must be available or error") + raise exception.InvalidShare(reason=msg) + + snapshots = self.db.share_snapshot_get_all_for_share(context, share_id) + if len(snapshots): + msg = _("Share still has %d dependent snapshots") % len(snapshots) + raise exception.InvalidVolume(reason=msg) + + now = timeutils.utcnow() + share = self.db.share_update(context, share_id, {'status': 'deleting', + 'terminated_at': now}) + + self.share_rpcapi.delete_share(context, share) + + def create_snapshot(self, context, share, name, description, + force=False): + check_policy(context, 'create_snapshot', share) + + if ((not force) and (share['status'] != "available")): + msg = _("must be available") + raise 
exception.InvalidShare(reason=msg) + + options = {'share_id': share['id'], + 'user_id': context.user_id, + 'project_id': context.project_id, + 'status': "creating", + 'progress': '0%', + 'share_size': share['size'], + 'display_name': name, + 'display_description': description, + 'share_proto': share['share_proto'], + 'export_location': share['export_location']} + + snapshot = self.db.share_snapshot_create(context, options) + self.share_rpcapi.create_snapshot(context, share, snapshot) + return snapshot + + @wrap_check_policy + def delete_snapshot(self, context, snapshot, force=False): + if not force and snapshot['status'] not in ["available", "error"]: + msg = _("Share Snapshot status must be available or error") + raise exception.InvalidShareSnapshot(reason=msg) + + self.db.share_snapshot_update(context, snapshot['id'], + {'status': 'deleting'}) + share = self.db.share_get(context, snapshot['share_id']) + self.share_rpcapi.delete_snapshot(context, snapshot, share['host']) + + @wrap_check_policy + def update(self, context, share, fields): + self.db.share_update(context, share['id'], fields) + + def get(self, context, share_id): + rv = self.db.share_get(context, share_id) + check_policy(context, 'get', rv) + return rv + + def get_all(self, context, search_opts={}): + check_policy(context, 'get_all') + + search_opts = search_opts or {} + + if (context.is_admin and 'all_tenants' in search_opts): + # Need to remove all_tenants to pass the filtering below. + del search_opts['all_tenants'] + shares = self.db.share_get_all(context) + else: + shares = self.db.share_get_all_by_project(context, + context.project_id) + + if search_opts: + LOG.debug(_("Searching by: %s") % str(search_opts)) + + results = [] + not_found = object() + for share in shares: + for opt, value in search_opts.iteritems(): + if share.get(opt, not_found) != value: + break + else: + results.append(share) + shares = results + return shares + + def get_snapshot(self, context, snapshot_id): + check_policy(context, 'get_snapshot') + rv = self.db.share_snapshot_get(context, snapshot_id) + return dict(rv.iteritems()) + + def get_all_snapshots(self, context, search_opts=None): + check_policy(context, 'get_all_snapshots') + + search_opts = search_opts or {} + + if (context.is_admin and 'all_tenants' in search_opts): + # Need to remove all_tenants to pass the filtering below. 
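`get_all()` above (and `get_all_snapshots()` just below) filter with the same for/else idiom: the `else` branch of the inner loop runs only when no `break` fired, i.e. when every search option matched. The idiom in isolation, with a sentinel so a stored value of `None` is not confused with a missing key (function name and sample data are illustrative):

```python
def filter_by_opts(rows, search_opts):
    """Keep only rows whose values match every search option."""
    results = []
    not_found = object()  # unique sentinel: distinct from any stored value
    for row in rows:
        for opt, value in search_opts.items():
            if row.get(opt, not_found) != value:
                break
        else:  # no break: every option matched
            results.append(row)
    return results


shares = [{'status': 'available', 'size': 1},
          {'status': 'error', 'size': 1}]
assert filter_by_opts(shares, {'status': 'available'}) == [shares[0]]
```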
+ del search_opts['all_tenants'] + snapshots = self.db.share_snapshot_get_all(context) + else: + snapshots = self.db.share_snapshot_get_all_by_project( + context, context.project_id) + + if search_opts: + LOG.debug(_("Searching by: %s") % str(search_opts)) + + results = [] + not_found = object() + for snapshot in snapshots: + for opt, value in search_opts.iteritems(): + if snapshot.get(opt, not_found) != value: + break + else: + results.append(snapshot) + snapshots = results + return snapshots + + def allow_access(self, ctx, share, access_type, access_to): + """Allow access to share.""" + if not share['host']: + msg = _("Share host is None") + raise exception.InvalidShare(reason=msg) + if share['status'] not in ["available"]: + msg = _("Share status must be available") + raise exception.InvalidShare(reason=msg) + check_policy(ctx, 'allow_access') + values = {'share_id': share['id'], + 'access_type': access_type, + 'access_to': access_to} + access = self.db.share_access_create(ctx, values) + self.share_rpcapi.allow_access(ctx, share, access) + return access + + def deny_access(self, ctx, share, access): + """Deny access to share.""" + check_policy(ctx, 'deny_access') + #First check state of the target share + if not share['host']: + msg = _("Share host is None") + raise exception.InvalidShare(reason=msg) + if share['status'] not in ["available"]: + msg = _("Share status must be available") + raise exception.InvalidShare(reason=msg) + + #Then check state of the access rule + if access['state'] == access.STATE_ERROR: + self.db.share_access_delete(ctx, access["id"]) + elif access['state'] == access.STATE_ACTIVE: + self.db.share_access_update(ctx, access["id"], + {'state': access.STATE_DELETING}) + self.share_rpcapi.deny_access(ctx, share, access) + else: + msg = _("Access policy should be active or in error state") + raise exception.InvalidShareAccess(reason=msg) + #update share state and send message to manager + + def access_get_all(self, context, share): + """Returns all access rules for share.""" + check_policy(context, 'access_get_all') + rules = self.db.share_access_get_all_for_share(context, share['id']) + return [{'id': rule.id, + 'access_type': rule.access_type, + 'access_to': rule.access_to, + 'state': rule.state} for rule in rules] + + def access_get(self, context, access_id): + """Returns access rule with the id.""" + check_policy(context, 'access_get') + rule = self.db.share_access_get(context, access_id) + return rule diff --git a/cinder/share/configuration.py b/cinder/share/configuration.py new file mode 100644 index 0000000000..4b9ae08b17 --- /dev/null +++ b/cinder/share/configuration.py @@ -0,0 +1,84 @@ +#!/usr/bin/env python +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright (c) 2012 Rackspace Hosting +# Copyright (c) 2013 NetApp +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Configuration support for all drivers. 
+ +This module allows support for setting configurations either from default +or from a particular FLAGS group, to be able to set multiple configurations +for a given set of values. + +For instance, two lvm configurations can be set by naming them in groups as + + [lvm1] + volume_group=lvm-group-1 + ... + + [lvm2] + volume_group=lvm-group-2 + ... + +And the configuration group name will be passed in so that all calls to +configuration.volume_group within that instance will be mapped to the proper +named group. + +This class also ensures the implementation's configuration is grafted into the +option group. This is due to the way cfg works. All cfg options must be defined +and registered in the group in which they are used. +""" + +from oslo.config import cfg + +from cinder import flags +from cinder.openstack.common import log as logging + + +FLAGS = flags.FLAGS +LOG = logging.getLogger(__name__) + + +class Configuration(object): + + def __init__(self, share_opts, config_group=None): + """This takes care of grafting the implementation's config + values into the config group.""" + self.config_group = config_group + + # set the local conf so that __call__'s know what to use + if self.config_group: + self._ensure_config_values(share_opts) + self.local_conf = FLAGS._get(self.config_group) + else: + self.local_conf = FLAGS + + def _ensure_config_values(self, share_opts): + FLAGS.register_opts(share_opts, + group=self.config_group) + + def append_config_values(self, share_opts): + self._ensure_config_values(share_opts) + + def safe_get(self, value): + try: + return self.__getattr__(value) + except cfg.NoSuchOptError: + return None + + def __getattr__(self, value): + return getattr(self.local_conf, value) diff --git a/cinder/share/driver.py b/cinder/share/driver.py new file mode 100644 index 0000000000..6a6fb1ba46 --- /dev/null +++ b/cinder/share/driver.py @@ -0,0 +1,178 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 NetApp +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Drivers for shares. + +""" + +import ConfigParser +import os +import re +import time + +from cinder import exception +from cinder import flags +from cinder.openstack.common import log as logging +from cinder.share.configuration import Configuration +from cinder import utils + +from oslo.config import cfg + + +LOG = logging.getLogger(__name__) + +share_opts = [ + #NOTE(rushiagr): Reasonable to define this option at only one place. + cfg.IntOpt('num_shell_tries', + default=3, + help='number of times to attempt to run flakey shell commands'), + cfg.IntOpt('reserved_share_percentage', + default=0, + help='The percentage of backend capacity reserved'), + cfg.StrOpt('share_backend_name', + default=None, + help='The backend name for a given driver implementation'), +] + +FLAGS = flags.FLAGS +FLAGS.register_opts(share_opts) + + +#TODO(rushiagr): keep the configuration option in only one class and not two +#NOTE(rushiagr): The right place for this class is cinder.driver or +# cinder.utils. 
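What the `Configuration` class above buys you, shown with oslo.config directly: registering the same option list under several groups lets `[lvm1]` and `[lvm2]` sections resolve independently, which is exactly the multi-backend layout described in the module docstring. A sketch under that assumption (group and option names are illustrative):

```python
from oslo.config import cfg

opts = [cfg.StrOpt('volume_group', default='cinder-volumes')]

conf = cfg.ConfigOpts()
for group in ('lvm1', 'lvm2'):
    conf.register_opts(opts, group=group)
conf(args=[])  # normally this would also parse config files

# With no config file both groups see the default; with the [lvm1]/[lvm2]
# sections from the docstring they would resolve to different values.
assert conf.lvm1.volume_group == 'cinder-volumes'
assert conf.lvm2.volume_group == 'cinder-volumes'
```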
+class ExecuteMixin(object):
+    """Provides execute functionality to a driver class."""
+
+    def __init__(self, *args, **kwargs):
+        self.db = None
+        self.configuration = kwargs.get('configuration', None)
+        if self.configuration:
+            self.configuration.append_config_values(share_opts)
+        self.set_execute(kwargs.pop('execute', utils.execute))
+
+    def set_execute(self, execute):
+        self._execute = execute
+
+    def _try_execute(self, *command, **kwargs):
+        # NOTE(vish): Volume commands can partially fail due to timing, but
+        #             running them a second time on failure will usually
+        #             recover nicely.
+        tries = 0
+        while True:
+            try:
+                self._execute(*command, **kwargs)
+                return True
+            except exception.ProcessExecutionError:
+                tries = tries + 1
+                if tries >= self.configuration.num_shell_tries:
+                    raise
+                LOG.exception(_("Recovering from a failed execute. "
+                                "Try number %s"), tries)
+                time.sleep(tries ** 2)
+
+
+class ShareDriver(object):
+    """Class defines interface of NAS driver."""
+
+    def __init__(self, *args, **kwargs):
+        super(ShareDriver, self).__init__()
+        self.configuration = kwargs.get('configuration', None)
+        if self.configuration:
+            self.configuration.append_config_values(share_opts)
+
+    def allocate_container(self, context, share):
+        """Is called to allocate container for share."""
+        raise NotImplementedError()
+
+    def allocate_container_from_snapshot(self, context, share, snapshot):
+        """Is called to create share from snapshot."""
+        raise NotImplementedError()
+
+    def deallocate_container(self, context, share):
+        """Is called to deallocate container of share."""
+        raise NotImplementedError()
+
+    def create_share(self, context, share):
+        """Is called to create share."""
+        raise NotImplementedError()
+
+    def create_snapshot(self, context, snapshot):
+        """Is called to create snapshot."""
+        raise NotImplementedError()
+
+    def delete_share(self, context, share):
+        """Is called to remove share."""
+        raise NotImplementedError()
+
+    def delete_snapshot(self, context, snapshot):
+        """Is called to remove snapshot."""
+        raise NotImplementedError()
+
+    def create_export(self, context, share):
+        """Is called to export share."""
+        raise NotImplementedError()
+
+    def remove_export(self, context, share):
+        """Is called to stop exporting share."""
+        raise NotImplementedError()
+
+    def ensure_share(self, context, share):
+        """Invoked to ensure that share is exported."""
+        raise NotImplementedError()
+
+    def allow_access(self, context, share, access):
+        """Allow access to the share."""
+        raise NotImplementedError()
+
+    def deny_access(self, context, share, access):
+        """Deny access to the share."""
+        raise NotImplementedError()
+
+    def check_for_setup_error(self):
+        """Check for setup error."""
+        pass
+
+    def do_setup(self, context):
+        """Any initialization the share driver does while starting."""
+        pass
+
+    def get_share_stats(self, refresh=False):
+        """Get share status.
+
+        If 'refresh' is True, update the stats first."""
+        if refresh:
+            self._update_share_status()
+
+        return self._stats
+
+    def _update_share_status(self):
+        """Retrieve status info from share group."""
+
+        LOG.debug(_("Updating share status"))
+        data = {}
+        backend_name = self.configuration.safe_get('share_backend_name')
+        data["share_backend_name"] = backend_name or 'Generic_NFS'
+        data["vendor_name"] = 'Open Source'
+        data["driver_version"] = '1.0'
+        data["storage_protocol"] = None
+
+        data['total_capacity_gb'] = 'infinite'
+        data['free_capacity_gb'] = 'infinite'
+        data['reserved_percentage'] = 0
+        data['QoS_support'] = False
+        self._stats = data
diff --git a/cinder/share/drivers/__init__.py b/cinder/share/drivers/__init__.py
new file mode 100644
index 0000000000..96f778e81e
--- /dev/null
+++ b/cinder/share/drivers/__init__.py
@@ -0,0 +1,22 @@
+# Copyright 2012 OpenStack LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+:mod:`cinder.share.driver` -- Cinder Share Drivers
+=====================================================
+
+.. automodule:: cinder.share.driver
+    :platform: Unix
+    :synopsis: Module containing all the Cinder Share drivers.
+"""
diff --git a/cinder/share/drivers/lvm.py b/cinder/share/drivers/lvm.py
new file mode 100644
index 0000000000..f81b144571
--- /dev/null
+++ b/cinder/share/drivers/lvm.py
@@ -0,0 +1,609 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 NetApp
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+LVM Driver for shares.
+
+"""
+
+import ConfigParser
+import math
+import os
+import re
+
+from cinder import exception
+from cinder import flags
+from cinder.openstack.common import importutils
+from cinder.openstack.common import log as logging
+from cinder.share import driver
+from cinder import utils
+
+from oslo.config import cfg
+
+
+LOG = logging.getLogger(__name__)
+
+share_opts = [
+    cfg.StrOpt('share_export_root',
+               default='$state_path/mnt',
+               help='Base folder where exported shares are located'),
+    cfg.StrOpt('share_export_ip',
+               default=None,
+               help='IP to be added to export string'),
+    cfg.StrOpt('smb_config_path',
+               default='$state_path/smb.conf'),
+    cfg.IntOpt('share_lvm_mirrors',
+               default=0,
+               help='If set, create lvms with multiple mirrors. Note that '
+                    'this requires lvm_mirrors + 2 pvs with available space'),
+    cfg.StrOpt('share_volume_group',
+               default='cinder-shares',
+               help='Name for the VG that will contain exported shares'),
+    cfg.ListOpt('share_lvm_helpers',
+                default=[
+                    'CIFS=cinder.share.drivers.lvm.CIFSNetConfHelper',
+                    'NFS=cinder.share.drivers.lvm.NFSHelper',
+                ],
+                help='Specify list of share export helpers.'),
+]
+
+FLAGS = flags.FLAGS
+FLAGS.register_opts(share_opts)
+
+
+class LVMShareDriver(driver.ExecuteMixin, driver.ShareDriver):
+    """Executes commands relating to Shares."""
+
+    def __init__(self, db, *args, **kwargs):
+        """Do initialization."""
+        super(LVMShareDriver, self).__init__(*args, **kwargs)
+        self.db = db
+        self._helpers = None
+        self.configuration.append_config_values(share_opts)
+
+    def check_for_setup_error(self):
+        """Returns an error if prerequisites aren't met."""
+        out, err = self._execute('vgs', '--noheadings', '-o', 'name',
+                                 run_as_root=True)
+        volume_groups = out.split()
+        if self.configuration.share_volume_group not in volume_groups:
+            msg = (_("share volume group %s doesn't exist")
+                   % self.configuration.share_volume_group)
+            raise exception.InvalidParameterValue(err=msg)
+        if not self.configuration.share_export_ip:
+            msg = (_("share_export_ip is not specified"))
+            raise exception.InvalidParameterValue(err=msg)
+
+    def do_setup(self, context):
+        """Any initialization the volume driver does while starting."""
+        super(LVMShareDriver, self).do_setup(context)
+        self._setup_helpers()
+        for helper in self._helpers.values():
+            helper.init()
+
+    def _setup_helpers(self):
+        """Initializes protocol-specific NAS drivers."""
+        self._helpers = {}
+        for helper_str in self.configuration.share_lvm_helpers:
+            share_proto, _x, import_str = helper_str.partition('=')
+            helper = importutils.import_class(import_str)
+            #TODO(rushiagr): better way to handle configuration
+            #                instead of just passing to the helper
+            self._helpers[share_proto.upper()] = helper(self._execute,
+                                                        self.configuration)
+
+    def _local_path(self, share):
+        # NOTE(vish): stops deprecation warning
+        escaped_group = \
+            self.configuration.share_volume_group.replace('-', '--')
+        escaped_name = share['name'].replace('-', '--')
+        return "/dev/mapper/%s-%s" % (escaped_group, escaped_name)
+
+    def _allocate_container(self, share_name, sizestr):
+        cmd = ['lvcreate', '-L', sizestr, '-n', share_name,
+               self.configuration.share_volume_group]
+        if self.configuration.share_lvm_mirrors:
+            cmd += ['-m', self.configuration.share_lvm_mirrors, '--nosync']
+            terras = int(sizestr[:-1]) / 1024.0
+            if terras >= 1.5:
+                rsize = int(2 ** math.ceil(math.log(terras) / math.log(2)))
+                # NOTE(vish): Next power of two for region size. See:
+                #             http://red.ht/U2BPOD
+                cmd += ['-R', str(rsize)]
+
+        self._try_execute(*cmd, run_as_root=True)
+
+    def _deallocate_container(self, share_name):
+        """Deletes a logical volume for share."""
+        # zero out old volumes to prevent data leaking between users
+        # TODO(ja): reclaiming space should be done lazy and low priority
+        self._try_execute('lvremove', '-f', "%s/%s" %
+                          (self.configuration.share_volume_group,
+                           share_name),
+                          run_as_root=True)
+
+    def get_share_stats(self, refresh=False):
+        """Get share status.
+
+        If 'refresh' is True, update the stats first."""
+        if refresh:
+            self._update_share_status()
+
+        return self._stats
+
+    def _update_share_status(self):
+        """Retrieve status info from share volume group."""
+
+        LOG.debug(_("Updating share status"))
+        data = {}
+
+        # Note(zhiteng): This information is driver/backend specific,
+        #                each driver may define these values in its own
+        #                config options or fetch from driver specific
+        #                configuration file.
+        data["share_backend_name"] = 'LVM'
+        data["vendor_name"] = 'Open Source'
+        data["driver_version"] = '1.0'
+        #TODO(rushiagr): Pick storage_protocol from the helper used.
+        data["storage_protocol"] = 'NFS_CIFS'
+
+        data['total_capacity_gb'] = 0
+        data['free_capacity_gb'] = 0
+        data['reserved_percentage'] = \
+            self.configuration.reserved_share_percentage
+        data['QoS_support'] = False
+
+        try:
+            out, err = self._execute('vgs', '--noheadings', '--nosuffix',
+                                     '--unit=G', '-o', 'name,size,free',
+                                     self.configuration.share_volume_group,
+                                     run_as_root=True)
+        except exception.ProcessExecutionError as exc:
+            LOG.error(_("Error retrieving volume status: %s") % exc.stderr)
+            out = False
+
+        if out:
+            share = out.split()
+            data['total_capacity_gb'] = float(share[1])
+            data['free_capacity_gb'] = float(share[2])
+
+        self._stats = data
+
+    def deallocate_container(self, ctx, share):
+        """Remove LVM volume that will be represented as share."""
+        self._deallocate_container(share['name'])
+
+    def allocate_container(self, ctx, share):
+        """Create LVM volume that will be represented as share."""
+        self._allocate_container(share['name'], '%sG' % share['size'])
+        #create file system
+        device_name = self._local_path(share)
+        self._execute('mkfs.ext4', device_name, run_as_root=True)
+
+    def allocate_container_from_snapshot(self, context, share, snapshot):
+        """Is called to create share from snapshot."""
+        self._allocate_container(share['name'], '%sG' % share['size'])
+        self._copy_volume(self._local_path(snapshot), self._local_path(share),
+                          snapshot['share_size'])
+
+    def create_export(self, ctx, share):
+        """Exports the volume. Can optionally return a Dictionary of changes
+        to the share object to be persisted."""
+        device_name = self._local_path(share)
+        location = self._mount_device(share, device_name)
+        #TODO(rushiagr): what is the provider_location? really needed?
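`_update_share_status()` above leans on the shape of `vgs` output: with `--noheadings --nosuffix --unit=G -o name,size,free` it prints a single whitespace-separated line per volume group. A sketch of that parse (the sample line and function name are made up for illustration):

```python
def parse_vgs_line(out):
    """Map one line of `vgs` output onto the capacity fields of the stats."""
    name, size_gb, free_gb = out.split()
    return {'volume_group': name,
            'total_capacity_gb': float(size_gb),
            'free_capacity_gb': float(free_gb)}


stats = parse_vgs_line('  cinder-shares 100.00 35.50')
assert stats['total_capacity_gb'] == 100.0
assert stats['free_capacity_gb'] == 35.5
```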
+        return {'provider_location': location}
+
+    def remove_export(self, ctx, share):
+        """Removes an export for a share."""
+        mount_path = self._get_mount_path(share)
+        if os.path.exists(mount_path):
+            #umount, may be busy
+            try:
+                self._execute('umount', '-f', mount_path, run_as_root=True)
+            except exception.ProcessExecutionError as exc:
+                if 'device is busy' in str(exc):
+                    raise exception.ShareIsBusy(share_name=share['name'])
+                else:
+                    LOG.info('Unable to umount: %s', exc)
+            #remove dir
+            try:
+                os.rmdir(mount_path)
+            except OSError:
+                LOG.info('Unable to delete %s', mount_path)
+
+    def create_share(self, ctx, share):
+        """Is called after allocate_space to create share on the volume."""
+        location = self._get_mount_path(share)
+        location = self._get_helper(share).create_export(location,
+                                                         share['name'])
+        return location
+
+    def create_snapshot(self, context, snapshot):
+        """Creates a snapshot."""
+        orig_lv_name = "%s/%s" % (self.configuration.share_volume_group,
+                                  snapshot['share_name'])
+        self._try_execute('lvcreate', '-L', '%sG' % snapshot['share_size'],
+                          '--name', snapshot['name'],
+                          '--snapshot', orig_lv_name, run_as_root=True)
+
+    def ensure_share(self, ctx, share):
+        """Ensure that storage is mounted and exported."""
+        device_name = self._local_path(share)
+        location = self._mount_device(share, device_name)
+        self._get_helper(share).create_export(location, share['name'],
+                                              recreate=True)
+
+    def delete_share(self, ctx, share):
+        """Delete a share."""
+        try:
+            location = self._get_mount_path(share)
+            self._get_helper(share).remove_export(location, share['name'])
+        except exception.ProcessExecutionError:
+            LOG.info("Can't remove share %r" % share['id'])
+        except exception.InvalidShare as exc:
+            LOG.info(exc.message)
+
+    def delete_snapshot(self, context, snapshot):
+        """Deletes a snapshot."""
+        self._deallocate_container(snapshot['name'])
+
+    def allow_access(self, ctx, share, access):
+        """Allow access to the share."""
+        location = self._get_mount_path(share)
+        self._get_helper(share).allow_access(location, share['name'],
+                                             access['access_type'],
+                                             access['access_to'])
+
+    def deny_access(self, ctx, share, access):
+        """Deny access to the share."""
+        location = self._get_mount_path(share)
+        self._get_helper(share).deny_access(location, share['name'],
+                                            access['access_type'],
+                                            access['access_to'])
+
+    def _get_helper(self, share):
+        if share['share_proto'].startswith('NFS'):
+            return self._helpers['NFS']
+        elif share['share_proto'].startswith('CIFS'):
+            return self._helpers['CIFS']
+        else:
+            raise exception.InvalidShare(reason='Wrong share type')
+
+    def _mount_device(self, share, device_name):
+        """Mount LVM share and ignore if already mounted."""
+        mount_path = self._get_mount_path(share)
+        self._execute('mkdir', '-p', mount_path)
+        try:
+            self._execute('mount', device_name, mount_path,
+                          run_as_root=True, check_exit_code=True)
+            self._execute('chmod', '777', mount_path,
+                          run_as_root=True, check_exit_code=True)
+        except exception.ProcessExecutionError as exc:
+            if 'already mounted' in exc.stderr:
+                LOG.warn(_("%s is already mounted"), device_name)
+            else:
+                raise
+        return mount_path
+
+    def _get_mount_path(self, share):
+        """Returns path where share is mounted."""
+        return os.path.join(self.configuration.share_export_root,
+                            share['name'])
+
+    def _copy_volume(self, srcstr, deststr, size_in_g):
+        # Use O_DIRECT to avoid thrashing the system buffer cache
+        extra_flags = ['iflag=direct', 'oflag=direct']
+
+        # Check whether O_DIRECT is supported
+        try:
+            self._execute('dd',
'count=0', 'if=%s' % srcstr, 'of=%s' % deststr, + *extra_flags, run_as_root=True) + except exception.ProcessExecutionError: + extra_flags = [] + + # Perform the copy + self._execute('dd', 'if=%s' % srcstr, 'of=%s' % deststr, + 'count=%d' % (size_in_g * 1024), 'bs=1M', + *extra_flags, run_as_root=True) + + +class NASHelperBase(object): + """Interface to work with share.""" + + def __init__(self, execute, config_object): + self.configuration = config_object + self._execute = execute + + def init(self): + pass + + def create_export(self, local_path, share_name, recreate=False): + """Create new export, delete old one if exists.""" + raise NotImplementedError() + + def remove_export(self, local_path, share_name): + """Remove export.""" + raise NotImplementedError() + + def allow_access(self, local_path, share_name, access_type, access): + """Allow access to the host.""" + raise NotImplementedError() + + def deny_access(self, local_path, share_name, access_type, access, + force=False): + """Deny access to the host.""" + raise NotImplementedError() + + +class NFSHelper(NASHelperBase): + """Interface to work with share.""" + + def __init__(self, execute, config_object): + super(NFSHelper, self).__init__(execute, config_object) + try: + self._execute('exportfs', check_exit_code=True, + run_as_root=True) + except exception.ProcessExecutionError: + raise exception.Error('NFS server not found') + + def create_export(self, local_path, share_name, recreate=False): + """Create new export, delete old one if exists.""" + return ':'.join([self.configuration.share_export_ip, local_path]) + + def remove_export(self, local_path, share_name): + """Remove export.""" + pass + + def allow_access(self, local_path, share_name, access_type, access): + """Allow access to the host""" + if access_type != 'ip': + reason = 'only ip access type allowed' + raise exception.InvalidShareAccess(reason) + #check if presents in export + out, _ = self._execute('exportfs', run_as_root=True) + out = re.search(re.escape(local_path) + '[\s\n]*' + re.escape(access), + out) + if out is not None: + raise exception.ShareAccessExists(access_type=access_type, + access=access) + + self._execute('exportfs', '-o', 'rw,no_subtree_check', + ':'.join([access, local_path]), run_as_root=True, + check_exit_code=True) + + def deny_access(self, local_path, share_name, access_type, access, + force=False): + """Deny access to the host.""" + self._execute('exportfs', '-u', ':'.join([access, local_path]), + run_as_root=True, check_exit_code=False) + + +class CIFSHelper(NASHelperBase): + """Class provides functionality to operate with cifs shares""" + + def __init__(self, execute, config_object): + """Store executor and configuration path.""" + super(CIFSHelper, self).__init__(execute, config_object) + self.config = self.configuration.smb_config_path + self.test_config = "%s_" % (self.config,) + + def init(self): + """Initialize environment.""" + self._recreate_config() + self._ensure_daemon_started() + + def create_export(self, local_path, share_name, recreate=False): + """Create new export, delete old one if exists.""" + parser = ConfigParser.ConfigParser() + parser.read(self.config) + #delete old one + if parser.has_section(share_name): + if recreate: + parser.remove_section(share_name) + else: + raise exception.Error('Section exists') + #Create new one + parser.add_section(share_name) + parser.set(share_name, 'path', local_path) + parser.set(share_name, 'browseable', 'yes') + parser.set(share_name, 'guest ok', 'yes') + parser.set(share_name, 'read 
only', 'no') + parser.set(share_name, 'writable', 'yes') + parser.set(share_name, 'create mask', '0755') + parser.set(share_name, 'hosts deny', '0.0.0.0/0') # denying all ips + parser.set(share_name, 'hosts allow', '127.0.0.1') + #NOTE(rushiagr): ensure that local_path dir is existing + if not os.path.exists(local_path): + os.makedirs(local_path) + self._execute('chown', 'nobody', '-R', local_path, run_as_root=True) + self._update_config(parser) + return '//%s/%s' % (self.configuration.share_export_ip, share_name) + + def remove_export(self, local_path, share_name): + """Remove export.""" + parser = ConfigParser.ConfigParser() + parser.read(self.config) + #delete old one + if parser.has_section(share_name): + parser.remove_section(share_name) + self._update_config(parser) + self._execute('smbcontrol', 'all', 'close-share', share_name, + run_as_root=True) + + def allow_access(self, local_path, share_name, access_type, access): + """Allow access to the host.""" + if access_type != 'ip': + reason = 'only ip access type allowed' + raise exception.InvalidShareAccess(reason) + parser = ConfigParser.ConfigParser() + parser.read(self.config) + + hosts = parser.get(share_name, 'hosts allow') + if access in hosts.split(): + raise exception.ShareAccessExists(access_type=access_type, + access=access) + hosts += ' %s' % (access,) + parser.set(share_name, 'hosts allow', hosts) + self._update_config(parser) + + def deny_access(self, local_path, share_name, access_type, access, + force=False): + """Deny access to the host.""" + parser = ConfigParser.ConfigParser() + try: + parser.read(self.config) + hosts = parser.get(share_name, 'hosts allow') + hosts = hosts.replace(' %s' % (access,), '', 1) + parser.set(share_name, 'hosts allow', hosts) + self._update_config(parser) + except ConfigParser.NoSectionError: + if not force: + raise + + def _ensure_daemon_started(self): + """ + FYI: smbd starts at least two processes. + """ + out, _ = self._execute(*'ps -C smbd -o args='.split(), + check_exit_code=False) + processes = [process.strip() for process in out.split('\n') + if process.strip()] + + cmd = 'smbd -s %s -D' % (self.config,) + + running = False + for process in processes: + if not process.endswith(cmd): + #alternatively exit + raise exception.Error('smbd already started with wrong config') + running = True + + if not running: + self._execute(*cmd.split(), run_as_root=True) + + def _recreate_config(self): + """create new SAMBA configuration file.""" + if os.path.exists(self.config): + os.unlink(self.config) + parser = ConfigParser.ConfigParser() + parser.add_section('global') + parser.set('global', 'security', 'user') + parser.set('global', 'server string', '%h server (Samba, Openstack)') + + self._update_config(parser, restart=False) + + def _update_config(self, parser, restart=True): + """Check if new configuration is correct and save it.""" + #Check that configuration is correct + with open(self.test_config, 'w') as fp: + parser.write(fp) + self._execute('testparm', '-s', self.test_config, + check_exit_code=True) + #save it + with open(self.config, 'w') as fp: + parser.write(fp) + #restart daemon if necessary + if restart: + self._execute(*'pkill -HUP smbd'.split(), run_as_root=True) + + +class CIFSNetConfHelper(NASHelperBase): + """Manage shares in samba server by net conf tool. + + Class provides functionality to operate with CIFS shares. Samba + server should be configured to use registry as configuration + backend to allow dynamically share managements. 
There are two ways
+    to do that; one of them is to add a specific parameter in the
+    global configuration section of smb.conf:
+
+        [global]
+            include = registry
+
+    For more information see smb.conf(5).
+    """
+
+    def create_export(self, local_path, share_name, recreate=False):
+        """Create share at samba server."""
+        create_cmd = ('net', 'conf', 'addshare', share_name, local_path,
+                      'writeable=y', 'guest_ok=y')
+        try:
+            self._execute(*create_cmd, run_as_root=True)
+        except exception.ProcessExecutionError as e:
+            if 'already exists' in e.stderr:
+                if recreate:
+                    self._execute('net', 'conf', 'delshare', share_name,
+                                  run_as_root=True)
+                    self._execute(*create_cmd, run_as_root=True)
+                else:
+                    msg = _('Share section %r already defined.') % share_name
+                    raise exception.ShareBackendException(msg=msg)
+            else:
+                raise
+        parameters = {
+            'browseable': 'yes',
+            'create mask': '0755',
+            'hosts deny': '0.0.0.0/0',  # deny all
+            'hosts allow': '127.0.0.1',
+        }
+        for name, value in parameters.items():
+            self._execute('net', 'conf', 'setparm', share_name, name, value,
+                          run_as_root=True)
+        return '//%s/%s' % (self.configuration.share_export_ip, share_name)
+
+    def remove_export(self, local_path, share_name):
+        """Remove share definition from samba server."""
+        try:
+            self._execute('net', 'conf', 'delshare', share_name,
+                          run_as_root=True)
+        except exception.ProcessExecutionError as e:
+            if 'SBC_ERR_NO_SUCH_SERVICE' not in e.stderr:
+                raise
+        self._execute('smbcontrol', 'all', 'close-share', share_name,
+                      run_as_root=True)
+
+    def allow_access(self, local_path, share_name, access_type, access):
+        """Add an access rule to the 'hosts allow' list."""
+        if access_type != 'ip':
+            reason = _('only ip access type allowed')
+            raise exception.InvalidShareAccess(reason=reason)
+
+        hosts = self._get_allow_hosts(share_name)
+        if access in hosts:
+            raise exception.ShareAccessExists(access_type=access_type,
+                                              access=access)
+        hosts.append(access)
+        self._set_allow_hosts(hosts, share_name)
+
+    def deny_access(self, local_path, share_name, access_type, access,
+                    force=False):
+        """Remove an access rule from the 'hosts allow' list."""
+        try:
+            hosts = self._get_allow_hosts(share_name)
+            hosts.remove(access)
+            self._set_allow_hosts(hosts, share_name)
+        except exception.ProcessExecutionError as e:
+            if not ('does not exist' in e.stdout and force):
+                raise
+
+    def _get_allow_hosts(self, share_name):
+        (out, _) = self._execute('net', 'conf', 'getparm', share_name,
+                                 'hosts allow', run_as_root=True)
+        return out.split()
+
+    def _set_allow_hosts(self, hosts, share_name):
+        value = ' '.join(hosts)
+        self._execute('net', 'conf', 'setparm', share_name, 'hosts allow',
+                      value, run_as_root=True)
diff --git a/cinder/share/drivers/netapp.py b/cinder/share/drivers/netapp.py
new file mode 100644
index 0000000000..6baa83904a
--- /dev/null
+++ b/cinder/share/drivers/netapp.py
@@ -0,0 +1,745 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+# Copyright 2012 NetApp
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
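The CIFSNetConfHelper above keeps the authoritative "hosts allow" list in
Samba's registry and rewrites it wholesale on every change: allow_access and
deny_access read the current value with `net conf getparm`, edit it in
memory, and write it back with `net conf setparm`. A minimal sketch of that
read-modify-write cycle, using an in-memory dict in place of the Samba
registry; `fake_registry`, `fake_execute`, and `allow` are illustrative
stand-ins, not part of the driver::

    # Illustrative only: emulates the getparm/setparm round-trip that
    # CIFSNetConfHelper performs for access rules.
    fake_registry = {('share_1', 'hosts allow'): '127.0.0.1'}

    def fake_execute(*cmd, **kwargs):
        # Stands in for self._execute(); mirrors only the two
        # 'net conf' subcommands the helper uses.
        if cmd[:3] == ('net', 'conf', 'getparm'):
            return fake_registry[(cmd[3], cmd[4])], ''
        if cmd[:3] == ('net', 'conf', 'setparm'):
            fake_registry[(cmd[3], cmd[4])] = cmd[5]
            return '', ''
        raise AssertionError('unexpected command: %r' % (cmd,))

    def allow(share_name, ip):
        # Read-modify-write, mirroring allow_access above.
        out, _ = fake_execute('net', 'conf', 'getparm', share_name,
                              'hosts allow')
        hosts = out.split()
        if ip in hosts:
            raise ValueError('rule for %s already exists' % ip)
        hosts.append(ip)
        fake_execute('net', 'conf', 'setparm', share_name, 'hosts allow',
                     ' '.join(hosts))

    allow('share_1', '10.0.0.2')
    assert fake_registry[('share_1', 'hosts allow')] == '127.0.0.1 10.0.0.2'

Because the whole list is rewritten on every change, the last writer wins if
two rule changes for the same share overlap.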
+""" +NetApp specific NAS storage driver. Supports NFS and CIFS protocols. + +This driver requires NetApp OnCommand 5.0 and one or more Data +ONTAP 7-mode storage systems with installed CIFS and NFS licenses. +""" +import suds +from suds.sax import text + +from cinder import exception +from cinder import flags +from cinder.openstack.common import log +from cinder.share import driver + +from oslo.config import cfg + + +LOG = log.getLogger(__name__) + +NETAPP_NAS_OPTS = [ + cfg.StrOpt('netapp_nas_wsdl_url', + default=None, + help='URL of the WSDL file for the DFM server'), + cfg.StrOpt('netapp_nas_login', + default=None, + help='User name for the DFM server'), + cfg.StrOpt('netapp_nas_password', + default=None, + help='Password for the DFM server'), + cfg.StrOpt('netapp_nas_server_hostname', + default=None, + help='Hostname for the DFM server'), + cfg.IntOpt('netapp_nas_server_port', + default=8088, + help='Port number for the DFM server'), + cfg.BoolOpt('netapp_nas_server_secure', + default=True, + help='Use secure connection to server.'), +] + +FLAGS = flags.FLAGS +FLAGS.register_opts(NETAPP_NAS_OPTS) + + +class NetAppShareDriver(driver.ShareDriver): + """ + NetApp specific NAS driver. Allows for NFS and CIFS NAS storage usage. + """ + + def __init__(self, db, *args, **kwargs): + super(NetAppShareDriver, self).__init__(*args, **kwargs) + self.db = db + self._helpers = None + self._share_table = {} + self.configuration.append_config_values(NETAPP_NAS_OPTS) + self._client = NetAppApiClient(self.configuration) + + def allocate_container(self, context, share): + """Allocate space for the share on aggregates.""" + aggregate = self._find_best_aggregate() + filer = aggregate.FilerId + self._allocate_share_space(aggregate, share) + self._remember_share(share['id'], filer) + + def allocate_container_from_snapshot(self, context, share, snapshot): + """Creates a share from a snapshot.""" + share_name = _get_valid_share_name(share['id']) + parent_share_name = _get_valid_share_name(snapshot['share_id']) + parent_snapshot_name = _get_valid_snapshot_name(snapshot['id']) + + filer = self._get_filer(snapshot['share_id']) + + xml_args = ('%s' + '%s' + '%s') % \ + (share_name, parent_share_name, parent_snapshot_name) + self._client.send_request_to(filer, 'volume-clone-create', xml_args) + self._remember_share(share['id'], filer) + + def deallocate_container(self, context, share): + """Free share space.""" + target = self._get_filer(share['id']) + if target: + self._share_offline(target, share) + self._delete_share(target, share) + self._forget_share(share['id']) + + def create_share(self, context, share): + """Creates NAS storage.""" + helper = self._get_helper(share) + filer = self._get_filer(share['id']) + export_location = helper.create_share(filer, share) + return export_location + + def create_snapshot(self, context, snapshot): + """Creates a snapshot of a share.""" + share_name = _get_valid_share_name(snapshot['share_id']) + snapshot_name = _get_valid_snapshot_name(snapshot['id']) + + filer = self._get_filer(snapshot['share_id']) + + xml_args = ('%s' + '%s') % (share_name, snapshot_name) + self._client.send_request_to(filer, 'snapshot-create', xml_args) + + def delete_share(self, context, share): + """Deletes NAS storage.""" + helper = self._get_helper(share) + target = helper.get_target(share) + # share may be in error state, so there's no share and target + if target: + helper.delete_share(share) + + def delete_snapshot(self, context, snapshot): + """Deletes a snapshot of a share.""" + share_name = 
_get_valid_share_name(snapshot['share_id']) + snapshot_name = _get_valid_snapshot_name(snapshot['id']) + + filer = self._get_filer(snapshot['share_id']) + + self._is_snapshot_busy(filer, share_name, snapshot_name) + xml_args = ('%s' + '%s') % (snapshot_name, share_name) + self._client.send_request_to(filer, 'snapshot-delete', xml_args) + + def create_export(self, context, share): + """Share already exported.""" + pass + + def remove_export(self, context, share): + """Share already removed.""" + pass + + def ensure_share(self, context, share): + """Remember previously created shares.""" + helper = self._get_helper(share) + filer = helper.get_target(share) + self._remember_share(share['id'], filer) + + def allow_access(self, context, share, access): + """Allows access to a given NAS storage for IPs in :access:""" + helper = self._get_helper(share) + return helper.allow_access(context, share, access) + + def deny_access(self, context, share, access): + """Denies access to a given NAS storage for IPs in :access:""" + helper = self._get_helper(share) + return helper.deny_access(context, share, access) + + def do_setup(self, context): + """Prepare once the driver. + + Called once by the manager after the driver is loaded. + Validate the flags we care about and setup the suds (web + services) client. + """ + self._client.do_setup() + self._setup_helpers() + + def check_for_setup_error(self): + """Raises error if prerequisites are not met.""" + self._client.check_configuration(self.configuration) + + def _get_filer(self, share_id): + """Returns filer name for the share_id.""" + try: + return self._share_table[share_id] + except KeyError: + return + + def _remember_share(self, share_id, filer): + """Stores required share info in local state.""" + self._share_table[share_id] = filer + + def _forget_share(self, share_id): + """Remove share info about share.""" + try: + self._share_table.pop(share_id) + except KeyError: + pass + + def _share_offline(self, target, share): + """Sends share offline. Required before deleting a share.""" + share_name = _get_valid_share_name(share['id']) + xml_args = ('%s') % share_name + self._client.send_request_to(target, 'volume-offline', xml_args) + + def _delete_share(self, target, share): + """Destroys share on a target OnTap device.""" + share_name = _get_valid_share_name(share['id']) + xml_args = ('true' + '%s') % share_name + self._client.send_request_to(target, 'volume-destroy', xml_args) + + def _setup_helpers(self): + """Initializes protocol-specific NAS drivers.""" + #TODO(rushiagr): better way to handle configuration instead of just + # passing to the helper + self._helpers = { + 'CIFS': NetAppCIFSHelper(self._client, + self.configuration), + 'NFS': NetAppNFSHelper(self._client, + self.configuration), + } + + def _get_helper(self, share): + """Returns driver which implements share protocol.""" + share_proto = share['share_proto'] + + for proto in self._helpers.keys(): + if share_proto.upper().startswith(proto): + return self._helpers[proto] + + err_msg = _("Invalid NAS protocol supplied: %s. 
") % (share_proto) + + raise exception.Error(err_msg) + + def _find_best_aggregate(self): + """Returns aggregate with the most free space left.""" + aggrs = self._client.get_available_aggregates() + if aggrs is None: + raise exception.Error(_("No aggregates available")) + + best_aggregate = max(aggrs.Aggregates.AggregateInfo, + key=lambda ai: ai.AggregateSize.SizeAvailable) + return best_aggregate + + def _allocate_share_space(self, aggregate, share): + """Create new share on aggregate.""" + filer_id = aggregate.FilerId + aggr_name = aggregate.AggregateName.split(':')[1] + share_name = _get_valid_share_name(share['id']) + args_xml = ('%s' + '%dg' + '%s') % (aggr_name, share['size'], + share_name) + self._client.send_request_to(filer_id, 'volume-create', args_xml) + + def _is_snapshot_busy(self, filer, share_name, snapshot_name): + """Raises ShareSnapshotIsBusy if snapshot is busy.""" + xml_args = ('%s') % share_name + snapshots = self._client.send_request_to(filer, + 'snapshot-list-info', + xml_args, + do_response_check=False) + + for snap in snapshots.Results.snapshots[0]['snapshot-info']: + if snap['name'][0] == snapshot_name and snap['busy'][0] == 'true': + raise exception.ShareSnapshotIsBusy( + snapshot_name=snapshot_name) + + def get_share_stats(self, refresh=False): + """Get share status. + + If 'refresh' is True, run update the stats first.""" + if refresh: + self._update_share_status() + + return self._stats + + def _update_share_status(self): + """Retrieve status info from share volume group.""" + + LOG.debug(_("Updating share status")) + data = {} + + # Note(zhiteng): These information are driver/backend specific, + # each driver may define these values in its own config options + # or fetch from driver specific configuration file. + data["share_backend_name"] = 'NetApp_7_mode' + data["vendor_name"] = 'NetApp' + data["driver_version"] = '1.0' + #TODO(rushiagr): Pick storage_protocol from the helper used. + data["storage_protocol"] = 'NFS_CIFS' + + data['total_capacity_gb'] = 'infinite' + data['free_capacity_gb'] = 'infinite' + data['reserved_percentage'] = 0 + data['QoS_support'] = False + + self._stats = data + + +def _check_response(request, response): + """Checks RPC responses from NetApp devices.""" + if response.Status == 'failed': + name = request.Name + reason = response.Reason + msg = _('API %(name)s failed: %(reason)s') + raise exception.Error(msg % locals()) + + +def _get_valid_share_name(share_id): + """The name can contain letters, numbers, and the underscore + character (_). The first character must be a letter or an + underscore.""" + return 'share_' + share_id.replace('-', '_') + + +def _get_valid_snapshot_name(snapshot_id): + """The name can contain letters, numbers, and the underscore + character (_). 
The first character must be a letter or an + underscore.""" + return 'share_snapshot_' + snapshot_id.replace('-', '_') + + +class NetAppApiClient(object): + """Wrapper around DFM commands.""" + + REQUIRED_FLAGS = ['netapp_nas_wsdl_url', + 'netapp_nas_login', + 'netapp_nas_password', + 'netapp_nas_server_hostname', + 'netapp_nas_server_port'] + + def __init__(self, configuration): + self.configuration = configuration + self._client = None + + def do_setup(self): + """Setup suds (web services) client.""" + protocol = 'https' if self.configuration.netapp_nas_server_secure \ + else 'http' + soap_url = ('%s://%s:%s/apis/soap/v1' % + (protocol, + self.configuration.netapp_nas_server_hostname, + self.configuration.netapp_nas_server_port)) + + self._client = \ + suds.client.Client(self.configuration.netapp_nas_wsdl_url, + username=self.configuration.netapp_nas_login, + password=self.configuration.netapp_nas_password, + location=soap_url) + + LOG.info('NetApp RPC client started') + + def send_request_to(self, target, request, xml_args=None, + do_response_check=True): + """ + Sends RPC :request: to :target:. + :param target: IP address, ID or network name of OnTap device + :param request: API name + :param xml_args: call arguments + :param do_response_check: if set to True and RPC call has failed, + raises exception. + """ + client = self._client + srv = client.service + + rpc = client.factory.create('Request') + rpc.Name = request + rpc.Args = text.Raw(xml_args) + response = srv.ApiProxy(Request=rpc, Target=target) + + if do_response_check: + _check_response(rpc, response) + + return response + + def get_available_aggregates(self): + """Returns list of aggregates known by DFM.""" + srv = self._client.service + resp = srv.AggregateListInfoIterStart() + tag = resp.Tag + + try: + avail_aggrs = srv.AggregateListInfoIterNext(Tag=tag, + Maximum=resp.Records) + finally: + srv.AggregateListInfoIterEnd(tag) + + return avail_aggrs + + def get_host_ip_by(self, host_id): + """Returns IP address of a host known by DFM.""" + if (type(host_id) is str or type(host_id) is unicode) and \ + len(host_id.split('.')) == 4: + # already IP + return host_id + + client = self._client + srv = client.service + + filer_filter = client.factory.create('HostListInfoIterStart') + filer_filter.ObjectNameOrId = host_id + resp = srv.HostListInfoIterStart(HostListInfoIterStart=filer_filter) + tag = resp.Tag + + try: + filers = srv.HostListInfoIterNext(Tag=tag, Maximum=resp.Records) + finally: + srv.HostListInfoIterEnd(Tag=tag) + + ip = None + for host in filers.Hosts.HostInfo: + if int(host.HostId) == int(host_id): + ip = host.HostAddress + + return ip + + @staticmethod + def check_configuration(config_object): + """Ensure that the flags we care about are set.""" + for flag in NetAppApiClient.REQUIRED_FLAGS: + if not getattr(config_object, flag, None): + raise exception.Error(_('%s is not set') % flag) + + +class NetAppNASHelperBase(object): + """Interface for protocol-specific NAS drivers.""" + def __init__(self, suds_client, config_object): + self.configuration = config_object + self._client = suds_client + + def create_share(self, target_id, share): + """Creates NAS share.""" + raise NotImplementedError() + + def delete_share(self, share): + """Deletes NAS share.""" + raise NotImplementedError() + + def allow_access(self, context, share, new_rules): + """Allows new_rules to a given NAS storage for IPs in :new_rules.""" + raise NotImplementedError() + + def deny_access(self, context, share, new_rules): + """Denies new_rules to a 
given NAS storage for IPs in :new_rules:.""" + raise NotImplementedError() + + def get_target(self, share): + """Returns host where the share located..""" + raise NotImplementedError() + + +class NetAppNFSHelper(NetAppNASHelperBase): + """Netapp specific NFS sharing driver.""" + + def __init__(self, suds_client, config_object): + self.configuration = config_object + super(NetAppNFSHelper, self).__init__(suds_client, config_object) + + def create_share(self, target_id, share): + """Creates NFS share""" + args_xml = ('' + '' + '%s' + '' + '' + '' + '' + 'localhost' + '' + '' + '' + '' + 'false' + 'localhost' + '' + '' + '' + '' + '' + '') + + client = self._client + valid_share_name = _get_valid_share_name(share['id']) + export_pathname = '/vol/' + valid_share_name + + client.send_request_to(target_id, 'nfs-exportfs-append-rules-2', + args_xml % export_pathname) + + export_ip = client.get_host_ip_by(target_id) + export_location = ':'.join([export_ip, export_pathname]) + return export_location + + def delete_share(self, share): + """Deletes NFS share.""" + target, export_path = self._get_export_path(share) + + xml_args = ('' + '' + '%s' + '' + '') % export_path + + self._client.send_request_to(target, 'nfs-exportfs-delete-rules', + xml_args) + + def allow_access(self, context, share, access): + """Allows access to a given NFS storage for IPs in :access:.""" + if access['access_type'] != 'ip': + raise exception.Error(('Invalid access type supplied. ' + 'Only \'ip\' type is supported')) + + ips = access['access_to'] + + existing_rules = self._get_exisiting_rules(share) + new_rules_xml = self._append_new_rules_to(existing_rules, ips) + + self._modify_rule(share, new_rules_xml) + + def deny_access(self, context, share, access): + """Denies access to a given NFS storage for IPs in :access:.""" + denied_ips = access['access_to'] + existing_rules = self._get_exisiting_rules(share) + + if type(denied_ips) is not list: + denied_ips = [denied_ips] + + for deny_rule in denied_ips: + try: + existing_rules.remove(deny_rule) + except ValueError: + pass + + new_rules_xml = self._append_new_rules_to([], existing_rules) + self._modify_rule(share, new_rules_xml) + + def get_target(self, share): + """Returns ID of target OnTap device based on export location.""" + return self._get_export_path(share)[0] + + def _modify_rule(self, share, rw_rules): + """Modifies access rule for a share.""" + target, export_path = self._get_export_path(share) + + xml_args = ('true' + '' + '' + '%s' + '%s' + '' + '' + '') % (export_path, ''.join(rw_rules)) + + self._client.send_request_to(target, 'nfs-exportfs-append-rules-2', + xml_args) + + def _get_exisiting_rules(self, share): + """Returns available access rules for the share.""" + target, export_path = self._get_export_path(share) + xml_args = '%s' % export_path + + response = self._client.send_request_to(target, + 'nfs-exportfs-list-rules-2', + xml_args) + + rules = response.Results.rules[0] + security_rule = rules['exports-rule-info-2'][0]['security-rules'][0] + security_info = security_rule['security-rule-info'][0] + root_rules = security_info['root'][0] + allowed_hosts = root_rules['exports-hostname-info'] + + existing_rules = [] + + for allowed_host in allowed_hosts: + if 'name' in allowed_host: + existing_rules.append(allowed_host['name'][0]) + + return existing_rules + + @staticmethod + def _append_new_rules_to(existing_rules, new_rules): + """Adds new rules to existing.""" + security_rule_xml = ('' + '%s' + '' + '%s' + '' + '') + + hostname_info_xml = ('' + '%s' + '') 
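+        # Each entry appended to allowed_hosts_xml below is one rendered
+        # per-host snippet; the joined result is then substituted into
+        # both placeholders of security_rule_xml (the read-write and the
+        # root host lists), so every allowed IP gets rw and root access.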
+
+        allowed_hosts_xml = []
+
+        if type(new_rules) is not list:
+            new_rules = [new_rules]
+
+        all_rules = existing_rules + new_rules
+
+        for ip in all_rules:
+            allowed_hosts_xml.append(hostname_info_xml % ip)
+
+        return security_rule_xml % (''.join(allowed_hosts_xml),
+                                    ''.join(allowed_hosts_xml))
+
+    @staticmethod
+    def _get_export_path(share):
+        """Returns IP address and export location of a share."""
+        export_location = share['export_location']
+
+        if export_location is None:
+            export_location = ':'
+
+        return export_location.split(':')
+
+
+class NetAppCIFSHelper(NetAppNASHelperBase):
+    """NetApp specific CIFS sharing driver."""
+
+    CIFS_USER_GROUP = 'Administrators'
+
+    def __init__(self, suds_client, config_object):
+        self.configuration = config_object
+        super(NetAppCIFSHelper, self).__init__(suds_client, config_object)
+
+    def create_share(self, target_id, share):
+        """Creates CIFS storage."""
+        cifs_status = self._get_cifs_status(target_id)
+
+        if cifs_status == 'stopped':
+            self._start_cifs_service(target_id)
+
+        share_name = _get_valid_share_name(share['id'])
+
+        self._set_qtree_security(target_id, share)
+        self._add_share(target_id, share_name)
+        self._restrict_access(target_id, 'everyone', share_name)
+
+        ip_address = self._client.get_host_ip_by(target_id)
+
+        cifs_location = self._set_export_location(ip_address, share_name)
+
+        return cifs_location
+
+    def delete_share(self, share):
+        """Deletes CIFS storage."""
+        host_ip, share_name = self._get_export_location(share)
+        xml_args = '%s' % share_name
+        self._client.send_request_to(host_ip, 'cifs-share-delete', xml_args)
+
+    def allow_access(self, context, share, access):
+        """Allows access to a given CIFS storage for IPs in :access:."""
+        if access['access_type'] != 'passwd':
+            ex_text = ('NetApp only supports "passwd" access type for CIFS.')
+            raise exception.Error(ex_text)
+
+        user = access['access_to']
+        target, share_name = self._get_export_location(share)
+
+        if self._user_exists(target, user):
+            self._allow_access_for(target, user, share_name)
+        else:
+            exc_text = ('User "%s" does not exist on %s OnTap.') % (user,
+                                                                    target)
+            raise exception.Error(exc_text)
+
+    def deny_access(self, context, share, access):
+        """Denies access to a given CIFS storage for IPs in :access:."""
+        host_ip, share_name = self._get_export_location(share)
+        user = access['access_to']
+
+        self._restrict_access(host_ip, user, share_name)
+
+    def get_target(self, share):
+        """Returns OnTap target IP based on share export location."""
+        return self._get_export_location(share)[0]
+
+    def _set_qtree_security(self, target, share):
+        client = self._client
+        share_name = '/vol/' + _get_valid_share_name(share['id'])
+
+        xml_args = ('' + 'qtree' + 'security' + '%s' + 'mixed' +
+                    '') % share_name
+
+        client.send_request_to(target, 'system-cli', xml_args)
+
+    def _restrict_access(self, target, user_name, share_name):
+        xml_args = ('%s' + '%s') % (user_name, share_name)
+        self._client.send_request_to(target, 'cifs-share-ace-delete',
+                                     xml_args)
+
+    def _start_cifs_service(self, target_id):
+        """Starts CIFS service on OnTap target."""
+        client = self._client
+        return client.send_request_to(target_id, 'cifs-start',
+                                      do_response_check=False)
+
+    @staticmethod
+    def _get_export_location(share):
+        """Returns export location for a given CIFS share."""
+        export_location = share['export_location']
+
+        if export_location is None:
+            export_location = '///'
+
+        _, _, host_ip, share_name = export_location.split('/')
+        return host_ip, share_name
+
+    @staticmethod
+    def _set_export_location(ip, share_name):
+        """Returns export location of a share."""
+        return "//%s/%s" % (ip, share_name)
+
+    def _get_cifs_status(self, target_id):
+        """Returns status of a CIFS service on target OnTap."""
+        client = self._client
+        response = client.send_request_to(target_id, 'cifs-status')
+        return response.Status
+
+    def _allow_access_for(self, target, username, share_name):
+        """Allows access to the CIFS share for a given user."""
+        xml_args = ('rwx' + '%s' + '%s') % (share_name, username)
+        self._client.send_request_to(target, 'cifs-share-ace-set', xml_args)
+
+    def _user_exists(self, target, user):
+        """Returns True if user already exists on a target OnTap."""
+        xml_args = ('%s') % user
+        resp = self._client.send_request_to(target,
+                                            'useradmin-user-list',
+                                            xml_args,
+                                            do_response_check=False)
+
+        return (resp.Status == 'passed')
+
+    def _add_share(self, target_id, share_name):
+        """Creates CIFS share on target OnTap host."""
+        client = self._client
+        xml_args = ('/vol/%s' + '%s') % (share_name, share_name)
+        client.send_request_to(target_id, 'cifs-share-add', xml_args)
diff --git a/cinder/share/manager.py b/cinder/share/manager.py
new file mode 100644
index 0000000000..18232f0446
--- /dev/null
+++ b/cinder/share/manager.py
@@ -0,0 +1,221 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 NetApp
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""NAS share manager manages creating shares and access rights.
+
+**Related Flags**
+
+:share_driver: Used by :class:`ShareManager`. Defaults to
+               :class:`cinder.share.drivers.lvm.LVMShareDriver`.
+""" + +from cinder import context +from cinder import exception +from cinder import flags +from cinder import manager +from cinder.openstack.common import excutils +from cinder.openstack.common import importutils +from cinder.openstack.common import log as logging +from cinder.openstack.common import timeutils +from cinder.share.configuration import Configuration + +from oslo.config import cfg + +LOG = logging.getLogger(__name__) + +share_manager_opts = [ + cfg.StrOpt('share_driver', + default='cinder.share.drivers.lvm.LVMShareDriver', + help='Driver to use for share creation'), +] + +FLAGS = flags.FLAGS +FLAGS.register_opts(share_manager_opts) + + +class ShareManager(manager.SchedulerDependentManager): + """Manages NAS storages.""" + + RPC_API_VERSION = '1.1' + + def __init__(self, share_driver=None, service_name=None, *args, **kwargs): + """Load the driver from args, or from flags.""" + self.configuration = Configuration(share_manager_opts, + config_group=service_name) + super(ShareManager, self).__init__(service_name='share', + *args, **kwargs) + if not share_driver: + share_driver = self.configuration.share_driver + self.driver = importutils.import_object( + share_driver, + self.db, + configuration=self.configuration) + + def init_host(self): + """Initialization for a standalone service.""" + + ctxt = context.get_admin_context() + self.driver.do_setup(ctxt) + self.driver.check_for_setup_error() + + shares = self.db.share_get_all_by_host(ctxt, self.host) + LOG.debug(_("Re-exporting %s shares"), len(shares)) + for share in shares: + if share['status'] in ['available', 'in-use']: + self.driver.ensure_share(ctxt, share) + rules = self.db.share_access_get_all_for_share(ctxt, + share['id']) + for access_ref in rules: + if access_ref['state'] == access_ref.STATE_ACTIVE: + try: + self.driver.allow_access(ctxt, share, + access_ref) + except exception.ShareAccessExists: + pass + else: + LOG.info(_("share %s: skipping export"), share['name']) + + self.publish_service_capabilities(ctxt) + + def create_share(self, context, share_id, request_spec=None, + filter_properties=None, snapshot_id=None): + """Creates a share.""" + context = context.elevated() + if filter_properties is None: + filter_properties = {} + + share_ref = self.db.share_get(context, share_id) + if snapshot_id is not None: + snapshot_ref = self.db.share_snapshot_get(context, snapshot_id) + else: + snapshot_ref = None + + try: + if snapshot_ref: + self.driver.allocate_container_from_snapshot(context, + share_ref, + snapshot_ref) + else: + self.driver.allocate_container(context, share_ref) + export_location = self.driver.create_share(context, share_ref) + self.db.share_update(context, share_id, + {'export_location': export_location}) + self.driver.create_export(context, share_ref) + except Exception: + with excutils.save_and_reraise_exception(): + self.db.share_update(context, share_id, {'status': 'error'}) + else: + self.db.share_update(context, share_id, + {'status': 'available', + 'launched_at': timeutils.utcnow()}) + + def delete_share(self, context, share_id): + """Delete a share.""" + share_ref = self.db.share_get(context, share_id) + rules = self.db.share_access_get_all_for_share(context, share_id) + try: + for access_ref in rules: + self._deny_access(context, access_ref, share_ref) + self.driver.remove_export(context, share_ref) + self.driver.delete_share(context, share_ref) + self.driver.deallocate_container(context, share_ref) + except Exception: + with excutils.save_and_reraise_exception(): + self.db.share_update(context, 
share_id, + {'status': 'error_deleting'}) + else: + self.db.share_delete(context, share_id) + + def create_snapshot(self, context, share_id, snapshot_id): + """Create snapshot for share.""" + snapshot_ref = self.db.share_snapshot_get(context, snapshot_id) + + try: + snap_name = snapshot_ref['name'] + model_update = self.driver.create_snapshot(context, snapshot_ref) + if model_update: + self.db.share_snapshot_update(context, snapshot_ref['id'], + model_update) + + except Exception: + with excutils.save_and_reraise_exception(): + self.db.share_snapshot_update(context, + snapshot_ref['id'], + {'status': 'error'}) + + self.db.share_snapshot_update(context, + snapshot_ref['id'], + {'status': 'available', + 'progress': '100%'}) + return snapshot_id + + def delete_snapshot(self, context, snapshot_id): + """Delete share snapshot.""" + snapshot_ref = self.db.share_snapshot_get(context, snapshot_id) + + try: + self.driver.delete_snapshot(context, snapshot_ref) + except exception.SnapshotIsBusy: + self.db.share_snapshot_update(context, snapshot_ref['id'], + {'status': 'available'}) + except Exception: + with excutils.save_and_reraise_exception(): + self.db.share_snapshot_update(context, snapshot_ref['id'], + {'status': 'error_deleting'}) + else: + self.db.share_snapshot_destroy(context, snapshot_id) + + def allow_access(self, context, access_id): + """Allow access to some share.""" + try: + access_ref = self.db.share_access_get(context, access_id) + share_ref = self.db.share_get(context, access_ref['share_id']) + if access_ref['state'] == access_ref.STATE_NEW: + self.driver.allow_access(context, share_ref, access_ref) + self.db.share_access_update( + context, access_id, {'state': access_ref.STATE_ACTIVE}) + except Exception: + with excutils.save_and_reraise_exception(): + self.db.share_access_update( + context, access_id, {'state': access_ref.STATE_ERROR}) + + def deny_access(self, context, access_id): + """Deny access to some share.""" + access_ref = self.db.share_access_get(context, access_id) + share_ref = self.db.share_get(context, access_ref['share_id']) + self._deny_access(context, access_ref, share_ref) + + def _deny_access(self, context, access_ref, share_ref): + access_id = access_ref['id'] + try: + self.driver.deny_access(context, share_ref, access_ref) + except Exception: + with excutils.save_and_reraise_exception(): + self.db.share_access_update( + context, access_id, {'state': access_ref.STATE_ERROR}) + self.db.share_access_delete(context, access_id) + + @manager.periodic_task + def _report_driver_status(self, context): + LOG.info(_('Updating share status')) + share_stats = self.driver.get_share_stats(refresh=True) + if share_stats: + self.update_service_capabilities(share_stats) + + def publish_service_capabilities(self, context): + """Collect driver status and then publish it.""" + self._report_driver_status(context) + self._publish_service_capabilities(context) diff --git a/cinder/share/rpcapi.py b/cinder/share/rpcapi.py new file mode 100644 index 0000000000..4347ebc7b3 --- /dev/null +++ b/cinder/share/rpcapi.py @@ -0,0 +1,93 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012, Intel, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Client side of the share RPC API. +""" + +from cinder import exception +from cinder import flags +from cinder.openstack.common import rpc +import cinder.openstack.common.rpc.proxy + + +FLAGS = flags.FLAGS + + +class ShareAPI(cinder.openstack.common.rpc.proxy.RpcProxy): + '''Client side of the share rpc API. + + API version history: + + 1.0 - Initial version. + 1.1 - Add snapshot support. + 1.2 - Add filter scheduler support + ''' + + BASE_RPC_API_VERSION = '1.1' + + def __init__(self, topic=None): + super(ShareAPI, self).__init__( + topic=topic or FLAGS.share_topic, + default_version=self.BASE_RPC_API_VERSION) + + def create_share(self, ctxt, share, host, + request_spec, filter_properties, + snapshot_id=None): + self.cast(ctxt, + self.make_msg('create_share', + share_id=share['id'], + request_spec=request_spec, + filter_properties=filter_properties, + snapshot_id=snapshot_id), + topic=rpc.queue_get_for(ctxt, + self.topic, + host)) + + def delete_share(self, ctxt, share): + self.cast(ctxt, + self.make_msg('delete_share', + share_id=share['id']), + topic=rpc.queue_get_for(ctxt, self.topic, share['host'])) + + def create_snapshot(self, ctxt, share, snapshot): + self.cast(ctxt, + self.make_msg('create_snapshot', + share_id=share['id'], + snapshot_id=snapshot['id']), + topic=rpc.queue_get_for(ctxt, self.topic, share['host'])) + + def delete_snapshot(self, ctxt, snapshot, host): + self.cast(ctxt, + self.make_msg('delete_snapshot', + snapshot_id=snapshot['id']), + topic=rpc.queue_get_for(ctxt, self.topic, host)) + + def allow_access(self, ctxt, share, access): + self.cast(ctxt, self.make_msg('allow_access', access_id=access['id']), + topic=rpc.queue_get_for(ctxt, + self.topic, + share['host'])) + + def deny_access(self, ctxt, share, access): + self.cast(ctxt, self.make_msg('deny_access', access_id=access['id']), + topic=rpc.queue_get_for(ctxt, + self.topic, + share['host'])) + + def publish_service_capabilities(self, ctxt): + self.fanout_cast(ctxt, self.make_msg('publish_service_capabilities'), + version='1.0') diff --git a/cinder/test.py b/cinder/test.py new file mode 100644 index 0000000000..967eadca37 --- /dev/null +++ b/cinder/test.py @@ -0,0 +1,285 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Base classes for our unit tests. + +Allows overriding of flags for use of fakes, and some black magic for +inline callbacks. 
+ +""" + +import functools +import unittest +import uuid + +import mox +import nose.plugins.skip +from oslo.config import cfg +import stubout + +from cinder import flags +from cinder.openstack.common import log as logging +from cinder.openstack.common import timeutils +from cinder import service +from cinder import tests +from cinder.tests import fake_flags + +test_opts = [ + cfg.StrOpt('sqlite_clean_db', + default='clean.sqlite', + help='File name of clean sqlite db'), + cfg.BoolOpt('fake_tests', + default=True, + help='should we use everything for testing'), ] + +FLAGS = flags.FLAGS +FLAGS.register_opts(test_opts) + +LOG = logging.getLogger(__name__) + + +class skip_test(object): + """Decorator that skips a test.""" + # TODO(tr3buchet): remember forever what comstud did here + def __init__(self, msg): + self.message = msg + + def __call__(self, func): + @functools.wraps(func) + def _skipper(*args, **kw): + """Wrapped skipper function.""" + raise nose.SkipTest(self.message) + return _skipper + + +class skip_if(object): + """Decorator that skips a test if condition is true.""" + def __init__(self, condition, msg): + self.condition = condition + self.message = msg + + def __call__(self, func): + @functools.wraps(func) + def _skipper(*args, **kw): + """Wrapped skipper function.""" + if self.condition: + raise nose.SkipTest(self.message) + func(*args, **kw) + return _skipper + + +class skip_unless(object): + """Decorator that skips a test if condition is not true.""" + def __init__(self, condition, msg): + self.condition = condition + self.message = msg + + def __call__(self, func): + @functools.wraps(func) + def _skipper(*args, **kw): + """Wrapped skipper function.""" + if not self.condition: + raise nose.SkipTest(self.message) + func(*args, **kw) + return _skipper + + +def skip_if_fake(func): + """Decorator that skips a test if running in fake mode.""" + def _skipper(*args, **kw): + """Wrapped skipper function.""" + if FLAGS.fake_tests: + raise unittest.SkipTest('Test cannot be run in fake mode') + else: + return func(*args, **kw) + return _skipper + + +class TestingException(Exception): + pass + + +class TestCase(unittest.TestCase): + """Test case base class for all unit tests.""" + + def setUp(self): + """Run before each test method to initialize test environment.""" + super(TestCase, self).setUp() + + fake_flags.set_defaults(FLAGS) + flags.parse_args([], default_config_files=[]) + + # NOTE(vish): We need a better method for creating fixtures for tests + # now that we have some required db setup for the system + # to work properly. 
+ self.start = timeutils.utcnow() + tests.reset_db() + + # emulate some of the mox stuff, we can't use the metaclass + # because it screws with our generators + self.mox = mox.Mox() + self.stubs = stubout.StubOutForTesting() + self.injected = [] + self._services = [] + FLAGS.set_override('fatal_exception_format_errors', True) + + def tearDown(self): + """Runs after each test method to tear down test environment.""" + try: + self.mox.UnsetStubs() + self.stubs.UnsetAll() + self.stubs.SmartUnsetAll() + self.mox.VerifyAll() + super(TestCase, self).tearDown() + finally: + # Reset any overridden flags + FLAGS.reset() + + # Stop any timers + for x in self.injected: + try: + x.stop() + except AssertionError: + pass + + # Kill any services + for x in self._services: + try: + x.kill() + except Exception: + pass + + # Delete attributes that don't start with _ so they don't pin + # memory around unnecessarily for the duration of the test + # suite + for key in [k for k in self.__dict__.keys() if k[0] != '_']: + del self.__dict__[key] + + def flags(self, **kw): + """Override flag variables for a test.""" + for k, v in kw.iteritems(): + FLAGS.set_override(k, v) + + def start_service(self, name, host=None, **kwargs): + host = host and host or uuid.uuid4().hex + kwargs.setdefault('host', host) + kwargs.setdefault('binary', 'cinder-%s' % name) + svc = service.Service.create(**kwargs) + svc.start() + self._services.append(svc) + return svc + + # Useful assertions + def assertDictMatch(self, d1, d2, approx_equal=False, tolerance=0.001): + """Assert two dicts are equivalent. + + This is a 'deep' match in the sense that it handles nested + dictionaries appropriately. + + NOTE: + + If you don't care (or don't know) a given value, you can specify + the string DONTCARE as the value. This will cause that dict-item + to be skipped. + + """ + def raise_assertion(msg): + d1str = str(d1) + d2str = str(d2) + base_msg = ('Dictionaries do not match. %(msg)s d1: %(d1str)s ' + 'd2: %(d2str)s' % locals()) + raise AssertionError(base_msg) + + d1keys = set(d1.keys()) + d2keys = set(d2.keys()) + if d1keys != d2keys: + d1only = d1keys - d2keys + d2only = d2keys - d1keys + raise_assertion('Keys in d1 and not d2: %(d1only)s. 
' + 'Keys in d2 and not d1: %(d2only)s' % locals()) + + for key in d1keys: + d1value = d1[key] + d2value = d2[key] + try: + error = abs(float(d1value) - float(d2value)) + within_tolerance = error <= tolerance + except (ValueError, TypeError): + # If both values aren't convertable to float, just ignore + # ValueError if arg is a str, TypeError if it's something else + # (like None) + within_tolerance = False + + if hasattr(d1value, 'keys') and hasattr(d2value, 'keys'): + self.assertDictMatch(d1value, d2value) + elif 'DONTCARE' in (d1value, d2value): + continue + elif approx_equal and within_tolerance: + continue + elif d1value != d2value: + raise_assertion("d1['%(key)s']=%(d1value)s != " + "d2['%(key)s']=%(d2value)s" % locals()) + + def assertDictListMatch(self, L1, L2, approx_equal=False, tolerance=0.001): + """Assert a list of dicts are equivalent.""" + def raise_assertion(msg): + L1str = str(L1) + L2str = str(L2) + base_msg = ('List of dictionaries do not match: %(msg)s ' + 'L1: %(L1str)s L2: %(L2str)s' % locals()) + raise AssertionError(base_msg) + + L1count = len(L1) + L2count = len(L2) + if L1count != L2count: + raise_assertion('Length mismatch: len(L1)=%(L1count)d != ' + 'len(L2)=%(L2count)d' % locals()) + + for d1, d2 in zip(L1, L2): + self.assertDictMatch(d1, d2, approx_equal=approx_equal, + tolerance=tolerance) + + def assertSubDictMatch(self, sub_dict, super_dict): + """Assert a sub_dict is subset of super_dict.""" + self.assertTrue(set(sub_dict.keys()).issubset(set(super_dict.keys()))) + for k, sub_value in sub_dict.items(): + super_value = super_dict[k] + if isinstance(sub_value, dict): + self.assertSubDictMatch(sub_value, super_value) + elif 'DONTCARE' in (sub_value, super_value): + continue + else: + self.assertEqual(sub_value, super_value) + + def assertIn(self, a, b, *args, **kwargs): + """Python < v2.7 compatibility. Assert 'a' in 'b'""" + try: + f = super(TestCase, self).assertIn + except AttributeError: + self.assertTrue(a in b, *args, **kwargs) + else: + f(a, b, *args, **kwargs) + + def assertNotIn(self, a, b, *args, **kwargs): + """Python < v2.7 compatibility. Assert 'a' NOT in 'b'""" + try: + f = super(TestCase, self).assertNotIn + except AttributeError: + self.assertFalse(a in b, *args, **kwargs) + else: + f(a, b, *args, **kwargs) diff --git a/cinder/testing/README.rst b/cinder/testing/README.rst new file mode 100644 index 0000000000..9a9d314621 --- /dev/null +++ b/cinder/testing/README.rst @@ -0,0 +1,66 @@ +===================================== +OpenStack Cinder Testing Infrastructure +===================================== + +A note of clarification is in order, to help those who are new to testing in +OpenStack cinder: + +- actual unit tests are created in the "tests" directory; +- the "testing" directory is used to house the infrastructure needed to support + testing in OpenStack Cinder. + +This README file attempts to provide current and prospective contributors with +everything they need to know in order to start creating unit tests and +utilizing the convenience code provided in cinder.testing. + +Note: the content for the rest of this file will be added as the work items in +the following blueprint are completed: + https://blueprints.launchpad.net/cinder/+spec/consolidate-testing-infrastructure + + +Test Types: Unit vs. Functional vs. 
Integration +----------------------------------------------- + +TBD + +Writing Unit Tests +------------------ + +TBD + +Using Fakes +~~~~~~~~~~~ + +TBD + +test.TestCase +------------- +The TestCase class from cinder.test (generally imported as test) will +automatically manage self.stubs using the stubout module and self.mox +using the mox module during the setUp step. They will automatically +verify and clean up during the tearDown step. + +If using test.TestCase, calling the super class setUp is required and +calling the super class tearDown is required to be last if tearDown +is overridden. + +Writing Functional Tests +------------------------ + +TBD + +Writing Integration Tests +------------------------- + +TBD + +Tests and assertRaises +---------------------- +When asserting that a test should raise an exception, test against the +most specific exception possible. An overly broad exception type (like +Exception) can mask errors in the unit test itself. + +Example:: + + self.assertRaises(exception.InstanceNotFound, db.instance_get_by_uuid, + elevated, instance_uuid) diff --git a/cinder/tests/__init__.py b/cinder/tests/__init__.py new file mode 100644 index 0000000000..5027886ad6 --- /dev/null +++ b/cinder/tests/__init__.py @@ -0,0 +1,85 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +:mod:`cinder.tests` -- Cinder Unittests +===================================================== + +.. automodule:: cinder.tests + :platform: Unix +.. moduleauthor:: Jesse Andrews +.. moduleauthor:: Devin Carlen +.. moduleauthor:: Vishvananda Ishaya +.. moduleauthor:: Joshua McKenty +.. moduleauthor:: Manish Singh +.. moduleauthor:: Andy Smith +""" + +import eventlet +eventlet.monkey_patch() + +# See http://code.google.com/p/python-nose/issues/detail?id=373 +# The code below enables nosetests to work with i18n _() blocks +import __builtin__ +setattr(__builtin__, '_', lambda x: x) +import os +import shutil + +from cinder.db.sqlalchemy.session import get_engine +from cinder import flags + +FLAGS = flags.FLAGS + +_DB = None + + +def reset_db(): + if FLAGS.sql_connection == "sqlite://": + engine = get_engine() + engine.dispose() + conn = engine.connect() + conn.connection.executescript(_DB) + else: + shutil.copyfile(os.path.join(FLAGS.state_path, FLAGS.sqlite_clean_db), + os.path.join(FLAGS.state_path, FLAGS.sqlite_db)) + + +def setup(): + import mox # Fail fast if you don't have mox. 
Workaround for bug 810424 + + from cinder.db import migration + from cinder.tests import fake_flags + fake_flags.set_defaults(FLAGS) + + if FLAGS.sql_connection == "sqlite://": + if migration.db_version() > 1: + return + else: + testdb = os.path.join(FLAGS.state_path, FLAGS.sqlite_db) + if os.path.exists(testdb): + return + migration.db_sync() + + if FLAGS.sql_connection == "sqlite://": + global _DB + engine = get_engine() + conn = engine.connect() + _DB = "".join(line for line in conn.connection.iterdump()) + else: + cleandb = os.path.join(FLAGS.state_path, FLAGS.sqlite_clean_db) + shutil.copyfile(testdb, cleandb) diff --git a/cinder/tests/api/__init__.py b/cinder/tests/api/__init__.py new file mode 100644 index 0000000000..3be5ce944c --- /dev/null +++ b/cinder/tests/api/__init__.py @@ -0,0 +1,19 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +# NOTE(vish): this forces the fixtures from tests/__init.py:setup() to work +from cinder.tests import * diff --git a/cinder/tests/api/common.py b/cinder/tests/api/common.py new file mode 100644 index 0000000000..e030105653 --- /dev/null +++ b/cinder/tests/api/common.py @@ -0,0 +1,38 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +def compare_links(actual, expected): + """Compare xml atom links.""" + + return compare_tree_to_dict(actual, expected, ('rel', 'href', 'type')) + + +def compare_media_types(actual, expected): + """Compare xml media types.""" + + return compare_tree_to_dict(actual, expected, ('base', 'type')) + + +def compare_tree_to_dict(actual, expected, keys): + """Compare parts of lxml.etree objects to dicts.""" + + for elem, data in zip(actual, expected): + for key in keys: + if elem.get(key) != data.get(key): + return False + return True diff --git a/cinder/tests/api/contrib/__init__.py b/cinder/tests/api/contrib/__init__.py new file mode 100644 index 0000000000..3be5ce944c --- /dev/null +++ b/cinder/tests/api/contrib/__init__.py @@ -0,0 +1,19 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +# NOTE(vish): this forces the fixtures from tests/__init.py:setup() to work +from cinder.tests import * diff --git a/cinder/tests/api/contrib/stubs.py b/cinder/tests/api/contrib/stubs.py new file mode 100644 index 0000000000..ff900785e6 --- /dev/null +++ b/cinder/tests/api/contrib/stubs.py @@ -0,0 +1,125 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import datetime + +from cinder import exception as exc + +FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa' +FAKE_UUIDS = {} + + +def stub_share(id, **kwargs): + share = { + 'id': id, + 'share_proto': 'fakeproto', + 'export_location': 'fake_location', + 'user_id': 'fakeuser', + 'project_id': 'fakeproject', + 'host': 'fakehost', + 'size': 1, + 'availability_zone': 'fakeaz', + 'status': 'fakestatus', + 'name': 'vol name', + 'display_name': 'displayname', + 'display_description': 'displaydesc', + 'created_at': datetime.datetime(1, 1, 1, 1, 1, 1), + 'snapshot_id': '2', + } + share.update(kwargs) + return share + + +def stub_snapshot(id, **kwargs): + snapshot = { + 'id': id, + 'share_id': 'fakeshareid', + 'share_proto': 'fakesnapproto', + 'export_location': 'fakesnaplocation', + 'user_id': 'fakesnapuser', + 'project_id': 'fakesnapproject', + 'host': 'fakesnaphost', + 'share_size': 1, + 'status': 'fakesnapstatus', + 'share_name': 'fakesharename', + 'display_name': 'displaysnapname', + 'display_description': 'displaysnapdesc', + 'created_at': datetime.datetime(1, 1, 1, 1, 1, 1), + } + snapshot.update(kwargs) + return snapshot + + +def stub_share_get(self, context, share_id): + return stub_share(share_id) + + +def stub_share_get_notfound(self, context, share_id): + raise exc.NotFound + + +def stub_share_create(self, context, share_proto, size, name, description, + **param): + share = stub_share('1') + share['status'] = 'creating' + share['share_proto'] = share_proto + share['size'] = size + share['display_name'] = name + share['display_description'] = description + return share + + +def stub_share_delete(self, context, *args, **param): + pass + + +def stub_share_update(self, context, *args, **param): + pass + + +def stub_share_get_all_by_project(self, context, search_opts=None): + return [stub_share_get(self, context, '1')] + + +def stub_get_all_shares(self, context): + return [stub_share(100, project_id='fake'), + stub_share(101, project_id='superfake'), + stub_share(102, project_id='superduperfake')] + + +def stub_snapshot_get(self, context, snapshot_id): + return stub_snapshot(snapshot_id) + + +def stub_snapshot_get_notfound(self, 
context, snapshot_id): + raise exc.NotFound + + +def stub_snapshot_create(self, context, share, display_name, + display_description): + return stub_snapshot(200, + share_id=share['id'], + display_name=display_name, + display_description=display_description) + + +def stub_snapshot_delete(self, context, *args, **param): + pass + + +def stub_snapshot_get_all_by_project(self, context, search_opts=None): + return [stub_snapshot_get(self, context, 2)] diff --git a/cinder/tests/api/contrib/test_admin_actions.py b/cinder/tests/api/contrib/test_admin_actions.py new file mode 100644 index 0000000000..d6ae606f30 --- /dev/null +++ b/cinder/tests/api/contrib/test_admin_actions.py @@ -0,0 +1,348 @@ +import shutil +import tempfile +import webob + +from cinder import context +from cinder import db +from cinder import exception +from cinder.openstack.common import jsonutils +from cinder import test +from cinder.tests.api import fakes +from cinder.tests.api.v2 import stubs +from cinder.volume import api as volume_api + + +def app(): + # no auth, just let environ['cinder.context'] pass through + api = fakes.router.APIRouter() + mapper = fakes.urlmap.URLMap() + mapper['/v2'] = api + return mapper + + +class AdminActionsTest(test.TestCase): + + def setUp(self): + self.tempdir = tempfile.mkdtemp() + super(AdminActionsTest, self).setUp() + self.flags(rpc_backend='cinder.openstack.common.rpc.impl_fake') + self.flags(lock_path=self.tempdir) + self.volume_api = volume_api.API() + + def tearDown(self): + shutil.rmtree(self.tempdir) + + def test_reset_status_as_admin(self): + # admin context + ctx = context.RequestContext('admin', 'fake', True) + # current status is available + volume = db.volume_create(ctx, {'status': 'available'}) + req = webob.Request.blank('/v2/fake/volumes/%s/action' % volume['id']) + req.method = 'POST' + req.headers['content-type'] = 'application/json' + # request status of 'error' + req.body = jsonutils.dumps({'os-reset_status': {'status': 'error'}}) + # attach admin context to request + req.environ['cinder.context'] = ctx + resp = req.get_response(app()) + # request is accepted + self.assertEquals(resp.status_int, 202) + volume = db.volume_get(ctx, volume['id']) + # status changed to 'error' + self.assertEquals(volume['status'], 'error') + + def test_reset_status_as_non_admin(self): + # current status is 'error' + volume = db.volume_create(context.get_admin_context(), + {'status': 'error'}) + req = webob.Request.blank('/v2/fake/volumes/%s/action' % volume['id']) + req.method = 'POST' + req.headers['content-type'] = 'application/json' + # request changing status to available + req.body = jsonutils.dumps({'os-reset_status': {'status': + 'available'}}) + # non-admin context + req.environ['cinder.context'] = context.RequestContext('fake', 'fake') + resp = req.get_response(app()) + # request is not authorized + self.assertEquals(resp.status_int, 403) + volume = db.volume_get(context.get_admin_context(), volume['id']) + # status is still 'error' + self.assertEquals(volume['status'], 'error') + + def test_malformed_reset_status_body(self): + # admin context + ctx = context.RequestContext('admin', 'fake', True) + # current status is available + volume = db.volume_create(ctx, {'status': 'available'}) + req = webob.Request.blank('/v2/fake/volumes/%s/action' % volume['id']) + req.method = 'POST' + req.headers['content-type'] = 'application/json' + # malformed request body + req.body = jsonutils.dumps({'os-reset_status': {'x-status': 'bad'}}) + # attach admin context to request + 
req.environ['cinder.context'] = ctx + resp = req.get_response(app()) + # bad request + self.assertEquals(resp.status_int, 400) + volume = db.volume_get(ctx, volume['id']) + # status is still 'available' + self.assertEquals(volume['status'], 'available') + + def test_invalid_status_for_volume(self): + # admin context + ctx = context.RequestContext('admin', 'fake', True) + # current status is available + volume = db.volume_create(ctx, {'status': 'available'}) + req = webob.Request.blank('/v2/fake/volumes/%s/action' % volume['id']) + req.method = 'POST' + req.headers['content-type'] = 'application/json' + # 'invalid' is not a valid status + req.body = jsonutils.dumps({'os-reset_status': {'status': 'invalid'}}) + # attach admin context to request + req.environ['cinder.context'] = ctx + resp = req.get_response(app()) + # bad request + self.assertEquals(resp.status_int, 400) + volume = db.volume_get(ctx, volume['id']) + # status is still 'available' + self.assertEquals(volume['status'], 'available') + + def test_reset_status_for_missing_volume(self): + # admin context + ctx = context.RequestContext('admin', 'fake', True) + # missing-volume-id + req = webob.Request.blank('/v2/fake/volumes/%s/action' % + 'missing-volume-id') + req.method = 'POST' + req.headers['content-type'] = 'application/json' + # malformed request body + req.body = jsonutils.dumps({'os-reset_status': {'status': + 'available'}}) + # attach admin context to request + req.environ['cinder.context'] = ctx + resp = req.get_response(app()) + # not found + self.assertEquals(resp.status_int, 404) + self.assertRaises(exception.NotFound, db.volume_get, ctx, + 'missing-volume-id') + + def test_reset_attached_status(self): + # admin context + ctx = context.RequestContext('admin', 'fake', True) + # current status is available + volume = db.volume_create(ctx, {'status': 'available', + 'attach_status': 'attached'}) + req = webob.Request.blank('/v2/fake/volumes/%s/action' % volume['id']) + req.method = 'POST' + req.headers['content-type'] = 'application/json' + # request update attach_status to detached + body = {'os-reset_status': {'status': 'available', + 'attach_status': 'detached'}} + req.body = jsonutils.dumps(body) + # attach admin context to request + req.environ['cinder.context'] = ctx + resp = req.get_response(app()) + # request is accepted + self.assertEquals(resp.status_int, 202) + volume = db.volume_get(ctx, volume['id']) + # attach_status changed to 'detached' + self.assertEquals(volume['attach_status'], 'detached') + # status un-modified + self.assertEquals(volume['status'], 'available') + + def test_invalid_reset_attached_status(self): + # admin context + ctx = context.RequestContext('admin', 'fake', True) + # current status is available + volume = db.volume_create(ctx, {'status': 'available', + 'attach_status': 'detached'}) + req = webob.Request.blank('/v2/fake/volumes/%s/action' % volume['id']) + req.method = 'POST' + req.headers['content-type'] = 'application/json' + # 'invalid' is not a valid attach_status + body = {'os-reset_status': {'status': 'available', + 'attach_status': 'invalid'}} + req.body = jsonutils.dumps(body) + # attach admin context to request + req.environ['cinder.context'] = ctx + resp = req.get_response(app()) + # bad request + self.assertEquals(resp.status_int, 400) + volume = db.volume_get(ctx, volume['id']) + # status and attach_status un-modified + self.assertEquals(volume['status'], 'available') + self.assertEquals(volume['attach_status'], 'detached') + + def test_snapshot_reset_status(self): + # admin 
context + ctx = context.RequestContext('admin', 'fake', True) + # snapshot in 'error_deleting' + volume = db.volume_create(ctx, {}) + snapshot = db.snapshot_create(ctx, {'status': 'error_deleting', + 'volume_id': volume['id']}) + req = webob.Request.blank('/v2/fake/snapshots/%s/action' % + snapshot['id']) + req.method = 'POST' + req.headers['content-type'] = 'application/json' + # request status of 'error' + req.body = jsonutils.dumps({'os-reset_status': {'status': 'error'}}) + # attach admin context to request + req.environ['cinder.context'] = ctx + resp = req.get_response(app()) + # request is accepted + self.assertEquals(resp.status_int, 202) + snapshot = db.snapshot_get(ctx, snapshot['id']) + # status changed to 'error' + self.assertEquals(snapshot['status'], 'error') + + def test_invalid_status_for_snapshot(self): + # admin context + ctx = context.RequestContext('admin', 'fake', True) + # snapshot in 'available' + volume = db.volume_create(ctx, {}) + snapshot = db.snapshot_create(ctx, {'status': 'available', + 'volume_id': volume['id']}) + req = webob.Request.blank('/v2/fake/snapshots/%s/action' % + snapshot['id']) + req.method = 'POST' + req.headers['content-type'] = 'application/json' + # 'attaching' is not a valid status for snapshots + req.body = jsonutils.dumps({'os-reset_status': {'status': + 'attaching'}}) + # attach admin context to request + req.environ['cinder.context'] = ctx + resp = req.get_response(app()) + # bad request + self.assertEquals(resp.status_int, 400) + snapshot = db.snapshot_get(ctx, snapshot['id']) + # status is still 'available' + self.assertEquals(snapshot['status'], 'available') + + def test_force_delete(self): + # admin context + ctx = context.RequestContext('admin', 'fake', True) + # current status is creating + volume = db.volume_create(ctx, {'status': 'creating'}) + req = webob.Request.blank('/v2/fake/volumes/%s/action' % volume['id']) + req.method = 'POST' + req.headers['content-type'] = 'application/json' + req.body = jsonutils.dumps({'os-force_delete': {}}) + # attach admin context to request + req.environ['cinder.context'] = ctx + resp = req.get_response(app()) + # request is accepted + self.assertEquals(resp.status_int, 202) + # volume is deleted + self.assertRaises(exception.NotFound, db.volume_get, ctx, volume['id']) + + def test_force_delete_snapshot(self): + # admin context + ctx = context.RequestContext('admin', 'fake', True) + # current status is creating + volume = db.volume_create(ctx, {'host': 'test'}) + snapshot = db.snapshot_create(ctx, {'status': 'creating', + 'volume_size': 1, + 'volume_id': volume['id']}) + path = '/v2/fake/snapshots/%s/action' % snapshot['id'] + req = webob.Request.blank(path) + req.method = 'POST' + req.headers['content-type'] = 'application/json' + req.body = jsonutils.dumps({'os-force_delete': {}}) + # attach admin context to request + req.environ['cinder.context'] = ctx + # start service to handle rpc.cast for 'delete snapshot' + svc = self.start_service('volume', host='test') + # make request + resp = req.get_response(app()) + # request is accepted + self.assertEquals(resp.status_int, 202) + # snapshot is deleted + self.assertRaises(exception.NotFound, db.snapshot_get, ctx, + snapshot['id']) + # cleanup + svc.stop() + + def test_force_detach_volume(self): + # admin context + ctx = context.RequestContext('admin', 'fake', True) + # current status is available + volume = db.volume_create(ctx, {'status': 'available', 'host': 'test', + 'provider_location': ''}) + # start service to handle rpc messages for 
attach requests + svc = self.start_service('volume', host='test') + self.volume_api.reserve_volume(ctx, volume) + self.volume_api.initialize_connection(ctx, volume, {}) + mountpoint = '/dev/vbd' + self.volume_api.attach(ctx, volume, stubs.FAKE_UUID, mountpoint) + # volume is attached + volume = db.volume_get(ctx, volume['id']) + self.assertEquals(volume['status'], 'in-use') + self.assertEquals(volume['instance_uuid'], stubs.FAKE_UUID) + self.assertEquals(volume['mountpoint'], mountpoint) + self.assertEquals(volume['attach_status'], 'attached') + # build request to force detach + req = webob.Request.blank('/v2/fake/volumes/%s/action' % volume['id']) + req.method = 'POST' + req.headers['content-type'] = 'application/json' + # request status of 'error' + req.body = jsonutils.dumps({'os-force_detach': None}) + # attach admin context to request + req.environ['cinder.context'] = ctx + # make request + resp = req.get_response(app()) + # request is accepted + self.assertEquals(resp.status_int, 202) + volume = db.volume_get(ctx, volume['id']) + # status changed to 'available' + self.assertEquals(volume['status'], 'available') + self.assertEquals(volume['instance_uuid'], None) + self.assertEquals(volume['mountpoint'], None) + self.assertEquals(volume['attach_status'], 'detached') + # cleanup + svc.stop() + + def test_attach_in_use_volume(self): + """Test that attaching to an in-use volume fails.""" + # admin context + ctx = context.RequestContext('admin', 'fake', True) + # current status is available + volume = db.volume_create(ctx, {'status': 'available', 'host': 'test', + 'provider_location': ''}) + # start service to handle rpc messages for attach requests + svc = self.start_service('volume', host='test') + self.volume_api.reserve_volume(ctx, volume) + self.volume_api.initialize_connection(ctx, volume, {}) + mountpoint = '/dev/vbd' + self.volume_api.attach(ctx, volume, stubs.FAKE_UUID, mountpoint) + self.assertRaises(exception.InvalidVolume, + self.volume_api.attach, + ctx, + volume, + fakes.get_fake_uuid(), + mountpoint) + # cleanup + svc.stop() + + def test_attach_attaching_volume_with_different_instance(self): + """Test that attaching volume reserved for another instance fails.""" + # admin context + ctx = context.RequestContext('admin', 'fake', True) + # current status is available + volume = db.volume_create(ctx, {'status': 'available', 'host': 'test', + 'provider_location': ''}) + # start service to handle rpc messages for attach requests + svc = self.start_service('volume', host='test') + self.volume_api.initialize_connection(ctx, volume, {}) + values = {'status': 'attaching', + 'instance_uuid': fakes.get_fake_uuid()} + db.volume_update(ctx, volume['id'], values) + mountpoint = '/dev/vbd' + self.assertRaises(exception.InvalidVolume, + self.volume_api.attach, + ctx, + volume, + stubs.FAKE_UUID, + mountpoint) + # cleanup + svc.stop() diff --git a/cinder/tests/api/contrib/test_backups.py b/cinder/tests/api/contrib/test_backups.py new file mode 100644 index 0000000000..899814a70d --- /dev/null +++ b/cinder/tests/api/contrib/test_backups.py @@ -0,0 +1,860 @@ +# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Tests for Backup code. +""" + +import json +from xml.dom import minidom + +import webob + +# needed for stubs to work +import cinder.backup +from cinder import context +from cinder import db +from cinder import exception +from cinder.openstack.common import log as logging +from cinder import test +from cinder.tests.api import fakes +# needed for stubs to work +import cinder.volume + + +LOG = logging.getLogger(__name__) + + +class BackupsAPITestCase(test.TestCase): + """Test Case for backups API.""" + + def setUp(self): + super(BackupsAPITestCase, self).setUp() + + def tearDown(self): + super(BackupsAPITestCase, self).tearDown() + + @staticmethod + def _create_backup(volume_id=1, + display_name='test_backup', + display_description='this is a test backup', + container='volumebackups', + status='creating', + size=0, object_count=0): + """Create a backup object.""" + backup = {} + backup['volume_id'] = volume_id + backup['user_id'] = 'fake' + backup['project_id'] = 'fake' + backup['host'] = 'testhost' + backup['availability_zone'] = 'az1' + backup['display_name'] = display_name + backup['display_description'] = display_description + backup['container'] = container + backup['status'] = status + backup['fail_reason'] = '' + backup['size'] = size + backup['object_count'] = object_count + return db.backup_create(context.get_admin_context(), backup)['id'] + + @staticmethod + def _get_backup_attrib(backup_id, attrib_name): + return db.backup_get(context.get_admin_context(), + backup_id)[attrib_name] + + @staticmethod + def _create_volume(display_name='test_volume', + display_description='this is a test volume', + status='creating', + size=1): + """Create a volume object.""" + vol = {} + vol['size'] = size + vol['user_id'] = 'fake' + vol['project_id'] = 'fake' + vol['status'] = status + vol['display_name'] = display_name + vol['display_description'] = display_description + vol['attach_status'] = 'detached' + return db.volume_create(context.get_admin_context(), vol)['id'] + + def test_show_backup(self): + volume_id = self._create_volume(size=5) + backup_id = self._create_backup(volume_id) + LOG.debug('Created backup with id %s' % backup_id) + req = webob.Request.blank('/v2/fake/backups/%s' % + backup_id) + req.method = 'GET' + req.headers['Content-Type'] = 'application/json' + res = req.get_response(fakes.wsgi_app()) + res_dict = json.loads(res.body) + + self.assertEqual(res.status_int, 200) + self.assertEqual(res_dict['backup']['availability_zone'], 'az1') + self.assertEqual(res_dict['backup']['container'], 'volumebackups') + self.assertEqual(res_dict['backup']['description'], + 'this is a test backup') + self.assertEqual(res_dict['backup']['name'], 'test_backup') + self.assertEqual(res_dict['backup']['id'], backup_id) + self.assertEqual(res_dict['backup']['object_count'], 0) + self.assertEqual(res_dict['backup']['size'], 0) + self.assertEqual(res_dict['backup']['status'], 'creating') + self.assertEqual(res_dict['backup']['volume_id'], volume_id) + + db.backup_destroy(context.get_admin_context(), backup_id) + db.volume_destroy(context.get_admin_context(), volume_id) + + def 
test_show_backup_xml_content_type(self): + volume_id = self._create_volume(size=5) + backup_id = self._create_backup(volume_id) + req = webob.Request.blank('/v2/fake/backups/%s' % backup_id) + req.method = 'GET' + req.headers['Content-Type'] = 'application/xml' + req.headers['Accept'] = 'application/xml' + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 200) + dom = minidom.parseString(res.body) + backup = dom.getElementsByTagName('backup') + name = backup.item(0).getAttribute('name') + container_name = backup.item(0).getAttribute('container') + self.assertEquals(container_name.strip(), "volumebackups") + self.assertEquals(name.strip(), "test_backup") + db.backup_destroy(context.get_admin_context(), backup_id) + db.volume_destroy(context.get_admin_context(), volume_id) + + def test_show_backup_with_backup_NotFound(self): + req = webob.Request.blank('/v2/fake/backups/9999') + req.method = 'GET' + req.headers['Content-Type'] = 'application/json' + res = req.get_response(fakes.wsgi_app()) + res_dict = json.loads(res.body) + + self.assertEqual(res.status_int, 404) + self.assertEqual(res_dict['itemNotFound']['code'], 404) + self.assertEqual(res_dict['itemNotFound']['message'], + 'Backup 9999 could not be found.') + + def test_list_backups_json(self): + backup_id1 = self._create_backup() + backup_id2 = self._create_backup() + backup_id3 = self._create_backup() + + req = webob.Request.blank('/v2/fake/backups') + req.method = 'GET' + req.headers['Content-Type'] = 'application/json' + res = req.get_response(fakes.wsgi_app()) + res_dict = json.loads(res.body) + + self.assertEqual(res.status_int, 200) + self.assertEqual(len(res_dict['backups'][0]), 3) + self.assertEqual(res_dict['backups'][0]['id'], backup_id1) + self.assertEqual(res_dict['backups'][0]['name'], 'test_backup') + self.assertEqual(len(res_dict['backups'][1]), 3) + self.assertEqual(res_dict['backups'][1]['id'], backup_id2) + self.assertEqual(res_dict['backups'][1]['name'], 'test_backup') + self.assertEqual(len(res_dict['backups'][2]), 3) + self.assertEqual(res_dict['backups'][2]['id'], backup_id3) + self.assertEqual(res_dict['backups'][2]['name'], 'test_backup') + + db.backup_destroy(context.get_admin_context(), backup_id3) + db.backup_destroy(context.get_admin_context(), backup_id2) + db.backup_destroy(context.get_admin_context(), backup_id1) + + def test_list_backups_xml(self): + backup_id1 = self._create_backup() + backup_id2 = self._create_backup() + backup_id3 = self._create_backup() + + req = webob.Request.blank('/v2/fake/backups') + req.method = 'GET' + req.headers['Content-Type'] = 'application/xml' + req.headers['Accept'] = 'application/xml' + res = req.get_response(fakes.wsgi_app()) + + self.assertEqual(res.status_int, 200) + dom = minidom.parseString(res.body) + backup_list = dom.getElementsByTagName('backup') + + self.assertEqual(backup_list.item(0).attributes.length, 2) + self.assertEqual(backup_list.item(0).getAttribute('id'), + backup_id1) + self.assertEqual(backup_list.item(1).attributes.length, 2) + self.assertEqual(backup_list.item(1).getAttribute('id'), + backup_id2) + self.assertEqual(backup_list.item(2).attributes.length, 2) + self.assertEqual(backup_list.item(2).getAttribute('id'), + backup_id3) + + db.backup_destroy(context.get_admin_context(), backup_id3) + db.backup_destroy(context.get_admin_context(), backup_id2) + db.backup_destroy(context.get_admin_context(), backup_id1) + + def test_list_backups_detail_json(self): + backup_id1 = self._create_backup() + backup_id2 = 
self._create_backup() + backup_id3 = self._create_backup() + + req = webob.Request.blank('/v2/fake/backups/detail') + req.method = 'GET' + req.headers['Content-Type'] = 'application/json' + req.headers['Accept'] = 'application/json' + res = req.get_response(fakes.wsgi_app()) + res_dict = json.loads(res.body) + + self.assertEqual(res.status_int, 200) + self.assertEqual(len(res_dict['backups'][0]), 12) + self.assertEqual(res_dict['backups'][0]['availability_zone'], 'az1') + self.assertEqual(res_dict['backups'][0]['container'], + 'volumebackups') + self.assertEqual(res_dict['backups'][0]['description'], + 'this is a test backup') + self.assertEqual(res_dict['backups'][0]['name'], + 'test_backup') + self.assertEqual(res_dict['backups'][0]['id'], backup_id1) + self.assertEqual(res_dict['backups'][0]['object_count'], 0) + self.assertEqual(res_dict['backups'][0]['size'], 0) + self.assertEqual(res_dict['backups'][0]['status'], 'creating') + self.assertEqual(res_dict['backups'][0]['volume_id'], '1') + + self.assertEqual(len(res_dict['backups'][1]), 12) + self.assertEqual(res_dict['backups'][1]['availability_zone'], 'az1') + self.assertEqual(res_dict['backups'][1]['container'], + 'volumebackups') + self.assertEqual(res_dict['backups'][1]['description'], + 'this is a test backup') + self.assertEqual(res_dict['backups'][1]['name'], + 'test_backup') + self.assertEqual(res_dict['backups'][1]['id'], backup_id2) + self.assertEqual(res_dict['backups'][1]['object_count'], 0) + self.assertEqual(res_dict['backups'][1]['size'], 0) + self.assertEqual(res_dict['backups'][1]['status'], 'creating') + self.assertEqual(res_dict['backups'][1]['volume_id'], '1') + + self.assertEqual(len(res_dict['backups'][2]), 12) + self.assertEqual(res_dict['backups'][2]['availability_zone'], 'az1') + self.assertEqual(res_dict['backups'][2]['container'], + 'volumebackups') + self.assertEqual(res_dict['backups'][2]['description'], + 'this is a test backup') + self.assertEqual(res_dict['backups'][2]['name'], + 'test_backup') + self.assertEqual(res_dict['backups'][2]['id'], backup_id3) + self.assertEqual(res_dict['backups'][2]['object_count'], 0) + self.assertEqual(res_dict['backups'][2]['size'], 0) + self.assertEqual(res_dict['backups'][2]['status'], 'creating') + self.assertEqual(res_dict['backups'][2]['volume_id'], '1') + + db.backup_destroy(context.get_admin_context(), backup_id3) + db.backup_destroy(context.get_admin_context(), backup_id2) + db.backup_destroy(context.get_admin_context(), backup_id1) + + def test_list_backups_detail_xml(self): + backup_id1 = self._create_backup() + backup_id2 = self._create_backup() + backup_id3 = self._create_backup() + + req = webob.Request.blank('/v2/fake/backups/detail') + req.method = 'GET' + req.headers['Content-Type'] = 'application/xml' + req.headers['Accept'] = 'application/xml' + res = req.get_response(fakes.wsgi_app()) + + self.assertEqual(res.status_int, 200) + dom = minidom.parseString(res.body) + backup_detail = dom.getElementsByTagName('backup') + + self.assertEqual(backup_detail.item(0).attributes.length, 11) + self.assertEqual( + backup_detail.item(0).getAttribute('availability_zone'), 'az1') + self.assertEqual( + backup_detail.item(0).getAttribute('container'), 'volumebackups') + self.assertEqual( + backup_detail.item(0).getAttribute('description'), + 'this is a test backup') + self.assertEqual( + backup_detail.item(0).getAttribute('name'), 'test_backup') + self.assertEqual( + backup_detail.item(0).getAttribute('id'), backup_id1) + self.assertEqual( + 
int(backup_detail.item(0).getAttribute('object_count')), 0) + self.assertEqual( + int(backup_detail.item(0).getAttribute('size')), 0) + self.assertEqual( + backup_detail.item(0).getAttribute('status'), 'creating') + self.assertEqual( + int(backup_detail.item(0).getAttribute('volume_id')), 1) + + self.assertEqual(backup_detail.item(1).attributes.length, 11) + self.assertEqual( + backup_detail.item(1).getAttribute('availability_zone'), 'az1') + self.assertEqual( + backup_detail.item(1).getAttribute('container'), 'volumebackups') + self.assertEqual( + backup_detail.item(1).getAttribute('description'), + 'this is a test backup') + self.assertEqual( + backup_detail.item(1).getAttribute('name'), 'test_backup') + self.assertEqual( + backup_detail.item(1).getAttribute('id'), backup_id2) + self.assertEqual( + int(backup_detail.item(1).getAttribute('object_count')), 0) + self.assertEqual( + int(backup_detail.item(1).getAttribute('size')), 0) + self.assertEqual( + backup_detail.item(1).getAttribute('status'), 'creating') + self.assertEqual( + int(backup_detail.item(1).getAttribute('volume_id')), 1) + + self.assertEqual(backup_detail.item(2).attributes.length, 11) + self.assertEqual( + backup_detail.item(2).getAttribute('availability_zone'), 'az1') + self.assertEqual( + backup_detail.item(2).getAttribute('container'), 'volumebackups') + self.assertEqual( + backup_detail.item(2).getAttribute('description'), + 'this is a test backup') + self.assertEqual( + backup_detail.item(2).getAttribute('name'), 'test_backup') + self.assertEqual( + backup_detail.item(2).getAttribute('id'), backup_id3) + self.assertEqual( + int(backup_detail.item(2).getAttribute('object_count')), 0) + self.assertEqual( + int(backup_detail.item(2).getAttribute('size')), 0) + self.assertEqual( + backup_detail.item(2).getAttribute('status'), 'creating') + self.assertEqual( + int(backup_detail.item(2).getAttribute('volume_id')), 1) + + db.backup_destroy(context.get_admin_context(), backup_id3) + db.backup_destroy(context.get_admin_context(), backup_id2) + db.backup_destroy(context.get_admin_context(), backup_id1) + + def test_create_backup_json(self): + volume_id = self._create_volume(status='available', size=5) + body = {"backup": {"display_name": "nightly001", + "display_description": + "Nightly Backup 03-Sep-2012", + "volume_id": volume_id, + "container": "nightlybackups", + } + } + req = webob.Request.blank('/v2/fake/backups') + req.method = 'POST' + req.headers['Content-Type'] = 'application/json' + req.body = json.dumps(body) + res = req.get_response(fakes.wsgi_app()) + + res_dict = json.loads(res.body) + LOG.info(res_dict) + + self.assertEqual(res.status_int, 202) + self.assertTrue('id' in res_dict['backup']) + + db.volume_destroy(context.get_admin_context(), volume_id) + + def test_create_backup_xml(self): + volume_size = 2 + volume_id = self._create_volume(status='available', size=volume_size) + + req = webob.Request.blank('/v2/fake/backups') + req.body = ('<backup volume_id="%s"/>' % volume_id) + req.method = 'POST' + req.headers['Content-Type'] = 'application/xml' + req.headers['Accept'] = 'application/xml' + res = req.get_response(fakes.wsgi_app()) + + self.assertEqual(res.status_int, 202) + dom = minidom.parseString(res.body) + backup = dom.getElementsByTagName('backup') + self.assertTrue(backup.item(0).hasAttribute('id')) + + db.volume_destroy(context.get_admin_context(), volume_id) + + def test_create_backup_with_no_body(self): + # omit body from the request + req = webob.Request.blank('/v2/fake/backups') + req.body = json.dumps(None) + 
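+ # json.dumps(None) yields the literal string "null", which the API treats as a missing body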
req.method = 'POST' + req.headers['Content-Type'] = 'application/json' + req.headers['Accept'] = 'application/json' + res = req.get_response(fakes.wsgi_app()) + res_dict = json.loads(res.body) + + self.assertEqual(res.status_int, 400) + self.assertEqual(res_dict['badRequest']['code'], 400) + self.assertEqual(res_dict['badRequest']['message'], + 'The server could not comply with the request since' + ' it is either malformed or otherwise incorrect.') + + def test_create_backup_with_body_KeyError(self): + # omit volume_id from body + body = {"backup": {"display_name": "nightly001", + "display_description": + "Nightly Backup 03-Sep-2012", + "container": "nightlybackups", + } + } + req = webob.Request.blank('/v2/fake/backups') + req.method = 'POST' + req.headers['Content-Type'] = 'application/json' + req.body = json.dumps(body) + res = req.get_response(fakes.wsgi_app()) + res_dict = json.loads(res.body) + + self.assertEqual(res.status_int, 400) + self.assertEqual(res_dict['badRequest']['code'], 400) + self.assertEqual(res_dict['badRequest']['message'], + 'Incorrect request body format') + + def test_create_backup_with_VolumeNotFound(self): + body = {"backup": {"display_name": "nightly001", + "display_description": + "Nightly Backup 03-Sep-2012", + "volume_id": 9999, + "container": "nightlybackups", + } + } + req = webob.Request.blank('/v2/fake/backups') + req.method = 'POST' + req.headers['Content-Type'] = 'application/json' + req.body = json.dumps(body) + res = req.get_response(fakes.wsgi_app()) + res_dict = json.loads(res.body) + + self.assertEqual(res.status_int, 404) + self.assertEqual(res_dict['itemNotFound']['code'], 404) + self.assertEqual(res_dict['itemNotFound']['message'], + 'Volume 9999 could not be found.') + + def test_create_backup_with_InvalidVolume(self): + # need to create the volume referenced below first + volume_size = 5 + volume_id = self._create_volume(status='restoring', size=volume_size) + + body = {"backup": {"display_name": "nightly001", + "display_description": + "Nightly Backup 03-Sep-2012", + "volume_id": volume_id, + "container": "nightlybackups", + } + } + req = webob.Request.blank('/v2/fake/backups') + req.method = 'POST' + req.headers['Content-Type'] = 'application/json' + req.body = json.dumps(body) + res = req.get_response(fakes.wsgi_app()) + res_dict = json.loads(res.body) + + self.assertEqual(res.status_int, 400) + self.assertEqual(res_dict['badRequest']['code'], 400) + self.assertEqual(res_dict['badRequest']['message'], + 'Invalid volume: Volume to be backed up must' + ' be available') + + def test_delete_backup_available(self): + backup_id = self._create_backup(status='available') + req = webob.Request.blank('/v2/fake/backups/%s' % + backup_id) + req.method = 'DELETE' + req.headers['Content-Type'] = 'application/json' + res = req.get_response(fakes.wsgi_app()) + + self.assertEqual(res.status_int, 202) + self.assertEqual(self._get_backup_attrib(backup_id, 'status'), + 'deleting') + + db.backup_destroy(context.get_admin_context(), backup_id) + + def test_delete_backup_error(self): + backup_id = self._create_backup(status='error') + req = webob.Request.blank('/v2/fake/backups/%s' % + backup_id) + req.method = 'DELETE' + req.headers['Content-Type'] = 'application/json' + res = req.get_response(fakes.wsgi_app()) + + self.assertEqual(res.status_int, 202) + self.assertEqual(self._get_backup_attrib(backup_id, 'status'), + 'deleting') + + db.backup_destroy(context.get_admin_context(), backup_id) + + def test_delete_backup_with_backup_NotFound(self): + req = 
webob.Request.blank('/v2/fake/backups/9999') + req.method = 'DELETE' + req.headers['Content-Type'] = 'application/json' + res = req.get_response(fakes.wsgi_app()) + res_dict = json.loads(res.body) + + self.assertEqual(res.status_int, 404) + self.assertEqual(res_dict['itemNotFound']['code'], 404) + self.assertEqual(res_dict['itemNotFound']['message'], + 'Backup 9999 could not be found.') + + def test_delete_backup_with_InvalidBackup(self): + backup_id = self._create_backup() + req = webob.Request.blank('/v2/fake/backups/%s' % + backup_id) + req.method = 'DELETE' + req.headers['Content-Type'] = 'application/json' + res = req.get_response(fakes.wsgi_app()) + res_dict = json.loads(res.body) + + self.assertEqual(res.status_int, 400) + self.assertEqual(res_dict['badRequest']['code'], 400) + self.assertEqual(res_dict['badRequest']['message'], + 'Invalid backup: Backup status must be ' + 'available or error') + + db.backup_destroy(context.get_admin_context(), backup_id) + + def test_restore_backup_volume_id_specified_json(self): + backup_id = self._create_backup(status='available') + # need to create the volume referenced below first + volume_size = 5 + volume_id = self._create_volume(status='available', size=volume_size) + + body = {"restore": {"volume_id": volume_id, }} + req = webob.Request.blank('/v2/fake/backups/%s/restore' % + backup_id) + req.method = 'POST' + req.headers['Content-Type'] = 'application/json' + req.body = json.dumps(body) + res = req.get_response(fakes.wsgi_app()) + res_dict = json.loads(res.body) + + self.assertEqual(res.status_int, 202) + self.assertEqual(res_dict['restore']['backup_id'], backup_id) + self.assertEqual(res_dict['restore']['volume_id'], volume_id) + + def test_restore_backup_volume_id_specified_xml(self): + backup_id = self._create_backup(status='available') + volume_size = 2 + volume_id = self._create_volume(status='available', size=volume_size) + + req = webob.Request.blank('/v2/fake/backups/%s/restore' % backup_id) + req.body = '<restore volume_id="%s"/>' % volume_id + req.method = 'POST' + req.headers['Content-Type'] = 'application/xml' + req.headers['Accept'] = 'application/xml' + res = req.get_response(fakes.wsgi_app()) + + self.assertEqual(res.status_int, 202) + dom = minidom.parseString(res.body) + restore = dom.getElementsByTagName('restore') + self.assertEqual(restore.item(0).getAttribute('backup_id'), + backup_id) + self.assertEqual(restore.item(0).getAttribute('volume_id'), volume_id) + + db.backup_destroy(context.get_admin_context(), backup_id) + db.volume_destroy(context.get_admin_context(), volume_id) + + def test_restore_backup_with_no_body(self): + # omit body from the request + backup_id = self._create_backup(status='available') + + req = webob.Request.blank('/v2/fake/backups/%s/restore' % + backup_id) + req.body = json.dumps(None) + req.method = 'POST' + req.headers['Content-Type'] = 'application/json' + req.headers['Accept'] = 'application/json' + res = req.get_response(fakes.wsgi_app()) + res_dict = json.loads(res.body) + + self.assertEqual(res.status_int, 400) + self.assertEqual(res_dict['badRequest']['code'], 400) + self.assertEqual(res_dict['badRequest']['message'], + 'The server could not comply with the request since' + ' it is either malformed or otherwise incorrect.') + + db.backup_destroy(context.get_admin_context(), backup_id) + + def test_restore_backup_with_body_KeyError(self): + # omit restore from body + backup_id = self._create_backup(status='available') + + req = webob.Request.blank('/v2/fake/backups/%s/restore' % backup_id) + body = {"": {}} + 
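+ # an empty string key in place of 'restore' triggers the KeyError path in the controller, surfacing as a 400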
req.method = 'POST' + req.headers['Content-Type'] = 'application/json' + req.headers['Accept'] = 'application/json' + req.body = json.dumps(body) + res = req.get_response(fakes.wsgi_app()) + + res_dict = json.loads(res.body) + + self.assertEqual(res.status_int, 400) + self.assertEqual(res_dict['badRequest']['code'], 400) + self.assertEqual(res_dict['badRequest']['message'], + 'The server could not comply with the request since' + ' it is either malformed or otherwise incorrect.') + + def test_restore_backup_volume_id_unspecified(self): + + # intercept volume creation to ensure created volume + # has status of available + def fake_volume_api_create(cls, context, size, name, description): + volume_id = self._create_volume(status='available', size=size) + return db.volume_get(context, volume_id) + + self.stubs.Set(cinder.volume.API, 'create', + fake_volume_api_create) + + backup_id = self._create_backup(size=5, status='available') + + body = {"restore": {}} + req = webob.Request.blank('/v2/fake/backups/%s/restore' % + backup_id) + req.method = 'POST' + req.headers['Content-Type'] = 'application/json' + req.body = json.dumps(body) + res = req.get_response(fakes.wsgi_app()) + res_dict = json.loads(res.body) + + self.assertEqual(res.status_int, 202) + self.assertEqual(res_dict['restore']['backup_id'], backup_id) + + def test_restore_backup_with_InvalidInput(self): + + def fake_backup_api_restore_throwing_InvalidInput(cls, context, + backup_id, + volume_id): + msg = _("Invalid input") + raise exception.InvalidInput(reason=msg) + + self.stubs.Set(cinder.backup.API, 'restore', + fake_backup_api_restore_throwing_InvalidInput) + + backup_id = self._create_backup(status='available') + # need to create the volume referenced below first + volume_size = 0 + volume_id = self._create_volume(status='available', size=volume_size) + + body = {"restore": {"volume_id": volume_id, }} + req = webob.Request.blank('/v2/fake/backups/%s/restore' % + backup_id) + + req.method = 'POST' + req.headers['Content-Type'] = 'application/json' + req.body = json.dumps(body) + res = req.get_response(fakes.wsgi_app()) + res_dict = json.loads(res.body) + + self.assertEqual(res.status_int, 400) + self.assertEqual(res_dict['badRequest']['code'], 400) + self.assertEqual(res_dict['badRequest']['message'], + 'Invalid input received: Invalid input') + + def test_restore_backup_with_InvalidVolume(self): + backup_id = self._create_backup(status='available') + # need to create the volume referenced below first + volume_size = 5 + volume_id = self._create_volume(status='attaching', size=volume_size) + + body = {"restore": {"volume_id": volume_id, }} + req = webob.Request.blank('/v2/fake/backups/%s/restore' % + backup_id) + req.method = 'POST' + req.headers['Content-Type'] = 'application/json' + req.body = json.dumps(body) + res = req.get_response(fakes.wsgi_app()) + res_dict = json.loads(res.body) + + self.assertEqual(res.status_int, 400) + self.assertEqual(res_dict['badRequest']['code'], 400) + self.assertEqual(res_dict['badRequest']['message'], + 'Invalid volume: Volume to be restored to must ' + 'be available') + + db.volume_destroy(context.get_admin_context(), volume_id) + db.backup_destroy(context.get_admin_context(), backup_id) + + def test_restore_backup_with_InvalidBackup(self): + backup_id = self._create_backup(status='restoring') + # need to create the volume referenced below first + volume_size = 5 + volume_id = self._create_volume(status='available', size=volume_size) + + body = {"restore": {"volume_id": volume_id, }} + req = 
webob.Request.blank('/v2/fake/backups/%s/restore' % + backup_id) + req.method = 'POST' + req.headers['Content-Type'] = 'application/json' + req.body = json.dumps(body) + res = req.get_response(fakes.wsgi_app()) + res_dict = json.loads(res.body) + + self.assertEqual(res.status_int, 400) + self.assertEqual(res_dict['badRequest']['code'], 400) + self.assertEqual(res_dict['badRequest']['message'], + 'Invalid backup: Backup status must be available') + + db.volume_destroy(context.get_admin_context(), volume_id) + db.backup_destroy(context.get_admin_context(), backup_id) + + def test_restore_backup_with_BackupNotFound(self): + # need to create the volume referenced below first + volume_size = 5 + volume_id = self._create_volume(status='available', size=volume_size) + + body = {"restore": {"volume_id": volume_id, }} + req = webob.Request.blank('/v2/fake/backups/9999/restore') + req.method = 'POST' + req.headers['Content-Type'] = 'application/json' + req.body = json.dumps(body) + res = req.get_response(fakes.wsgi_app()) + res_dict = json.loads(res.body) + + self.assertEqual(res.status_int, 404) + self.assertEqual(res_dict['itemNotFound']['code'], 404) + self.assertEqual(res_dict['itemNotFound']['message'], + 'Backup 9999 could not be found.') + + db.volume_destroy(context.get_admin_context(), volume_id) + + def test_restore_backup_with_VolumeNotFound(self): + backup_id = self._create_backup(status='available') + + body = {"restore": {"volume_id": "9999", }} + req = webob.Request.blank('/v2/fake/backups/%s/restore' % + backup_id) + req.method = 'POST' + req.headers['Content-Type'] = 'application/json' + req.body = json.dumps(body) + res = req.get_response(fakes.wsgi_app()) + res_dict = json.loads(res.body) + + self.assertEqual(res.status_int, 404) + self.assertEqual(res_dict['itemNotFound']['code'], 404) + self.assertEqual(res_dict['itemNotFound']['message'], + 'Volume 9999 could not be found.') + + db.backup_destroy(context.get_admin_context(), backup_id) + + def test_restore_backup_with_VolumeSizeExceedsAvailableQuota(self): + + def fake_backup_api_restore_throwing_VolumeSizeExceedsAvailableQuota( + cls, context, backup_id, volume_id): + raise exception.VolumeSizeExceedsAvailableQuota() + + self.stubs.Set( + cinder.backup.API, + 'restore', + fake_backup_api_restore_throwing_VolumeSizeExceedsAvailableQuota) + + backup_id = self._create_backup(status='available') + # need to create the volume referenced below first + volume_size = 5 + volume_id = self._create_volume(status='available', size=volume_size) + + body = {"restore": {"volume_id": volume_id, }} + req = webob.Request.blank('/v2/fake/backups/%s/restore' % + backup_id) + + req.method = 'POST' + req.headers['Content-Type'] = 'application/json' + req.body = json.dumps(body) + res = req.get_response(fakes.wsgi_app()) + res_dict = json.loads(res.body) + + self.assertEqual(res.status_int, 413) + self.assertEqual(res_dict['overLimit']['code'], 413) + self.assertEqual(res_dict['overLimit']['message'], + 'Requested volume or snapshot exceeds allowed ' + 'Gigabytes quota') + + def test_restore_backup_with_VolumeLimitExceeded(self): + + def fake_backup_api_restore_throwing_VolumeLimitExceeded(cls, + context, + backup_id, + volume_id): + raise exception.VolumeLimitExceeded(allowed=1) + + self.stubs.Set(cinder.backup.API, 'restore', + fake_backup_api_restore_throwing_VolumeLimitExceeded) + + backup_id = self._create_backup(status='available') + # need to create the volume referenced below first + volume_size = 5 + volume_id = 
self._create_volume(status='available', size=volume_size) + + body = {"restore": {"volume_id": volume_id, }} + req = webob.Request.blank('/v2/fake/backups/%s/restore' % + backup_id) + + req.method = 'POST' + req.headers['Content-Type'] = 'application/json' + req.body = json.dumps(body) + res = req.get_response(fakes.wsgi_app()) + res_dict = json.loads(res.body) + + self.assertEqual(res.status_int, 413) + self.assertEqual(res_dict['overLimit']['code'], 413) + self.assertEqual(res_dict['overLimit']['message'], + 'Maximum number of volumes allowed ' + '(%(allowed)d) exceeded') + + def test_restore_backup_to_undersized_volume(self): + backup_size = 10 + backup_id = self._create_backup(status='available', size=backup_size) + # need to create the volume referenced below first + volume_size = 5 + volume_id = self._create_volume(status='available', size=volume_size) + + body = {"restore": {"volume_id": volume_id, }} + req = webob.Request.blank('/v2/fake/backups/%s/restore' % + backup_id) + req.method = 'POST' + req.headers['Content-Type'] = 'application/json' + req.body = json.dumps(body) + res = req.get_response(fakes.wsgi_app()) + res_dict = json.loads(res.body) + + self.assertEqual(res.status_int, 400) + self.assertEqual(res_dict['badRequest']['code'], 400) + self.assertEqual(res_dict['badRequest']['message'], + 'Invalid volume: volume size %d is too ' + 'small to restore backup of size %d.' + % (volume_size, backup_size)) + + db.volume_destroy(context.get_admin_context(), volume_id) + db.backup_destroy(context.get_admin_context(), backup_id) + + def test_restore_backup_to_oversized_volume(self): + backup_id = self._create_backup(status='available', size=10) + # need to create the volume referenced below first + volume_size = 15 + volume_id = self._create_volume(status='available', size=volume_size) + + body = {"restore": {"volume_id": volume_id, }} + req = webob.Request.blank('/v2/fake/backups/%s/restore' % + backup_id) + req.method = 'POST' + req.headers['Content-Type'] = 'application/json' + req.body = json.dumps(body) + res = req.get_response(fakes.wsgi_app()) + res_dict = json.loads(res.body) + + self.assertEqual(res.status_int, 202) + self.assertEqual(res_dict['restore']['backup_id'], backup_id) + self.assertEqual(res_dict['restore']['volume_id'], volume_id) + + db.volume_destroy(context.get_admin_context(), volume_id) + db.backup_destroy(context.get_admin_context(), backup_id) diff --git a/cinder/tests/api/contrib/test_extended_snapshot_attributes.py b/cinder/tests/api/contrib/test_extended_snapshot_attributes.py new file mode 100644 index 0000000000..5937ac9628 --- /dev/null +++ b/cinder/tests/api/contrib/test_extended_snapshot_attributes.py @@ -0,0 +1,124 @@ +# Copyright 2012 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
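+# NOTE: the tests below stub volume.api.API snapshot lookups, so they exercise only the extension's attribute handling, not the volume service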
+ +from lxml import etree +import webob + +from cinder.api.contrib import extended_snapshot_attributes +from cinder import exception +from cinder import flags +from cinder.openstack.common import jsonutils +from cinder import test +from cinder.tests.api import fakes +from cinder import volume + + +FLAGS = flags.FLAGS + + +UUID1 = '00000000-0000-0000-0000-000000000001' +UUID2 = '00000000-0000-0000-0000-000000000002' + + +def _get_default_snapshot_param(): + return {'id': UUID1, + 'volume_id': 12, + 'status': 'available', + 'volume_size': 100, + 'created_at': None, + 'display_name': 'Default name', + 'display_description': 'Default description', + 'project_id': 'fake', + 'progress': '0%'} + + +def fake_snapshot_get(self, context, snapshot_id): + param = _get_default_snapshot_param() + return param + + +def fake_snapshot_get_all(self, context, search_opts=None): + param = _get_default_snapshot_param() + return [param] + + +class ExtendedSnapshotAttributesTest(test.TestCase): + content_type = 'application/json' + prefix = 'os-extended-snapshot-attributes:' + + def setUp(self): + super(ExtendedSnapshotAttributesTest, self).setUp() + self.stubs.Set(volume.api.API, 'get_snapshot', fake_snapshot_get) + self.stubs.Set(volume.api.API, 'get_all_snapshots', + fake_snapshot_get_all) + + def _make_request(self, url): + req = webob.Request.blank(url) + req.headers['Accept'] = self.content_type + res = req.get_response(fakes.wsgi_app()) + return res + + def _get_snapshot(self, body): + return jsonutils.loads(body).get('snapshot') + + def _get_snapshots(self, body): + return jsonutils.loads(body).get('snapshots') + + def assertSnapshotAttributes(self, snapshot, project_id, progress): + self.assertEqual(snapshot.get('%sproject_id' % self.prefix), + project_id) + self.assertEqual(snapshot.get('%sprogress' % self.prefix), progress) + + def test_show(self): + url = '/v2/fake/snapshots/%s' % UUID2 + res = self._make_request(url) + + self.assertEqual(res.status_int, 200) + self.assertSnapshotAttributes(self._get_snapshot(res.body), + project_id='fake', + progress='0%') + + def test_detail(self): + url = '/v2/fake/snapshots/detail' + res = self._make_request(url) + + self.assertEqual(res.status_int, 200) + for i, snapshot in enumerate(self._get_snapshots(res.body)): + self.assertSnapshotAttributes(snapshot, + project_id='fake', + progress='0%') + + def test_no_instance_passthrough_404(self): + + def fake_snapshot_get(*args, **kwargs): + raise exception.InstanceNotFound(instance_id='fake') + + self.stubs.Set(volume.api.API, 'get_snapshot', fake_snapshot_get) + url = '/v2/fake/snapshots/70f6db34-de8d-4fbd-aafb-4065bdfa6115' + res = self._make_request(url) + + self.assertEqual(res.status_int, 404) + + +class ExtendedSnapshotAttributesXmlTest(ExtendedSnapshotAttributesTest): + content_type = 'application/xml' + ext = extended_snapshot_attributes + prefix = '{%s}' % ext.Extended_snapshot_attributes.namespace + + def _get_snapshot(self, body): + return etree.XML(body) + + def _get_snapshots(self, body): + return etree.XML(body).getchildren() diff --git a/cinder/tests/api/contrib/test_hosts.py b/cinder/tests/api/contrib/test_hosts.py new file mode 100644 index 0000000000..525f963051 --- /dev/null +++ b/cinder/tests/api/contrib/test_hosts.py @@ -0,0 +1,202 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 OpenStack, LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import datetime + +from lxml import etree +import webob.exc + +from cinder.api.contrib import hosts as os_hosts +from cinder import context +from cinder import db +from cinder import flags +from cinder.openstack.common import log as logging +from cinder.openstack.common import timeutils +from cinder import test + + +FLAGS = flags.FLAGS +LOG = logging.getLogger(__name__) +created_time = datetime.datetime(2012, 11, 14, 1, 20, 41, 95099) +curr_time = timeutils.utcnow() + +SERVICE_LIST = [ + {'created_at': created_time, 'updated_at': curr_time, + 'host': 'test.host.1', 'topic': 'cinder-volume', 'disabled': 0, + 'availability_zone': 'cinder'}, + {'created_at': created_time, 'updated_at': curr_time, + 'host': 'test.host.1', 'topic': 'cinder-volume', 'disabled': 0, + 'availability_zone': 'cinder'}, + {'created_at': created_time, 'updated_at': curr_time, + 'host': 'test.host.1', 'topic': 'cinder-volume', 'disabled': 0, + 'availability_zone': 'cinder'}, + {'created_at': created_time, 'updated_at': curr_time, + 'host': 'test.host.1', 'topic': 'cinder-volume', 'disabled': 0, + 'availability_zone': 'cinder'}] + +LIST_RESPONSE = [{'service-status': 'available', 'service': 'cinder-volume', + 'zone': 'cinder', 'service-state': 'enabled', + 'host_name': 'test.host.1', 'last-update': curr_time}, + {'service-status': 'available', 'service': 'cinder-volume', + 'zone': 'cinder', 'service-state': 'enabled', + 'host_name': 'test.host.1', 'last-update': curr_time}, + {'service-status': 'available', 'service': 'cinder-volume', + 'zone': 'cinder', 'service-state': 'enabled', + 'host_name': 'test.host.1', 'last-update': curr_time}, + {'service-status': 'available', 'service': 'cinder-volume', + 'zone': 'cinder', 'service-state': 'enabled', + 'host_name': 'test.host.1', 'last-update': curr_time}] + + +def stub_service_get_all(self, req): + return SERVICE_LIST + + +class FakeRequest(object): + environ = {'cinder.context': context.get_admin_context()} + GET = {} + + +class FakeRequestWithcinderZone(object): + environ = {'cinder.context': context.get_admin_context()} + GET = {'zone': 'cinder'} + + +class HostTestCase(test.TestCase): + """Test Case for hosts.""" + + def setUp(self): + super(HostTestCase, self).setUp() + self.controller = os_hosts.HostController() + self.req = FakeRequest() + self.stubs.Set(db, 'service_get_all', + stub_service_get_all) + + def _test_host_update(self, host, key, val, expected_value): + body = {key: val} + result = self.controller.update(self.req, host, body=body) + self.assertEqual(result[key], expected_value) + + def test_list_hosts(self): + """Verify that the volume hosts are returned.""" + hosts = os_hosts._list_hosts(self.req) + self.assertEqual(hosts, LIST_RESPONSE) + + cinder_hosts = os_hosts._list_hosts(self.req, 'cinder-volume') + expected = [host for host in LIST_RESPONSE + if host['service'] == 'cinder-volume'] + self.assertEqual(cinder_hosts, expected) + + def test_list_hosts_with_zone(self): + req = FakeRequestWithcinderZone() + hosts = os_hosts._list_hosts(req) + self.assertEqual(hosts, LIST_RESPONSE) + + def test_bad_status_value(self): + 
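+ """Bogus status values in a host update must raise HTTPBadRequest."""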
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, + self.req, 'test.host.1', body={'status': 'bad'}) + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.update, + self.req, + 'test.host.1', + body={'status': 'disablabc'}) + + def test_bad_update_key(self): + bad_body = {'crazy': 'bad'} + self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, + self.req, 'test.host.1', body=bad_body) + + def test_bad_update_key_and_correct_update_key(self): + bad_body = {'status': 'disable', 'crazy': 'bad'} + self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, + self.req, 'test.host.1', body=bad_body) + + def test_good_update_keys(self): + body = {'status': 'disable'} + self.assertRaises(NotImplementedError, self.controller.update, + self.req, 'test.host.1', body=body) + + def test_bad_host(self): + self.assertRaises(webob.exc.HTTPNotFound, + self.controller.update, + self.req, + 'bogus_host_name', + body={'disabled': 0}) + + def test_show_forbidden(self): + self.req.environ['cinder.context'].is_admin = False + dest = 'dummydest' + self.assertRaises(webob.exc.HTTPForbidden, + self.controller.show, + self.req, dest) + self.req.environ['cinder.context'].is_admin = True + + def test_show_host_not_exist(self): + """A host given as an argument does not exist.""" + self.req.environ['cinder.context'].is_admin = True + dest = 'dummydest' + self.assertRaises(webob.exc.HTTPNotFound, + self.controller.show, + self.req, dest) + + +class HostSerializerTest(test.TestCase): + def setUp(self): + super(HostSerializerTest, self).setUp() + self.deserializer = os_hosts.HostDeserializer() + + def test_index_serializer(self): + serializer = os_hosts.HostIndexTemplate() + text = serializer.serialize({"hosts": LIST_RESPONSE}) + + tree = etree.fromstring(text) + + self.assertEqual('hosts', tree.tag) + self.assertEqual(len(LIST_RESPONSE), len(tree)) + for i in range(len(LIST_RESPONSE)): + self.assertEqual('host', tree[i].tag) + self.assertEqual(LIST_RESPONSE[i]['service-status'], + tree[i].get('service-status')) + self.assertEqual(LIST_RESPONSE[i]['service'], + tree[i].get('service')) + self.assertEqual(LIST_RESPONSE[i]['zone'], + tree[i].get('zone')) + self.assertEqual(LIST_RESPONSE[i]['service-state'], + tree[i].get('service-state')) + self.assertEqual(LIST_RESPONSE[i]['host_name'], + tree[i].get('host_name')) + self.assertEqual(str(LIST_RESPONSE[i]['last-update']), + tree[i].get('last-update')) + + def test_update_serializer_with_status(self): + exemplar = dict(host='test.host.1', status='enabled') + serializer = os_hosts.HostUpdateTemplate() + text = serializer.serialize(exemplar) + + tree = etree.fromstring(text) + + self.assertEqual('host', tree.tag) + for key, value in exemplar.items(): + self.assertEqual(value, tree.get(key)) + + def test_update_deserializer(self): + exemplar = dict(status='enabled', foo='bar') + intext = ("<?xml version='1.0' encoding='UTF-8'?>\n" + '<updates><status>enabled</status><foo>bar</foo></updates>') + result = self.deserializer.deserialize(intext) + + self.assertEqual(dict(body=exemplar), result) diff --git a/cinder/tests/api/contrib/test_services.py b/cinder/tests/api/contrib/test_services.py new file mode 100644 index 0000000000..1f823c356a --- /dev/null +++ b/cinder/tests/api/contrib/test_services.py @@ -0,0 +1,216 @@ +# Copyright 2012 IBM Corp. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +from cinder.api.contrib import services +from cinder import context +from cinder import db +from cinder import exception +from cinder.openstack.common import timeutils +from cinder import policy +from cinder import test +from cinder.tests.api import fakes +from datetime import datetime + + +fake_services_list = [{'binary': 'cinder-scheduler', + 'host': 'host1', + 'availability_zone': 'cinder', + 'id': 1, + 'disabled': True, + 'updated_at': datetime(2012, 10, 29, 13, 42, 2), + 'created_at': datetime(2012, 9, 18, 2, 46, 27)}, + {'binary': 'cinder-volume', + 'host': 'host1', + 'availability_zone': 'cinder', + 'id': 2, + 'disabled': True, + 'updated_at': datetime(2012, 10, 29, 13, 42, 5), + 'created_at': datetime(2012, 9, 18, 2, 46, 27)}, + {'binary': 'cinder-scheduler', + 'host': 'host2', + 'availability_zone': 'cinder', + 'id': 3, + 'disabled': False, + 'updated_at': datetime(2012, 9, 19, 6, 55, 34), + 'created_at': datetime(2012, 9, 18, 2, 46, 28)}, + {'binary': 'cinder-volume', + 'host': 'host2', + 'availability_zone': 'cinder', + 'id': 4, + 'disabled': True, + 'updated_at': datetime(2012, 9, 18, 8, 3, 38), + 'created_at': datetime(2012, 9, 18, 2, 46, 28)}, + ] + + +class FakeRequest(object): + environ = {"cinder.context": context.get_admin_context()} + GET = {} + + +class FakeRequestWithSevice(object): + environ = {"cinder.context": context.get_admin_context()} + GET = {"service": "cinder-volume"} + + +class FakeRequestWithHost(object): + environ = {"cinder.context": context.get_admin_context()} + GET = {"host": "host1"} + + +class FakeRequestWithHostService(object): + environ = {"cinder.context": context.get_admin_context()} + GET = {"host": "host1", "service": "cinder-volume"} + + +def fake_servcie_get_all(context): + return fake_services_list + + +def fake_service_get_by_host_binary(context, host, binary): + for service in fake_services_list: + if service['host'] == host and service['binary'] == binary: + return service + return None + + +def fake_service_get_by_id(value): + for service in fake_services_list: + if service['id'] == value: + return service + return None + + +def fake_service_update(context, service_id, values): + service = fake_service_get_by_id(service_id) + if service is None: + raise exception.ServiceNotFound(service_id=service_id) + else: + {'host': 'host1', 'service': 'cinder-volume', + 'disabled': values['disabled']} + + +def fake_policy_enforce(context, action, target): + pass + + +def fake_utcnow(): + return datetime(2012, 10, 29, 13, 42, 11) + + +class ServicesTest(test.TestCase): + + def setUp(self): + super(ServicesTest, self).setUp() + + self.stubs.Set(db, "service_get_all", fake_servcie_get_all) + self.stubs.Set(timeutils, "utcnow", fake_utcnow) + self.stubs.Set(db, "service_get_by_args", + fake_service_get_by_host_binary) + self.stubs.Set(db, "service_update", fake_service_update) + self.stubs.Set(policy, "enforce", fake_policy_enforce) + + self.context = context.get_admin_context() + self.controller = services.ServiceController() + + def tearDown(self): + super(ServicesTest, self).tearDown() + + def test_services_list(self): + req = 
FakeRequest() + res_dict = self.controller.index(req) + + response = {'services': [{'binary': 'cinder-scheduler', + 'host': 'host1', 'zone': 'cinder', + 'status': 'disabled', 'state': 'up', + 'updated_at': datetime(2012, 10, 29, 13, 42, 2)}, + {'binary': 'cinder-volume', + 'host': 'host1', 'zone': 'cinder', + 'status': 'disabled', 'state': 'up', + 'updated_at': datetime(2012, 10, 29, 13, 42, 5)}, + {'binary': 'cinder-scheduler', 'host': 'host2', + 'zone': 'cinder', + 'status': 'enabled', 'state': 'up', + 'updated_at': datetime(2012, 9, 19, 6, 55, 34)}, + {'binary': 'cinder-volume', 'host': 'host2', + 'zone': 'cinder', + 'status': 'disabled', 'state': 'up', + 'updated_at': datetime(2012, 9, 18, 8, 3, 38)}]} + self.assertEqual(res_dict, response) + + def test_services_list_with_host(self): + req = FakeRequestWithHost() + res_dict = self.controller.index(req) + + response = {'services': [{'binary': 'cinder-scheduler', + 'host': 'host1', + 'zone': 'cinder', + 'status': 'disabled', 'state': 'up', + 'updated_at': datetime(2012, 10, + 29, 13, 42, 2)}, + {'binary': 'cinder-volume', 'host': 'host1', + 'zone': 'cinder', + 'status': 'disabled', 'state': 'up', + 'updated_at': datetime(2012, 10, 29, + 13, 42, 5)}]} + self.assertEqual(res_dict, response) + + def test_services_list_with_service(self): + req = FakeRequestWithSevice() + res_dict = self.controller.index(req) + + response = {'services': [{'binary': 'cinder-volume', + 'host': 'host1', + 'zone': 'cinder', + 'status': 'disabled', + 'state': 'up', + 'updated_at': datetime(2012, 10, 29, + 13, 42, 5)}, + {'binary': 'cinder-volume', + 'host': 'host2', + 'zone': 'cinder', + 'status': 'disabled', + 'state': 'up', + 'updated_at': datetime(2012, 9, 18, + 8, 3, 38)}]} + self.assertEqual(res_dict, response) + + def test_services_list_with_host_service(self): + req = FakeRequestWithHostService() + res_dict = self.controller.index(req) + + response = {'services': [{'binary': 'cinder-volume', + 'host': 'host1', + 'zone': 'cinder', + 'status': 'disabled', + 'state': 'up', + 'updated_at': datetime(2012, 10, 29, + 13, 42, 5)}]} + self.assertEqual(res_dict, response) + + def test_services_enable(self): + body = {'host': 'host1', 'service': 'cinder-volume'} + req = fakes.HTTPRequest.blank('/v1/fake/os-services/enable') + res_dict = self.controller.update(req, "enable", body) + + self.assertEqual(res_dict['disabled'], False) + + def test_services_disable(self): + req = fakes.HTTPRequest.blank('/v1/fake/os-services/disable') + body = {'host': 'host1', 'service': 'cinder-volume'} + res_dict = self.controller.update(req, "disable", body) + + self.assertEqual(res_dict['disabled'], True) diff --git a/cinder/tests/api/contrib/test_share_actions.py b/cinder/tests/api/contrib/test_share_actions.py new file mode 100644 index 0000000000..20bc3a3027 --- /dev/null +++ b/cinder/tests/api/contrib/test_share_actions.py @@ -0,0 +1,116 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 OpenStack LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
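+# NOTE: share_api.API calls are stubbed throughout, so these tests cover only the os-share-actions controller behavior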
+
+import datetime
+import uuid
+import webob
+
+from cinder.api.contrib import share_actions
+from cinder import exception
+from cinder import flags
+from cinder.openstack.common import jsonutils
+from cinder.openstack.common.rpc import common as rpc_common
+from cinder import share
+from cinder.share import api as share_api
+from cinder import test
+from cinder.tests.api.contrib import stubs
+from cinder.tests.api import fakes
+
+
+FLAGS = flags.FLAGS
+
+
+def _fake_access_get(self, ctxt, access_id):
+
+    class Access(object):
+        def __init__(self, **kwargs):
+            self.STATE_NEW = 'fake_new'
+            self.STATE_ACTIVE = 'fake_active'
+            self.STATE_ERROR = 'fake_error'
+            self.params = kwargs
+            self.params['state'] = self.STATE_NEW
+            self.share_id = kwargs.get('share_id')
+            self.id = access_id
+
+        def __getitem__(self, item):
+            return self.params[item]
+
+    access = Access(access_id=access_id, share_id='fake_share_id')
+    return access
+
+
+class ShareActionsTest(test.TestCase):
+    def setUp(self):
+        super(ShareActionsTest, self).setUp()
+        self.controller = share_actions.ShareActionsController()
+
+        self.stubs.Set(share_api.API, 'get', stubs.stub_share_get)
+
+    def test_allow_access(self):
+        def _stub_allow_access(*args, **kwargs):
+            pass
+        self.stubs.Set(share_api.API, "allow_access", _stub_allow_access)
+
+        id = 'fake_share_id'
+        body = {"os-allow_access": {"access_type": 'fakeip',
+                                    "access_to": '127.0.0.1'}}
+        req = fakes.HTTPRequest.blank('/v1/tenant1/shares/%s/action' % id)
+        res = self.controller._allow_access(req, id, body)
+        self.assertEqual(res.status_int, 202)
+
+    def test_deny_access(self):
+        def _stub_deny_access(*args, **kwargs):
+            pass
+
+        self.stubs.Set(share_api.API, "deny_access", _stub_deny_access)
+        self.stubs.Set(share_api.API, "access_get", _fake_access_get)
+
+        id = 'fake_share_id'
+        body = {"os-deny_access": {"access_id": 'fake_access_id'}}
+        req = fakes.HTTPRequest.blank('/v1/tenant1/shares/%s/action' % id)
+        res = self.controller._deny_access(req, id, body)
+        self.assertEqual(res.status_int, 202)
+
+    def test_deny_access_not_found(self):
+        def _stub_deny_access(*args, **kwargs):
+            pass
+
+        self.stubs.Set(share_api.API, "deny_access", _stub_deny_access)
+        self.stubs.Set(share_api.API, "access_get", _fake_access_get)
+
+        id = 'super_fake_share_id'
+        body = {"os-deny_access": {"access_id": 'fake_access_id'}}
+        req = fakes.HTTPRequest.blank('/v1/tenant1/shares/%s/action' % id)
+        self.assertRaises(webob.exc.HTTPNotFound,
+                          self.controller._deny_access,
+                          req,
+                          id,
+                          body)
+
+    def test_access_list(self):
+        def _fake_access_get_all(*args, **kwargs):
+            return [{"state": "fakestatus",
+                     "id": "fake_share_id",
+                     "access_type": "fakeip",
+                     "access_to": "127.0.0.1"}]
+
+        self.stubs.Set(share_api.API, "access_get_all", _fake_access_get_all)
+        id = 'fake_share_id'
+        body = {"os-access_list": None}
+        req = fakes.HTTPRequest.blank('/v1/tenant1/shares/%s/action' % id)
+        res_dict = self.controller._access_list(req, id, body)
+        expected = _fake_access_get_all()
+        self.assertEqual(res_dict['access_list'], expected)
diff --git a/cinder/tests/api/contrib/test_share_snapshots.py b/cinder/tests/api/contrib/test_share_snapshots.py
new file mode 100644
index 0000000000..bc9d2ae94f
--- /dev/null
+++ b/cinder/tests/api/contrib/test_share_snapshots.py
@@ -0,0 +1,190 @@
+# Copyright 2012 NetApp
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import datetime + +import webob + +from cinder.api.contrib import share_snapshots +from cinder import exception +from cinder.share import api as share_api +from cinder import test +from cinder.tests.api.contrib import stubs +from cinder.tests.api import fakes + + +class ShareSnapshotApiTest(test.TestCase): + """Share Snapshot Api Test.""" + + def setUp(self): + super(ShareSnapshotApiTest, self).setUp() + self.controller = share_snapshots.ShareSnapshotsController() + + self.stubs.Set(share_api.API, 'get', stubs.stub_share_get) + self.stubs.Set(share_api.API, 'get_all_snapshots', + stubs.stub_snapshot_get_all_by_project) + self.stubs.Set(share_api.API, 'get_snapshot', + stubs.stub_snapshot_get) + + self.maxDiff = None + + def test_snapshot_create(self): + self.stubs.Set(share_api.API, 'create_snapshot', + stubs.stub_snapshot_create) + body = { + 'share-snapshot': { + 'share_id': 100, + 'force': False, + 'name': 'fake_share_name', + 'description': 'fake_share_description', + } + } + req = fakes.HTTPRequest.blank('/share-snapshots') + res_dict = self.controller.create(req, body) + expected = { + 'share-snapshot': { + 'id': 200, + 'name': 'fake_share_name', + 'links': [ + { + 'href': 'http://localhost/v1/fake/share-snapshots/200', + 'rel': 'self' + }, + { + 'href': 'http://localhost/fake/share-snapshots/200', + 'rel': 'bookmark' + } + ], + } + } + self.assertEqual(res_dict, expected) + + def test_snapshot_create_no_body(self): + body = {} + req = fakes.HTTPRequest.blank('/share-snapshots') + self.assertRaises(webob.exc.HTTPUnprocessableEntity, + self.controller.create, + req, + body) + + def test_snapshot_delete(self): + self.stubs.Set(share_api.API, 'delete_snapshot', + stubs.stub_snapshot_delete) + req = fakes.HTTPRequest.blank('/share-snapshots/200') + resp = self.controller.delete(req, 200) + self.assertEqual(resp.status_int, 202) + + def test_snapshot_delete_nofound(self): + self.stubs.Set(share_api.API, 'get_snapshot', + stubs.stub_snapshot_get_notfound) + req = fakes.HTTPRequest.blank('/share-snapshots/200') + self.assertRaises(webob.exc.HTTPNotFound, + self.controller.delete, + req, + 200) + + def test_snapshot_show(self): + req = fakes.HTTPRequest.blank('/share-snapshots/200') + res_dict = self.controller.show(req, 200) + expected = { + 'share-snapshot': { + 'id': 200, + 'share_id': 'fakeshareid', + 'share_size': 1, + 'created_at': datetime.datetime(1, 1, 1, 1, 1, 1), + 'status': 'fakesnapstatus', + 'name': 'displaysnapname', + 'description': 'displaysnapdesc', + 'share_proto': 'fakesnapproto', + 'export_location': 'fakesnaplocation', + 'links': [ + { + 'href': 'http://localhost/v1/fake/share-snapshots/200', + 'rel': 'self', + }, + { + 'href': 'http://localhost/fake/share-snapshots/200', + 'rel': 'bookmark', + }, + ], + } + } + self.assertEqual(res_dict, expected) + + def test_snapshot_show_nofound(self): + self.stubs.Set(share_api.API, 'get_snapshot', + stubs.stub_snapshot_get_notfound) + req = fakes.HTTPRequest.blank('/share-snapshots/200') + self.assertRaises(webob.exc.HTTPNotFound, + self.controller.show, + req, '200') + + def test_snapshot_list_summary(self): + 
self.stubs.Set(share_api.API, 'get_all_snapshots', + stubs.stub_snapshot_get_all_by_project) + req = fakes.HTTPRequest.blank('/share-snapshots') + res_dict = self.controller.index(req) + expected = { + 'share-snapshots': [ + { + 'name': 'displaysnapname', + 'id': 2, + 'links': [ + { + 'href': 'http://localhost/v1/fake/' + 'share-snapshots/2', + 'rel': 'self' + }, + { + 'href': 'http://localhost/fake/share-snapshots/2', + 'rel': 'bookmark' + } + ], + } + ] + } + self.assertEqual(res_dict, expected) + + def test_snapshot_list_detail(self): + env = {'QUERY_STRING': 'name=Share+Test+Name'} + req = fakes.HTTPRequest.blank('/shares/detail', environ=env) + res_dict = self.controller.detail(req) + expected = { + 'share-snapshots': [ + { + 'id': 2, + 'share_id': 'fakeshareid', + 'share_size': 1, + 'created_at': datetime.datetime(1, 1, 1, 1, 1, 1), + 'status': 'fakesnapstatus', + 'name': 'displaysnapname', + 'description': 'displaysnapdesc', + 'share_proto': 'fakesnapproto', + 'export_location': 'fakesnaplocation', + 'links': [ + { + 'href': 'http://localhost/v1/fake/share-snapshots/' + '2', + 'rel': 'self', + }, + { + 'href': 'http://localhost/fake/share-snapshots/2', + 'rel': 'bookmark', + }, + ], + }, + ] + } + self.assertEqual(res_dict, expected) diff --git a/cinder/tests/api/contrib/test_shares.py b/cinder/tests/api/contrib/test_shares.py new file mode 100644 index 0000000000..ef0e98fe49 --- /dev/null +++ b/cinder/tests/api/contrib/test_shares.py @@ -0,0 +1,245 @@ +# Copyright 2012 NetApp +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
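+
+# These tests drive shares.ShareController with fake requests; share_api.API
+# is stubbed throughout, and setUp() sets maxDiff = None so failed dict
+# comparisons show the full diff.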
+ +import datetime + +import webob + +from cinder.api.contrib import shares +from cinder import context +from cinder import exception +from cinder.share import api as share_api +from cinder import test +from cinder.tests.api.contrib import stubs +from cinder.tests.api import fakes + + +class ShareApiTest(test.TestCase): + """Share Api Test.""" + def setUp(self): + super(ShareApiTest, self).setUp() + self.controller = shares.ShareController() + + self.stubs.Set(share_api.API, 'get_all', + stubs.stub_get_all_shares) + self.stubs.Set(share_api.API, 'get', + stubs.stub_share_get) + self.stubs.Set(share_api.API, 'delete', stubs.stub_share_delete) + self.stubs.Set(share_api.API, 'get_snapshot', stubs.stub_snapshot_get) + self.maxDiff = None + + def test_share_create(self): + self.stubs.Set(share_api.API, 'create', stubs.stub_share_create) + shr = { + "size": 100, + "name": "Share Test Name", + "description": "Share Test Desc", + "share_proto": "fakeproto", + "availability_zone": "zone1:host1" + } + body = {"share": shr} + req = fakes.HTTPRequest.blank('/shares') + res_dict = self.controller.create(req, body) + expected = { + 'share': { + 'name': 'Share Test Name', + 'id': '1', + 'links': [ + { + 'href': 'http://localhost/v1/fake/shares/1', + 'rel': 'self' + }, + { + 'href': 'http://localhost/fake/shares/1', + 'rel': 'bookmark' + } + ], + } + } + self.assertEqual(res_dict, expected) + + def test_share_create_from_snapshot(self): + self.stubs.Set(share_api.API, 'create', stubs.stub_share_create) + shr = { + "size": 100, + "name": "Share Test Name", + "description": "Share Test Desc", + "share_proto": "fakeproto", + "availability_zone": "zone1:host1", + "snapshot_id": 333, + } + body = {"share": shr} + req = fakes.HTTPRequest.blank('/shares') + res_dict = self.controller.create(req, body) + expected = { + 'share': { + 'name': 'Share Test Name', + 'id': '1', + 'links': [ + { + 'href': 'http://localhost/v1/fake/shares/1', + 'rel': 'self' + }, + { + 'href': 'http://localhost/fake/shares/1', + 'rel': 'bookmark' + } + ], + } + } + self.assertEqual(res_dict, expected) + + def test_share_creation_fails_with_bad_size(self): + shr = {"size": '', + "name": "Share Test Name", + "description": "Share Test Desc", + "share_proto": "fakeproto", + "availability_zone": "zone1:host1"} + body = {"share": shr} + req = fakes.HTTPRequest.blank('/shares') + self.assertRaises(exception.InvalidInput, + self.controller.create, + req, + body) + + def test_share_create_no_body(self): + body = {} + req = fakes.HTTPRequest.blank('/shares') + self.assertRaises(webob.exc.HTTPUnprocessableEntity, + self.controller.create, + req, + body) + + def test_share_show(self): + req = fakes.HTTPRequest.blank('/shares/1') + res_dict = self.controller.show(req, '1') + print res_dict + expected = { + 'share': {'name': 'displayname', + 'availability_zone': 'fakeaz', + 'description': 'displaydesc', + 'export_location': 'fake_location', + 'id': '1', + 'created_at': datetime.datetime(1, 1, 1, 1, 1, 1), + 'share_proto': 'fakeproto', + 'size': 1, + 'snapshot_id': '2', + 'status': 'fakestatus', + 'links': [{'href': 'http://localhost/v1/fake/shares/1', + 'rel': 'self'}, + {'href': 'http://localhost/fake/shares/1', + 'rel': 'bookmark'}] + } + } + self.assertEqual(res_dict, expected) + + def test_share_show_no_share(self): + self.stubs.Set(share_api.API, 'get', + stubs.stub_share_get_notfound) + req = fakes.HTTPRequest.blank('/shares/1') + self.assertRaises(webob.exc.HTTPNotFound, + self.controller.show, + req, '1') + + def test_share_delete(self): + 
req = fakes.HTTPRequest.blank('/shares/1') + resp = self.controller.delete(req, 1) + self.assertEqual(resp.status_int, 202) + + def test_share_delete_no_share(self): + self.stubs.Set(share_api.API, 'get', + stubs.stub_share_get_notfound) + req = fakes.HTTPRequest.blank('/shares/1') + self.assertRaises(webob.exc.HTTPNotFound, + self.controller.delete, + req, + 1) + + def test_share_list_summary(self): + self.stubs.Set(share_api.API, 'get_all', + stubs.stub_share_get_all_by_project) + req = fakes.HTTPRequest.blank('/shares') + res_dict = self.controller.index(req) + expected = { + 'shares': [ + { + 'name': 'displayname', + 'id': '1', + 'links': [ + { + 'href': 'http://localhost/v1/fake/shares/1', + 'rel': 'self' + }, + { + 'href': 'http://localhost/fake/shares/1', + 'rel': 'bookmark' + } + ], + } + ] + } + self.assertEqual(res_dict, expected) + + def test_share_list_detail(self): + self.stubs.Set(share_api.API, 'get_all', + stubs.stub_share_get_all_by_project) + env = {'QUERY_STRING': 'name=Share+Test+Name'} + req = fakes.HTTPRequest.blank('/shares/detail', environ=env) + res_dict = self.controller.detail(req) + expected = { + 'shares': [ + { + 'status': 'fakestatus', + 'description': 'displaydesc', + 'export_location': 'fake_location', + 'availability_zone': 'fakeaz', + 'name': 'displayname', + 'share_proto': 'fakeproto', + 'id': '1', + 'snapshot_id': '2', + 'created_at': datetime.datetime(1, 1, 1, 1, 1, 1), + 'size': 1, + 'links': [ + { + 'href': 'http://localhost/v1/fake/shares/1', + 'rel': 'self' + }, + { + 'href': 'http://localhost/fake/shares/1', + 'rel': 'bookmark' + } + ], + } + ] + } + self.assertEqual(res_dict, expected) + + def test_remove_invalid_options(self): + ctx = context.RequestContext('fakeuser', 'fakeproject', is_admin=False) + search_opts = {'a': 'a', 'b': 'b', 'c': 'c', 'd': 'd'} + expected_opts = {'a': 'a', 'c': 'c'} + allowed_opts = ['a', 'c'] + self.mox.ReplayAll() + shares.remove_invalid_options(ctx, search_opts, allowed_opts) + self.assertEqual(search_opts, expected_opts) + + def test_remove_invalid_options_admin(self): + ctx = context.RequestContext('fakeuser', 'fakeproject', is_admin=True) + search_opts = {'a': 'a', 'b': 'b', 'c': 'c', 'd': 'd'} + expected_opts = {'a': 'a', 'b': 'b', 'c': 'c', 'd': 'd'} + allowed_opts = ['a', 'c'] + self.mox.ReplayAll() + shares.remove_invalid_options(ctx, search_opts, allowed_opts) + self.assertEqual(search_opts, expected_opts) diff --git a/cinder/tests/api/contrib/test_types_extra_specs.py b/cinder/tests/api/contrib/test_types_extra_specs.py new file mode 100644 index 0000000000..7b5b46931e --- /dev/null +++ b/cinder/tests/api/contrib/test_types_extra_specs.py @@ -0,0 +1,232 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 Zadara Storage Inc. +# Copyright (c) 2011 OpenStack LLC. +# Copyright 2011 University of Southern California +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
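+
+# These tests assert on notification counts through
+# test_notifier.NOTIFICATIONS, so the notifier drivers are reset in both
+# setUp() and tearDown() to avoid leakage between api/contrib test modules.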
+ +from lxml import etree +import webob + +from cinder.api.contrib import types_extra_specs +from cinder.openstack.common.notifier import api as notifier_api +from cinder.openstack.common.notifier import test_notifier +from cinder import test +from cinder.tests.api import fakes +import cinder.wsgi + + +def return_create_volume_type_extra_specs(context, volume_type_id, + extra_specs): + return stub_volume_type_extra_specs() + + +def return_volume_type_extra_specs(context, volume_type_id): + return stub_volume_type_extra_specs() + + +def return_empty_volume_type_extra_specs(context, volume_type_id): + return {} + + +def delete_volume_type_extra_specs(context, volume_type_id, key): + pass + + +def stub_volume_type_extra_specs(): + specs = { + "key1": "value1", + "key2": "value2", + "key3": "value3", + "key4": "value4", + "key5": "value5"} + return specs + + +def volume_type_get(context, volume_type_id): + pass + + +class VolumeTypesExtraSpecsTest(test.TestCase): + + def setUp(self): + super(VolumeTypesExtraSpecsTest, self).setUp() + self.flags(connection_type='fake', + host='fake', + notification_driver=[test_notifier.__name__]) + self.stubs.Set(cinder.db, 'volume_type_get', volume_type_get) + self.api_path = '/v2/fake/os-volume-types/1/extra_specs' + self.controller = types_extra_specs.VolumeTypeExtraSpecsController() + """to reset notifier drivers left over from other api/contrib tests""" + notifier_api._reset_drivers() + test_notifier.NOTIFICATIONS = [] + + def tearDown(self): + notifier_api._reset_drivers() + super(VolumeTypesExtraSpecsTest, self).tearDown() + + def test_index(self): + self.stubs.Set(cinder.db, 'volume_type_extra_specs_get', + return_volume_type_extra_specs) + + req = fakes.HTTPRequest.blank(self.api_path) + res_dict = self.controller.index(req, 1) + + self.assertEqual('value1', res_dict['extra_specs']['key1']) + + def test_index_no_data(self): + self.stubs.Set(cinder.db, 'volume_type_extra_specs_get', + return_empty_volume_type_extra_specs) + + req = fakes.HTTPRequest.blank(self.api_path) + res_dict = self.controller.index(req, 1) + + self.assertEqual(0, len(res_dict['extra_specs'])) + + def test_show(self): + self.stubs.Set(cinder.db, 'volume_type_extra_specs_get', + return_volume_type_extra_specs) + + req = fakes.HTTPRequest.blank(self.api_path + '/key5') + res_dict = self.controller.show(req, 1, 'key5') + + self.assertEqual('value5', res_dict['key5']) + + def test_show_spec_not_found(self): + self.stubs.Set(cinder.db, 'volume_type_extra_specs_get', + return_empty_volume_type_extra_specs) + + req = fakes.HTTPRequest.blank(self.api_path + '/key6') + self.assertRaises(webob.exc.HTTPNotFound, self.controller.show, + req, 1, 'key6') + + def test_delete(self): + self.stubs.Set(cinder.db, 'volume_type_extra_specs_delete', + delete_volume_type_extra_specs) + + self.assertEquals(len(test_notifier.NOTIFICATIONS), 0) + req = fakes.HTTPRequest.blank(self.api_path + '/key5') + self.controller.delete(req, 1, 'key5') + self.assertEquals(len(test_notifier.NOTIFICATIONS), 1) + + def test_create(self): + self.stubs.Set(cinder.db, + 'volume_type_extra_specs_update_or_create', + return_create_volume_type_extra_specs) + body = {"extra_specs": {"key1": "value1"}} + + self.assertEquals(len(test_notifier.NOTIFICATIONS), 0) + req = fakes.HTTPRequest.blank(self.api_path) + res_dict = self.controller.create(req, 1, body) + self.assertEquals(len(test_notifier.NOTIFICATIONS), 1) + + self.assertEqual('value1', res_dict['extra_specs']['key1']) + + def test_update_item(self): + 
self.stubs.Set(cinder.db, + 'volume_type_extra_specs_update_or_create', + return_create_volume_type_extra_specs) + body = {"key1": "value1"} + + self.assertEquals(len(test_notifier.NOTIFICATIONS), 0) + req = fakes.HTTPRequest.blank(self.api_path + '/key1') + res_dict = self.controller.update(req, 1, 'key1', body) + self.assertEquals(len(test_notifier.NOTIFICATIONS), 1) + + self.assertEqual('value1', res_dict['key1']) + + def test_update_item_too_many_keys(self): + self.stubs.Set(cinder.db, + 'volume_type_extra_specs_update_or_create', + return_create_volume_type_extra_specs) + body = {"key1": "value1", "key2": "value2"} + + req = fakes.HTTPRequest.blank(self.api_path + '/key1') + self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, + req, 1, 'key1', body) + + def test_update_item_body_uri_mismatch(self): + self.stubs.Set(cinder.db, + 'volume_type_extra_specs_update_or_create', + return_create_volume_type_extra_specs) + body = {"key1": "value1"} + + req = fakes.HTTPRequest.blank(self.api_path + '/bad') + self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, + req, 1, 'bad', body) + + def _extra_specs_empty_update(self, body): + req = fakes.HTTPRequest.blank('/v2/fake/types/1/extra_specs') + req.method = 'POST' + + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.update, req, '1', body) + + def test_update_no_body(self): + self._extra_specs_empty_update(body=None) + + def test_update_empty_body(self): + self._extra_specs_empty_update(body={}) + + def _extra_specs_create_bad_body(self, body): + req = fakes.HTTPRequest.blank('/v2/fake/types/1/extra_specs') + req.method = 'POST' + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.create, req, '1', body) + + def test_create_no_body(self): + self._extra_specs_create_bad_body(body=None) + + def test_create_missing_volume(self): + body = {'foo': {'a': 'b'}} + self._extra_specs_create_bad_body(body=body) + + def test_create_malformed_entity(self): + body = {'extra_specs': 'string'} + self._extra_specs_create_bad_body(body=body) + + +class VolumeTypeExtraSpecsSerializerTest(test.TestCase): + def test_index_create_serializer(self): + serializer = types_extra_specs.VolumeTypeExtraSpecsTemplate() + + # Just getting some input data + extra_specs = stub_volume_type_extra_specs() + text = serializer.serialize(dict(extra_specs=extra_specs)) + + print text + tree = etree.fromstring(text) + + self.assertEqual('extra_specs', tree.tag) + self.assertEqual(len(extra_specs), len(tree)) + seen = set(extra_specs.keys()) + for child in tree: + self.assertTrue(child.tag in seen) + self.assertEqual(extra_specs[child.tag], child.text) + seen.remove(child.tag) + self.assertEqual(len(seen), 0) + + def test_update_show_serializer(self): + serializer = types_extra_specs.VolumeTypeExtraSpecTemplate() + + exemplar = dict(key1='value1') + text = serializer.serialize(exemplar) + + print text + tree = etree.fromstring(text) + + self.assertEqual('key1', tree.tag) + self.assertEqual('value1', tree.text) + self.assertEqual(0, len(tree)) diff --git a/cinder/tests/api/contrib/test_types_manage.py b/cinder/tests/api/contrib/test_types_manage.py new file mode 100644 index 0000000000..c007236ec4 --- /dev/null +++ b/cinder/tests/api/contrib/test_types_manage.py @@ -0,0 +1,129 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import webob + +from cinder.api.contrib import types_manage +from cinder import exception +from cinder.openstack.common.notifier import api as notifier_api +from cinder.openstack.common.notifier import test_notifier +from cinder import test +from cinder.tests.api import fakes +from cinder.volume import volume_types + + +def stub_volume_type(id): + specs = { + "key1": "value1", + "key2": "value2", + "key3": "value3", + "key4": "value4", + "key5": "value5"} + return dict(id=id, name='vol_type_%s' % str(id), extra_specs=specs) + + +def return_volume_types_get_volume_type(context, id): + if id == "777": + raise exception.VolumeTypeNotFound(volume_type_id=id) + return stub_volume_type(int(id)) + + +def return_volume_types_destroy(context, name): + if name == "777": + raise exception.VolumeTypeNotFoundByName(volume_type_name=name) + pass + + +def return_volume_types_create(context, name, specs): + pass + + +def return_volume_types_get_by_name(context, name): + if name == "777": + raise exception.VolumeTypeNotFoundByName(volume_type_name=name) + return stub_volume_type(int(name.split("_")[2])) + + +class VolumeTypesManageApiTest(test.TestCase): + def setUp(self): + super(VolumeTypesManageApiTest, self).setUp() + self.flags(connection_type='fake', + host='fake', + notification_driver=[test_notifier.__name__]) + self.controller = types_manage.VolumeTypesManageController() + """to reset notifier drivers left over from other api/contrib tests""" + notifier_api._reset_drivers() + test_notifier.NOTIFICATIONS = [] + + def tearDown(self): + notifier_api._reset_drivers() + super(VolumeTypesManageApiTest, self).tearDown() + + def test_volume_types_delete(self): + self.stubs.Set(volume_types, 'get_volume_type', + return_volume_types_get_volume_type) + self.stubs.Set(volume_types, 'destroy', + return_volume_types_destroy) + + req = fakes.HTTPRequest.blank('/v2/fake/types/1') + self.assertEquals(len(test_notifier.NOTIFICATIONS), 0) + self.controller._delete(req, 1) + self.assertEquals(len(test_notifier.NOTIFICATIONS), 1) + + def test_volume_types_delete_not_found(self): + self.stubs.Set(volume_types, 'get_volume_type', + return_volume_types_get_volume_type) + self.stubs.Set(volume_types, 'destroy', + return_volume_types_destroy) + + self.assertEquals(len(test_notifier.NOTIFICATIONS), 0) + req = fakes.HTTPRequest.blank('/v2/fake/types/777') + self.assertRaises(webob.exc.HTTPNotFound, self.controller._delete, + req, '777') + self.assertEquals(len(test_notifier.NOTIFICATIONS), 1) + + def test_create(self): + self.stubs.Set(volume_types, 'create', + return_volume_types_create) + self.stubs.Set(volume_types, 'get_volume_type_by_name', + return_volume_types_get_by_name) + + body = {"volume_type": {"name": "vol_type_1", + "extra_specs": {"key1": "value1"}}} + req = fakes.HTTPRequest.blank('/v2/fake/types') + + self.assertEquals(len(test_notifier.NOTIFICATIONS), 0) + res_dict = self.controller._create(req, body) + + self.assertEquals(len(test_notifier.NOTIFICATIONS), 1) + self.assertEqual(1, len(res_dict)) + self.assertEqual('vol_type_1', res_dict['volume_type']['name']) + + def 
_create_volume_type_bad_body(self, body): + req = fakes.HTTPRequest.blank('/v2/fake/types') + req.method = 'POST' + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller._create, req, body) + + def test_create_no_body(self): + self._create_volume_type_bad_body(body=None) + + def test_create_missing_volume(self): + body = {'foo': {'a': 'b'}} + self._create_volume_type_bad_body(body=body) + + def test_create_malformed_entity(self): + body = {'volume_type': 'string'} + self._create_volume_type_bad_body(body=body) diff --git a/cinder/tests/api/contrib/test_volume_actions.py b/cinder/tests/api/contrib/test_volume_actions.py new file mode 100644 index 0000000000..aed906985d --- /dev/null +++ b/cinder/tests/api/contrib/test_volume_actions.py @@ -0,0 +1,248 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 OpenStack LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import datetime +import uuid +import webob + +from cinder.api.contrib import volume_actions +from cinder import exception +from cinder import flags +from cinder.openstack.common import jsonutils +from cinder.openstack.common.rpc import common as rpc_common +from cinder import test +from cinder.tests.api import fakes +from cinder.tests.api.v2 import stubs +from cinder import volume +from cinder.volume import api as volume_api + + +FLAGS = flags.FLAGS + + +def fake_volume_api(*args, **kwargs): + return True + + +def fake_volume_get(*args, **kwargs): + return {'id': 'fake', 'host': 'fake'} + + +class VolumeActionsTest(test.TestCase): + + _actions = ('os-detach', 'os-reserve', 'os-unreserve') + + _methods = ('attach', 'detach', 'reserve_volume', 'unreserve_volume') + + def setUp(self): + super(VolumeActionsTest, self).setUp() + self.stubs.Set(volume.API, 'get', fake_volume_api) + self.UUID = uuid.uuid4() + for _method in self._methods: + self.stubs.Set(volume.API, _method, fake_volume_api) + + self.stubs.Set(volume.API, 'get', fake_volume_get) + + def test_simple_api_actions(self): + app = fakes.wsgi_app() + for _action in self._actions: + req = webob.Request.blank('/v2/fake/volumes/%s/action' % + self.UUID) + req.method = 'POST' + req.body = jsonutils.dumps({_action: None}) + req.content_type = 'application/json' + res = req.get_response(app) + self.assertEqual(res.status_int, 202) + + def test_initialize_connection(self): + def fake_initialize_connection(*args, **kwargs): + return {} + self.stubs.Set(volume.API, 'initialize_connection', + fake_initialize_connection) + + body = {'os-initialize_connection': {'connector': 'fake'}} + req = webob.Request.blank('/v2/fake/volumes/1/action') + req.method = "POST" + req.body = jsonutils.dumps(body) + req.headers["content-type"] = "application/json" + + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 200) + + def test_terminate_connection(self): + def fake_terminate_connection(*args, **kwargs): + return {} + self.stubs.Set(volume.API, 'terminate_connection', + fake_terminate_connection) + + body = {'os-terminate_connection': {'connector': 
'fake'}} + req = webob.Request.blank('/v2/fake/volumes/1/action') + req.method = "POST" + req.body = jsonutils.dumps(body) + req.headers["content-type"] = "application/json" + + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 202) + + def test_attach(self): + body = {'os-attach': {'instance_uuid': 'fake', + 'mountpoint': '/dev/vdc'}} + req = webob.Request.blank('/v2/fake/volumes/1/action') + req.method = "POST" + req.body = jsonutils.dumps(body) + req.headers["content-type"] = "application/json" + + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 202) + + +def stub_volume_get(self, context, volume_id): + volume = stubs.stub_volume(volume_id) + if volume_id == 5: + volume['status'] = 'in-use' + else: + volume['status'] = 'available' + return volume + + +def stub_upload_volume_to_image_service(self, context, volume, metadata, + force): + ret = {"id": volume['id'], + "updated_at": datetime.datetime(1, 1, 1, 1, 1, 1), + "status": 'uploading', + "display_description": volume['display_description'], + "size": volume['size'], + "volume_type": volume['volume_type'], + "image_id": 1, + "container_format": 'bare', + "disk_format": 'raw', + "image_name": 'image_name'} + return ret + + +class VolumeImageActionsTest(test.TestCase): + def setUp(self): + super(VolumeImageActionsTest, self).setUp() + self.controller = volume_actions.VolumeActionsController() + + self.stubs.Set(volume_api.API, 'get', stub_volume_get) + + def test_copy_volume_to_image(self): + self.stubs.Set(volume_api.API, + "copy_volume_to_image", + stub_upload_volume_to_image_service) + + id = 1 + vol = {"container_format": 'bare', + "disk_format": 'raw', + "image_name": 'image_name', + "force": True} + body = {"os-volume_upload_image": vol} + req = fakes.HTTPRequest.blank('/v2/tenant1/volumes/%s/action' % id) + res_dict = self.controller._volume_upload_image(req, id, body) + expected = {'os-volume_upload_image': {'id': id, + 'updated_at': datetime.datetime(1, 1, 1, 1, 1, 1), + 'status': 'uploading', + 'display_description': 'displaydesc', + 'size': 1, + 'volume_type': {'name': 'vol_type_name'}, + 'image_id': 1, + 'container_format': 'bare', + 'disk_format': 'raw', + 'image_name': 'image_name'}} + self.assertDictMatch(res_dict, expected) + + def test_copy_volume_to_image_volumenotfound(self): + def stub_volume_get_raise_exc(self, context, volume_id): + raise exception.VolumeNotFound(volume_id=volume_id) + + self.stubs.Set(volume_api.API, 'get', stub_volume_get_raise_exc) + + id = 1 + vol = {"container_format": 'bare', + "disk_format": 'raw', + "image_name": 'image_name', + "force": True} + body = {"os-volume_upload_image": vol} + req = fakes.HTTPRequest.blank('/v2/tenant1/volumes/%s/action' % id) + self.assertRaises(webob.exc.HTTPNotFound, + self.controller._volume_upload_image, + req, + id, + body) + + def test_copy_volume_to_image_invalidvolume(self): + def stub_upload_volume_to_image_service_raise(self, context, volume, + metadata, force): + raise exception.InvalidVolume(reason='blah') + self.stubs.Set(volume_api.API, + "copy_volume_to_image", + stub_upload_volume_to_image_service_raise) + + id = 1 + vol = {"container_format": 'bare', + "disk_format": 'raw', + "image_name": 'image_name', + "force": True} + body = {"os-volume_upload_image": vol} + req = fakes.HTTPRequest.blank('/v2/tenant1/volumes/%s/action' % id) + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller._volume_upload_image, + req, + id, + body) + + def test_copy_volume_to_image_valueerror(self): + def 
stub_upload_volume_to_image_service_raise(self, context, volume, + metadata, force): + raise ValueError + self.stubs.Set(volume_api.API, + "copy_volume_to_image", + stub_upload_volume_to_image_service_raise) + + id = 1 + vol = {"container_format": 'bare', + "disk_format": 'raw', + "image_name": 'image_name', + "force": True} + body = {"os-volume_upload_image": vol} + req = fakes.HTTPRequest.blank('/v2/tenant1/volumes/%s/action' % id) + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller._volume_upload_image, + req, + id, + body) + + def test_copy_volume_to_image_remoteerror(self): + def stub_upload_volume_to_image_service_raise(self, context, volume, + metadata, force): + raise rpc_common.RemoteError + self.stubs.Set(volume_api.API, + "copy_volume_to_image", + stub_upload_volume_to_image_service_raise) + + id = 1 + vol = {"container_format": 'bare', + "disk_format": 'raw', + "image_name": 'image_name', + "force": True} + body = {"os-volume_upload_image": vol} + req = fakes.HTTPRequest.blank('/v2/tenant1/volumes/%s/action' % id) + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller._volume_upload_image, + req, + id, + body) diff --git a/cinder/tests/api/contrib/test_volume_host_attribute.py b/cinder/tests/api/contrib/test_volume_host_attribute.py new file mode 100644 index 0000000000..a54c53f93d --- /dev/null +++ b/cinder/tests/api/contrib/test_volume_host_attribute.py @@ -0,0 +1,134 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 OpenStack LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
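+
+# The os-vol-host-attr:host attribute is admin-only: the tests below check
+# JSON and XML responses for both admin and non-admin contexts, and verify
+# that the summary listing never exposes the host field.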
+ +import datetime +import json +import uuid + +from lxml import etree +import webob + +from cinder import context +from cinder import test +from cinder.tests.api import fakes +from cinder import volume + + +def fake_volume_get(*args, **kwargs): + return { + 'id': 'fake', + 'host': 'host001', + 'status': 'available', + 'size': 5, + 'availability_zone': 'somewhere', + 'created_at': datetime.datetime.now(), + 'attach_status': None, + 'display_name': 'anothervolume', + 'display_description': 'Just another volume!', + 'volume_type_id': None, + 'snapshot_id': None, + 'project_id': 'fake', + } + + +def fake_volume_get_all(*args, **kwargs): + return [fake_volume_get()] + + +def app(): + # no auth, just let environ['cinder.context'] pass through + api = fakes.router.APIRouter() + mapper = fakes.urlmap.URLMap() + mapper['/v2'] = api + return mapper + + +class VolumeHostAttributeTest(test.TestCase): + + def setUp(self): + super(VolumeHostAttributeTest, self).setUp() + self.stubs.Set(volume.API, 'get', fake_volume_get) + self.stubs.Set(volume.API, 'get_all', fake_volume_get_all) + self.UUID = uuid.uuid4() + + def test_get_volume_allowed(self): + ctx = context.RequestContext('admin', 'fake', True) + req = webob.Request.blank('/v2/fake/volumes/%s' % self.UUID) + req.method = 'GET' + req.environ['cinder.context'] = ctx + res = req.get_response(app()) + vol = json.loads(res.body)['volume'] + self.assertEqual(vol['os-vol-host-attr:host'], 'host001') + + def test_get_volume_unallowed(self): + ctx = context.RequestContext('non-admin', 'fake', False) + req = webob.Request.blank('/v2/fake/volumes/%s' % self.UUID) + req.method = 'GET' + req.environ['cinder.context'] = ctx + res = req.get_response(app()) + vol = json.loads(res.body)['volume'] + self.assertFalse('os-vol-host-attr:host' in vol) + + def test_list_detail_volumes_allowed(self): + ctx = context.RequestContext('admin', 'fake', True) + req = webob.Request.blank('/v2/fake/volumes/detail') + req.method = 'GET' + req.environ['cinder.context'] = ctx + res = req.get_response(app()) + vol = json.loads(res.body)['volumes'] + self.assertEqual(vol[0]['os-vol-host-attr:host'], 'host001') + + def test_list_detail_volumes_unallowed(self): + ctx = context.RequestContext('non-admin', 'fake', False) + req = webob.Request.blank('/v2/fake/volumes/detail') + req.method = 'GET' + req.environ['cinder.context'] = ctx + res = req.get_response(app()) + vol = json.loads(res.body)['volumes'] + self.assertFalse('os-vol-host-attr:host' in vol[0]) + + def test_list_simple_volumes_no_host(self): + ctx = context.RequestContext('admin', 'fake', True) + req = webob.Request.blank('/v2/fake/volumes') + req.method = 'GET' + req.environ['cinder.context'] = ctx + res = req.get_response(app()) + vol = json.loads(res.body)['volumes'] + self.assertFalse('os-vol-host-attr:host' in vol[0]) + + def test_get_volume_xml(self): + ctx = context.RequestContext('admin', 'fake', True) + req = webob.Request.blank('/v2/fake/volumes/%s' % self.UUID) + req.method = 'GET' + req.accept = 'application/xml' + req.environ['cinder.context'] = ctx + res = req.get_response(app()) + vol = etree.XML(res.body) + host_key = ('{http://docs.openstack.org/volume/ext/' + 'volume_host_attribute/api/v1}host') + self.assertEqual(vol.get(host_key), 'host001') + + def test_list_volumes_detail_xml(self): + ctx = context.RequestContext('admin', 'fake', True) + req = webob.Request.blank('/v2/fake/volumes/detail') + req.method = 'GET' + req.accept = 'application/xml' + req.environ['cinder.context'] = ctx + res = 
req.get_response(app()) + vol = list(etree.XML(res.body))[0] + host_key = ('{http://docs.openstack.org/volume/ext/' + 'volume_host_attribute/api/v1}host') + self.assertEqual(vol.get(host_key), 'host001') diff --git a/cinder/tests/api/contrib/test_volume_image_metadata.py b/cinder/tests/api/contrib/test_volume_image_metadata.py new file mode 100644 index 0000000000..2c343cfcb4 --- /dev/null +++ b/cinder/tests/api/contrib/test_volume_image_metadata.py @@ -0,0 +1,130 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 OpenStack LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import datetime +import json +import uuid +from xml.dom import minidom + +import webob + +from cinder.api import common +from cinder.api.openstack.wsgi import MetadataXMLDeserializer +from cinder.api.openstack.wsgi import XMLDeserializer +from cinder import test +from cinder.tests.api import fakes +from cinder import volume + + +def fake_volume_get(*args, **kwargs): + return { + 'id': 'fake', + 'host': 'host001', + 'status': 'available', + 'size': 5, + 'availability_zone': 'somewhere', + 'created_at': datetime.datetime.now(), + 'attach_status': None, + 'display_name': 'anothervolume', + 'display_description': 'Just another volume!', + 'volume_type_id': None, + 'snapshot_id': None, + 'project_id': 'fake', + } + + +def fake_volume_get_all(*args, **kwargs): + return [fake_volume_get()] + + +fake_image_metadata = { + 'image_id': 'someid', + 'image_name': 'fake', + 'kernel_id': 'somekernel', + 'ramdisk_id': 'someramdisk', +} + + +def fake_get_volume_image_metadata(*args, **kwargs): + return fake_image_metadata + + +class VolumeImageMetadataTest(test.TestCase): + content_type = 'application/json' + + def setUp(self): + super(VolumeImageMetadataTest, self).setUp() + self.stubs.Set(volume.API, 'get', fake_volume_get) + self.stubs.Set(volume.API, 'get_all', fake_volume_get_all) + self.stubs.Set(volume.API, 'get_volume_image_metadata', + fake_get_volume_image_metadata) + self.UUID = uuid.uuid4() + + def _make_request(self, url): + req = webob.Request.blank(url) + req.accept = self.content_type + res = req.get_response(fakes.wsgi_app()) + return res + + def _get_image_metadata(self, body): + return json.loads(body)['volume']['volume_image_metadata'] + + def _get_image_metadata_list(self, body): + return [ + volume['volume_image_metadata'] + for volume in json.loads(body)['volumes'] + ] + + def test_get_volume(self): + res = self._make_request('/v2/fake/volumes/%s' % self.UUID) + self.assertEqual(res.status_int, 200) + self.assertEqual(self._get_image_metadata(res.body), + fake_image_metadata) + + def test_list_detail_volumes(self): + res = self._make_request('/v2/fake/volumes/detail') + self.assertEqual(res.status_int, 200) + self.assertEqual(self._get_image_metadata_list(res.body)[0], + fake_image_metadata) + + +class ImageMetadataXMLDeserializer(common.MetadataXMLDeserializer): + metadata_node_name = "volume_image_metadata" + + +class VolumeImageMetadataXMLTest(VolumeImageMetadataTest): + content_type = 
'application/xml' + + def _get_image_metadata(self, body): + deserializer = XMLDeserializer() + volume = deserializer.find_first_child_named( + minidom.parseString(body), 'volume') + image_metadata = deserializer.find_first_child_named( + volume, 'volume_image_metadata') + return MetadataXMLDeserializer().extract_metadata(image_metadata) + + def _get_image_metadata_list(self, body): + deserializer = XMLDeserializer() + volumes = deserializer.find_first_child_named( + minidom.parseString(body), 'volumes') + volume_list = deserializer.find_children_named(volumes, 'volume') + image_metadata_list = [ + deserializer.find_first_child_named( + volume, 'volume_image_metadata' + ) + for volume in volume_list] + return map(MetadataXMLDeserializer().extract_metadata, + image_metadata_list) diff --git a/cinder/tests/api/contrib/test_volume_tenant_attribute.py b/cinder/tests/api/contrib/test_volume_tenant_attribute.py new file mode 100644 index 0000000000..e6e10e1e2a --- /dev/null +++ b/cinder/tests/api/contrib/test_volume_tenant_attribute.py @@ -0,0 +1,137 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 OpenStack LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import datetime +import json +import uuid + +from lxml import etree +import webob + +from cinder import context +from cinder import test +from cinder.tests.api import fakes +from cinder import volume + + +PROJECT_ID = '88fd1da4-f464-4a87-9ce5-26f2f40743b9' + + +def fake_volume_get(*args, **kwargs): + return { + 'id': 'fake', + 'host': 'host001', + 'status': 'available', + 'size': 5, + 'availability_zone': 'somewhere', + 'created_at': datetime.datetime.now(), + 'attach_status': None, + 'display_name': 'anothervolume', + 'display_description': 'Just another volume!', + 'volume_type_id': None, + 'snapshot_id': None, + 'project_id': PROJECT_ID, + } + + +def fake_volume_get_all(*args, **kwargs): + return [fake_volume_get()] + + +def app(): + # no auth, just let environ['cinder.context'] pass through + api = fakes.router.APIRouter() + mapper = fakes.urlmap.URLMap() + mapper['/v2'] = api + return mapper + + +class VolumeTenantAttributeTest(test.TestCase): + + def setUp(self): + super(VolumeTenantAttributeTest, self).setUp() + self.stubs.Set(volume.API, 'get', fake_volume_get) + self.stubs.Set(volume.API, 'get_all', fake_volume_get_all) + self.UUID = uuid.uuid4() + + def test_get_volume_allowed(self): + ctx = context.RequestContext('admin', 'fake', True) + req = webob.Request.blank('/v2/fake/volumes/%s' % self.UUID) + req.method = 'GET' + req.environ['cinder.context'] = ctx + res = req.get_response(app()) + vol = json.loads(res.body)['volume'] + self.assertEqual(vol['os-vol-tenant-attr:tenant_id'], PROJECT_ID) + + def test_get_volume_unallowed(self): + ctx = context.RequestContext('non-admin', 'fake', False) + req = webob.Request.blank('/v2/fake/volumes/%s' % self.UUID) + req.method = 'GET' + req.environ['cinder.context'] = ctx + res = req.get_response(app()) + vol = json.loads(res.body)['volume'] + 
self.assertFalse('os-vol-tenant-attr:tenant_id' in vol) + + def test_list_detail_volumes_allowed(self): + ctx = context.RequestContext('admin', 'fake', True) + req = webob.Request.blank('/v2/fake/volumes/detail') + req.method = 'GET' + req.environ['cinder.context'] = ctx + res = req.get_response(app()) + vol = json.loads(res.body)['volumes'] + self.assertEqual(vol[0]['os-vol-tenant-attr:tenant_id'], PROJECT_ID) + + def test_list_detail_volumes_unallowed(self): + ctx = context.RequestContext('non-admin', 'fake', False) + req = webob.Request.blank('/v2/fake/volumes/detail') + req.method = 'GET' + req.environ['cinder.context'] = ctx + res = req.get_response(app()) + vol = json.loads(res.body)['volumes'] + self.assertFalse('os-vol-tenant-attr:tenant_id' in vol[0]) + + def test_list_simple_volumes_no_tenant_id(self): + ctx = context.RequestContext('admin', 'fake', True) + req = webob.Request.blank('/v2/fake/volumes') + req.method = 'GET' + req.environ['cinder.context'] = ctx + res = req.get_response(app()) + vol = json.loads(res.body)['volumes'] + self.assertFalse('os-vol-tenant-attr:tenant_id' in vol[0]) + + def test_get_volume_xml(self): + ctx = context.RequestContext('admin', 'fake', True) + req = webob.Request.blank('/v2/fake/volumes/%s' % self.UUID) + req.method = 'GET' + req.accept = 'application/xml' + req.environ['cinder.context'] = ctx + res = req.get_response(app()) + vol = etree.XML(res.body) + tenant_key = ('{http://docs.openstack.org/volume/ext/' + 'volume_tenant_attribute/api/v1}tenant_id') + self.assertEqual(vol.get(tenant_key), PROJECT_ID) + + def test_list_volumes_detail_xml(self): + ctx = context.RequestContext('admin', 'fake', True) + req = webob.Request.blank('/v2/fake/volumes/detail') + req.method = 'GET' + req.accept = 'application/xml' + req.environ['cinder.context'] = ctx + res = req.get_response(app()) + vol = list(etree.XML(res.body))[0] + tenant_key = ('{http://docs.openstack.org/volume/ext/' + 'volume_tenant_attribute/api/v1}tenant_id') + self.assertEqual(vol.get(tenant_key), PROJECT_ID) diff --git a/cinder/tests/api/extensions/__init__.py b/cinder/tests/api/extensions/__init__.py new file mode 100644 index 0000000000..848908a953 --- /dev/null +++ b/cinder/tests/api/extensions/__init__.py @@ -0,0 +1,15 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/cinder/tests/api/extensions/foxinsocks.py b/cinder/tests/api/extensions/foxinsocks.py new file mode 100644 index 0000000000..45fc106b88 --- /dev/null +++ b/cinder/tests/api/extensions/foxinsocks.py @@ -0,0 +1,93 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import webob.exc + +from cinder.api import extensions +from cinder.api.openstack import wsgi + + +class FoxInSocksController(object): + + def index(self, req): + return "Try to say this Mr. Knox, sir..." + + +class FoxInSocksServerControllerExtension(wsgi.Controller): + @wsgi.action('add_tweedle') + def _add_tweedle(self, req, id, body): + + return "Tweedle Beetle Added." + + @wsgi.action('delete_tweedle') + def _delete_tweedle(self, req, id, body): + + return "Tweedle Beetle Deleted." + + @wsgi.action('fail') + def _fail(self, req, id, body): + + raise webob.exc.HTTPBadRequest(explanation='Tweedle fail') + + +class FoxInSocksFlavorGooseControllerExtension(wsgi.Controller): + @wsgi.extends + def show(self, req, resp_obj, id): + #NOTE: This only handles JSON responses. + # You can use content type header to test for XML. + resp_obj.obj['flavor']['googoose'] = req.GET.get('chewing') + + +class FoxInSocksFlavorBandsControllerExtension(wsgi.Controller): + @wsgi.extends + def show(self, req, resp_obj, id): + #NOTE: This only handles JSON responses. + # You can use content type header to test for XML. + resp_obj.obj['big_bands'] = 'Pig Bands!' + + +class Foxinsocks(extensions.ExtensionDescriptor): + """The Fox In Socks Extension""" + + name = "Fox In Socks" + alias = "FOXNSOX" + namespace = "http://www.fox.in.socks/api/ext/pie/v1.0" + updated = "2011-01-22T13:25:27-06:00" + + def __init__(self, ext_mgr): + ext_mgr.register(self) + + def get_resources(self): + resources = [] + resource = extensions.ResourceExtension('foxnsocks', + FoxInSocksController()) + resources.append(resource) + return resources + + def get_controller_extensions(self): + extension_list = [] + + extension_set = [ + (FoxInSocksServerControllerExtension, 'servers'), + (FoxInSocksFlavorGooseControllerExtension, 'flavors'), + (FoxInSocksFlavorBandsControllerExtension, 'flavors'), ] + for klass, collection in extension_set: + controller = klass() + ext = extensions.ControllerExtension(self, collection, controller) + extension_list.append(ext) + + return extension_list diff --git a/cinder/tests/api/fakes.py b/cinder/tests/api/fakes.py new file mode 100644 index 0000000000..3fd21cfbaf --- /dev/null +++ b/cinder/tests/api/fakes.py @@ -0,0 +1,190 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
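+
+# Shared API test helpers: wsgi_app() assembles a v2 router behind the
+# fault and auth middleware, and HTTPRequest.blank() injects a
+# FakeRequestContext into the WSGI environ.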
+ +import uuid + +import routes +import webob +import webob.dec +import webob.request + +from cinder.api.middleware import auth +from cinder.api.middleware import fault +from cinder.api.openstack import wsgi as os_wsgi +from cinder.api import urlmap +from cinder.api.v2 import limits +from cinder.api.v2 import router +from cinder.api import versions +from cinder import context +from cinder import exception as exc +from cinder.openstack.common import timeutils +from cinder import wsgi + + +FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa' +FAKE_UUIDS = {} + + +class Context(object): + pass + + +class FakeRouter(wsgi.Router): + def __init__(self, ext_mgr=None): + pass + + @webob.dec.wsgify + def __call__(self, req): + res = webob.Response() + res.status = '200' + res.headers['X-Test-Success'] = 'True' + return res + + +@webob.dec.wsgify +def fake_wsgi(self, req): + return self.application + + +def wsgi_app(inner_app_v2=None, fake_auth=True, fake_auth_context=None, + use_no_auth=False, ext_mgr=None): + if not inner_app_v2: + inner_app_v2 = router.APIRouter(ext_mgr) + + if fake_auth: + if fake_auth_context is not None: + ctxt = fake_auth_context + else: + ctxt = context.RequestContext('fake', 'fake', auth_token=True) + api_v2 = fault.FaultWrapper(auth.InjectContext(ctxt, + inner_app_v2)) + elif use_no_auth: + api_v2 = fault.FaultWrapper(auth.NoAuthMiddleware( + limits.RateLimitingMiddleware(inner_app_v2))) + else: + api_v2 = fault.FaultWrapper(auth.AuthMiddleware( + limits.RateLimitingMiddleware(inner_app_v2))) + + mapper = urlmap.URLMap() + mapper['/v2'] = api_v2 + mapper['/'] = fault.FaultWrapper(versions.Versions()) + return mapper + + +def stub_out_rate_limiting(stubs): + def fake_rate_init(self, app): + # super(limits.RateLimitingMiddleware, self).__init__(app) + self.application = app + + # FIXME(ja): unsure about limits in volumes + # stubs.Set(cinder.api.openstack.compute.limits.RateLimitingMiddleware, + # '__init__', fake_rate_init) + + # stubs.Set(cinder.api.openstack.compute.limits.RateLimitingMiddleware, + # '__call__', fake_wsgi) + + +def stub_out_key_pair_funcs(stubs, have_key_pair=True): + def key_pair(context, user_id): + return [dict(name='key', public_key='public_key')] + + def one_key_pair(context, user_id, name): + if name == 'key': + return dict(name='key', public_key='public_key') + else: + raise exc.KeypairNotFound(user_id=user_id, name=name) + + def no_key_pair(context, user_id): + return [] + + +class FakeToken(object): + id_count = 0 + + def __getitem__(self, key): + return getattr(self, key) + + def __init__(self, **kwargs): + FakeToken.id_count += 1 + self.id = FakeToken.id_count + for k, v in kwargs.iteritems(): + setattr(self, k, v) + + +class FakeRequestContext(context.RequestContext): + def __init__(self, *args, **kwargs): + kwargs['auth_token'] = kwargs.get('auth_token', 'fake_auth_token') + return super(FakeRequestContext, self).__init__(*args, **kwargs) + + +class HTTPRequest(webob.Request): + + @classmethod + def blank(cls, *args, **kwargs): + kwargs['base_url'] = 'http://localhost/v1' + use_admin_context = kwargs.pop('use_admin_context', False) + out = webob.Request.blank(*args, **kwargs) + out.environ['cinder.context'] = FakeRequestContext( + 'fake_user', + 'fake', + is_admin=use_admin_context) + return out + + +class TestRouter(wsgi.Router): + def __init__(self, controller): + mapper = routes.Mapper() + mapper.resource("test", "tests", + controller=os_wsgi.Resource(controller)) + super(TestRouter, self).__init__(mapper) + + +class 
FakeAuthDatabase(object): + data = {} + + @staticmethod + def auth_token_get(context, token_hash): + return FakeAuthDatabase.data.get(token_hash, None) + + @staticmethod + def auth_token_create(context, token): + fake_token = FakeToken(created_at=timeutils.utcnow(), **token) + FakeAuthDatabase.data[fake_token.token_hash] = fake_token + FakeAuthDatabase.data['id_%i' % fake_token.id] = fake_token + return fake_token + + @staticmethod + def auth_token_destroy(context, token_id): + token = FakeAuthDatabase.data.get('id_%i' % token_id) + if token and token.token_hash in FakeAuthDatabase.data: + del FakeAuthDatabase.data[token.token_hash] + del FakeAuthDatabase.data['id_%i' % token_id] + + +class FakeRateLimiter(object): + def __init__(self, application): + self.application = application + + @webob.dec.wsgify + def __call__(self, req): + return self.application + + +def get_fake_uuid(token=0): + if token not in FAKE_UUIDS: + FAKE_UUIDS[token] = str(uuid.uuid4()) + return FAKE_UUIDS[token] diff --git a/cinder/tests/api/middleware/__init__.py b/cinder/tests/api/middleware/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/cinder/tests/api/middleware/test_auth.py b/cinder/tests/api/middleware/test_auth.py new file mode 100644 index 0000000000..4fca13fe62 --- /dev/null +++ b/cinder/tests/api/middleware/test_auth.py @@ -0,0 +1,59 @@ +# Copyright (c) 2012 OpenStack, LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
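+
+# CinderKeystoneContext must return 401 when no user header is supplied,
+# and must prefer X_USER_ID over X_USER when both are present.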
+
+import webob
+
+import cinder.api.middleware.auth
+from cinder import test
+
+
+class TestCinderKeystoneContextMiddleware(test.TestCase):
+
+    def setUp(self):
+        super(TestCinderKeystoneContextMiddleware, self).setUp()
+
+        @webob.dec.wsgify()
+        def fake_app(req):
+            self.context = req.environ['cinder.context']
+            return webob.Response()
+
+        self.context = None
+        self.middleware = (cinder.api.middleware.auth
+                           .CinderKeystoneContext(fake_app))
+        self.request = webob.Request.blank('/')
+        self.request.headers['X_TENANT_ID'] = 'testtenantid'
+        self.request.headers['X_AUTH_TOKEN'] = 'testauthtoken'
+
+    def test_no_user_or_user_id(self):
+        response = self.request.get_response(self.middleware)
+        self.assertEqual(response.status, '401 Unauthorized')
+
+    def test_user_id_only(self):
+        self.request.headers['X_USER_ID'] = 'testuserid'
+        response = self.request.get_response(self.middleware)
+        self.assertEqual(response.status, '200 OK')
+        self.assertEqual(self.context.user_id, 'testuserid')
+
+    def test_user_only(self):
+        self.request.headers['X_USER'] = 'testuser'
+        response = self.request.get_response(self.middleware)
+        self.assertEqual(response.status, '200 OK')
+        self.assertEqual(self.context.user_id, 'testuser')
+
+    def test_user_id_trumps_user(self):
+        self.request.headers['X_USER_ID'] = 'testuserid'
+        self.request.headers['X_USER'] = 'testuser'
+        response = self.request.get_response(self.middleware)
+        self.assertEqual(response.status, '200 OK')
+        self.assertEqual(self.context.user_id, 'testuserid')
diff --git a/cinder/tests/api/middleware/test_faults.py b/cinder/tests/api/middleware/test_faults.py
new file mode 100644
index 0000000000..aff1dfbcc9
--- /dev/null
+++ b/cinder/tests/api/middleware/test_faults.py
@@ -0,0 +1,208 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
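Before the fault-wrapper tests below, one aside on the auth tests just shown: they exercise a middleware that maps Keystone identity headers onto a request context. A condensed illustration of that contract, a sketch rather than the actual CinderKeystoneContext implementation:

    import webob.dec
    import webob.exc

    class KeystoneContextSketch(object):
        def __init__(self, application):
            self.application = application

        @webob.dec.wsgify()
        def __call__(self, req):
            # X_USER_ID is authoritative; X_USER is the legacy fallback,
            # which is why test_user_id_trumps_user expects 'testuserid'.
            user_id = req.headers.get('X_USER_ID') or req.headers.get('X_USER')
            if not user_id:
                # matches the 401 asserted in test_no_user_or_user_id
                return webob.exc.HTTPUnauthorized()
            # the real middleware builds a full RequestContext; a plain dict
            # keeps this sketch short
            req.environ['cinder.context'] = {'user_id': user_id}
            return self.application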
+ +from xml.dom import minidom + +import webob +import webob.dec +import webob.exc + +from cinder.api import common +from cinder.api.openstack import wsgi +from cinder.openstack.common import jsonutils +from cinder import test + + +class TestFaults(test.TestCase): + """Tests covering `cinder.api.openstack.faults:Fault` class.""" + + def _prepare_xml(self, xml_string): + """Remove characters from string which hinder XML equality testing.""" + xml_string = xml_string.replace(" ", "") + xml_string = xml_string.replace("\n", "") + xml_string = xml_string.replace("\t", "") + return xml_string + + def test_400_fault_json(self): + """Test fault serialized to JSON via file-extension and/or header.""" + requests = [ + webob.Request.blank('/.json'), + webob.Request.blank('/', headers={"Accept": "application/json"}), + ] + + for request in requests: + fault = wsgi.Fault(webob.exc.HTTPBadRequest(explanation='scram')) + response = request.get_response(fault) + + expected = { + "badRequest": { + "message": "scram", + "code": 400, + }, + } + actual = jsonutils.loads(response.body) + + self.assertEqual(response.content_type, "application/json") + self.assertEqual(expected, actual) + + def test_413_fault_json(self): + """Test fault serialized to JSON via file-extension and/or header.""" + requests = [ + webob.Request.blank('/.json'), + webob.Request.blank('/', headers={"Accept": "application/json"}), + ] + + for request in requests: + exc = webob.exc.HTTPRequestEntityTooLarge + fault = wsgi.Fault(exc(explanation='sorry', + headers={'Retry-After': 4})) + response = request.get_response(fault) + + expected = { + "overLimit": { + "message": "sorry", + "code": 413, + "retryAfter": 4, + }, + } + actual = jsonutils.loads(response.body) + + self.assertEqual(response.content_type, "application/json") + self.assertEqual(expected, actual) + + def test_raise(self): + """Ensure the ability to raise :class:`Fault` in WSGI-ified methods.""" + @webob.dec.wsgify + def raiser(req): + raise wsgi.Fault(webob.exc.HTTPNotFound(explanation='whut?')) + + req = webob.Request.blank('/.xml') + resp = req.get_response(raiser) + self.assertEqual(resp.content_type, "application/xml") + self.assertEqual(resp.status_int, 404) + self.assertTrue('whut?' 
in resp.body)
+
+    def test_raise_403(self):
+        """Ensure the ability to raise :class:`Fault` in WSGI-ified methods."""
+        @webob.dec.wsgify
+        def raiser(req):
+            raise wsgi.Fault(webob.exc.HTTPForbidden(explanation='whut?'))
+
+        req = webob.Request.blank('/.xml')
+        resp = req.get_response(raiser)
+        self.assertEqual(resp.content_type, "application/xml")
+        self.assertEqual(resp.status_int, 403)
+        self.assertTrue('resizeNotAllowed' not in resp.body)
+        self.assertTrue('forbidden' in resp.body)
+
+    def test_fault_has_status_int(self):
+        """Ensure the status_int is set correctly on faults"""
+        fault = wsgi.Fault(webob.exc.HTTPBadRequest(explanation='what?'))
+        self.assertEqual(fault.status_int, 400)
+
+    def test_xml_serializer(self):
+        """Ensure that a v1.1 request responds with a v1 xmlns"""
+        request = webob.Request.blank('/v1',
+                                      headers={"Accept": "application/xml"})
+
+        fault = wsgi.Fault(webob.exc.HTTPBadRequest(explanation='scram'))
+        response = request.get_response(fault)
+
+        self.assertTrue(common.XML_NS_V1 in response.body)
+        self.assertEqual(response.content_type, "application/xml")
+        self.assertEqual(response.status_int, 400)
+
+
+class FaultsXMLSerializationTestV11(test.TestCase):
+    """Tests covering `cinder.api.openstack.faults:Fault` class."""
+
+    def _prepare_xml(self, xml_string):
+        xml_string = xml_string.replace(" ", "")
+        xml_string = xml_string.replace("\n", "")
+        xml_string = xml_string.replace("\t", "")
+        return xml_string
+
+    def test_400_fault(self):
+        metadata = {'attributes': {"badRequest": 'code'}}
+        serializer = wsgi.XMLDictSerializer(metadata=metadata,
+                                            xmlns=common.XML_NS_V1)
+
+        fixture = {
+            "badRequest": {
+                "message": "scram",
+                "code": 400,
+            },
+        }
+
+        output = serializer.serialize(fixture)
+        actual = minidom.parseString(self._prepare_xml(output))
+
+        expected = minidom.parseString(self._prepare_xml("""
+                <badRequest code="400" xmlns="%s">
+                    <message>scram</message>
+                </badRequest>
+            """) % common.XML_NS_V1)
+
+        self.assertEqual(expected.toxml(), actual.toxml())
+
+    def test_413_fault(self):
+        metadata = {'attributes': {"overLimit": 'code'}}
+        serializer = wsgi.XMLDictSerializer(metadata=metadata,
+                                            xmlns=common.XML_NS_V1)
+
+        fixture = {
+            "overLimit": {
+                "message": "sorry",
+                "code": 413,
+                "retryAfter": 4,
+            },
+        }
+
+        output = serializer.serialize(fixture)
+        actual = minidom.parseString(self._prepare_xml(output))
+
+        expected = minidom.parseString(self._prepare_xml("""
+                <overLimit code="413" xmlns="%s">
+                    <message>sorry</message>
+                    <retryAfter>4</retryAfter>
+                </overLimit>
+            """) % common.XML_NS_V1)
+
+        self.assertEqual(expected.toxml(), actual.toxml())
+
+    def test_404_fault(self):
+        metadata = {'attributes': {"itemNotFound": 'code'}}
+        serializer = wsgi.XMLDictSerializer(metadata=metadata,
+                                            xmlns=common.XML_NS_V1)
+
+        fixture = {
+            "itemNotFound": {
+                "message": "sorry",
+                "code": 404,
+            },
+        }
+
+        output = serializer.serialize(fixture)
+        actual = minidom.parseString(self._prepare_xml(output))
+
+        expected = minidom.parseString(self._prepare_xml("""
+                <itemNotFound code="404" xmlns="%s">
+                    <message>sorry</message>
+                </itemNotFound>
+            """) % common.XML_NS_V1)
+
+        self.assertEqual(expected.toxml(), actual.toxml())
diff --git a/cinder/tests/api/middleware/test_sizelimit.py b/cinder/tests/api/middleware/test_sizelimit.py
new file mode 100644
index 0000000000..3b87a2cd96
--- /dev/null
+++ b/cinder/tests/api/middleware/test_sizelimit.py
@@ -0,0 +1,100 @@
+# Copyright (c) 2012 OpenStack, LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import StringIO +import webob + +from cinder.api.middleware import sizelimit +from cinder import flags +from cinder import test + +FLAGS = flags.FLAGS +MAX_REQUEST_BODY_SIZE = FLAGS.osapi_max_request_body_size + + +class TestLimitingReader(test.TestCase): + + def test_limiting_reader(self): + BYTES = 1024 + bytes_read = 0 + data = StringIO.StringIO("*" * BYTES) + for chunk in sizelimit.LimitingReader(data, BYTES): + bytes_read += len(chunk) + + self.assertEquals(bytes_read, BYTES) + + bytes_read = 0 + data = StringIO.StringIO("*" * BYTES) + reader = sizelimit.LimitingReader(data, BYTES) + byte = reader.read(1) + while len(byte) != 0: + bytes_read += 1 + byte = reader.read(1) + + self.assertEquals(bytes_read, BYTES) + + def test_limiting_reader_fails(self): + BYTES = 1024 + + def _consume_all_iter(): + bytes_read = 0 + data = StringIO.StringIO("*" * BYTES) + for chunk in sizelimit.LimitingReader(data, BYTES - 1): + bytes_read += len(chunk) + + self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, + _consume_all_iter) + + def _consume_all_read(): + bytes_read = 0 + data = StringIO.StringIO("*" * BYTES) + reader = sizelimit.LimitingReader(data, BYTES - 1) + byte = reader.read(1) + while len(byte) != 0: + bytes_read += 1 + byte = reader.read(1) + + self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, + _consume_all_read) + + +class TestRequestBodySizeLimiter(test.TestCase): + + def setUp(self): + super(TestRequestBodySizeLimiter, self).setUp() + + @webob.dec.wsgify() + def fake_app(req): + return webob.Response(req.body) + + self.middleware = sizelimit.RequestBodySizeLimiter(fake_app) + self.request = webob.Request.blank('/', method='POST') + + def test_content_length_acceptable(self): + self.request.headers['Content-Length'] = MAX_REQUEST_BODY_SIZE + self.request.body = "0" * MAX_REQUEST_BODY_SIZE + response = self.request.get_response(self.middleware) + self.assertEqual(response.status_int, 200) + + def test_content_length_too_large(self): + self.request.headers['Content-Length'] = MAX_REQUEST_BODY_SIZE + 1 + self.request.body = "0" * (MAX_REQUEST_BODY_SIZE + 1) + response = self.request.get_response(self.middleware) + self.assertEqual(response.status_int, 413) + + def test_request_too_large_no_content_length(self): + self.request.body = "0" * (MAX_REQUEST_BODY_SIZE + 1) + self.request.headers['Content-Length'] = None + response = self.request.get_response(self.middleware) + self.assertEqual(response.status_int, 413) diff --git a/cinder/tests/api/openstack/__init__.py b/cinder/tests/api/openstack/__init__.py new file mode 100644 index 0000000000..3be5ce944c --- /dev/null +++ b/cinder/tests/api/openstack/__init__.py @@ -0,0 +1,19 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# NOTE(vish): this forces the fixtures from tests/__init.py:setup() to work
+from cinder.tests import *
diff --git a/cinder/tests/api/openstack/test_wsgi.py b/cinder/tests/api/openstack/test_wsgi.py
new file mode 100644
index 0000000000..77308602d0
--- /dev/null
+++ b/cinder/tests/api/openstack/test_wsgi.py
@@ -0,0 +1,858 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+import inspect
+import webob
+
+from cinder.api.openstack import wsgi
+from cinder import exception
+from cinder import test
+from cinder.tests.api import fakes
+
+
+class RequestTest(test.TestCase):
+    def test_content_type_missing(self):
+        request = wsgi.Request.blank('/tests/123', method='POST')
+        request.body = ""
+        self.assertEqual(None, request.get_content_type())
+
+    def test_content_type_unsupported(self):
+        request = wsgi.Request.blank('/tests/123', method='POST')
+        request.headers["Content-Type"] = "text/html"
+        request.body = "asdf<br />"
+        self.assertRaises(exception.InvalidContentType,
+                          request.get_content_type)
+
+    def test_content_type_with_charset(self):
+        request = wsgi.Request.blank('/tests/123')
+        request.headers["Content-Type"] = "application/json; charset=UTF-8"
+        result = request.get_content_type()
+        self.assertEqual(result, "application/json")
+
+    def test_content_type_from_accept(self):
+        for content_type in ('application/xml',
+                             'application/vnd.openstack.volume+xml',
+                             'application/json',
+                             'application/vnd.openstack.volume+json'):
+            request = wsgi.Request.blank('/tests/123')
+            request.headers["Accept"] = content_type
+            result = request.best_match_content_type()
+            self.assertEqual(result, content_type)
+
+    def test_content_type_from_accept_best(self):
+        request = wsgi.Request.blank('/tests/123')
+        request.headers["Accept"] = "application/xml, application/json"
+        result = request.best_match_content_type()
+        self.assertEqual(result, "application/json")
+
+        request = wsgi.Request.blank('/tests/123')
+        request.headers["Accept"] = ("application/json; q=0.3, "
+                                     "application/xml; q=0.9")
+        result = request.best_match_content_type()
+        self.assertEqual(result, "application/xml")
+
+    def test_content_type_from_query_extension(self):
+        request = wsgi.Request.blank('/tests/123.xml')
+        result = request.best_match_content_type()
+        self.assertEqual(result, "application/xml")
+
+        request = wsgi.Request.blank('/tests/123.json')
+        result = request.best_match_content_type()
+        self.assertEqual(result, "application/json")
+
+        request = wsgi.Request.blank('/tests/123.invalid')
+        result = request.best_match_content_type()
+        self.assertEqual(result, "application/json")
+
+    def test_content_type_accept_and_query_extension(self):
+        request = wsgi.Request.blank('/tests/123.xml')
+        request.headers["Accept"] = "application/json"
+        result = request.best_match_content_type()
+        self.assertEqual(result, "application/xml")
+
+    def test_content_type_accept_default(self):
+        request = wsgi.Request.blank('/tests/123.unsupported')
+        request.headers["Accept"] = "application/unsupported1"
+        result = request.best_match_content_type()
+        self.assertEqual(result, "application/json")
+
+
+class ActionDispatcherTest(test.TestCase):
+    def test_dispatch(self):
+        serializer = wsgi.ActionDispatcher()
+        serializer.create = lambda x: 'pants'
+        self.assertEqual(serializer.dispatch({}, action='create'), 'pants')
+
+    def test_dispatch_action_None(self):
+        serializer = wsgi.ActionDispatcher()
+        serializer.create = lambda x: 'pants'
+        serializer.default = lambda x: 'trousers'
+        self.assertEqual(serializer.dispatch({}, action=None), 'trousers')
+
+    def test_dispatch_default(self):
+        serializer = wsgi.ActionDispatcher()
+        serializer.create = lambda x: 'pants'
+        serializer.default = lambda x: 'trousers'
+        self.assertEqual(serializer.dispatch({}, action='update'), 'trousers')
+
+
+class DictSerializerTest(test.TestCase):
+    def test_dispatch_default(self):
+        serializer = wsgi.DictSerializer()
+        self.assertEqual(serializer.serialize({}, 'update'), '')
+
+
+class XMLDictSerializerTest(test.TestCase):
+    def test_xml(self):
+        input_dict = dict(servers=dict(a=(2, 3)))
+        expected_xml = '<serversxmlns="asdf"><a>(2,3)</a></servers>'
+        serializer = wsgi.XMLDictSerializer(xmlns="asdf")
+        result = serializer.serialize(input_dict)
+        result = result.replace('\n', '').replace(' ', '')
+        self.assertEqual(result, expected_xml)
+
+
+class JSONDictSerializerTest(test.TestCase):
+    def test_json(self):
+        input_dict = dict(servers=dict(a=(2, 3)))
+        expected_json = '{"servers":{"a":[2,3]}}'
+        serializer = wsgi.JSONDictSerializer()
+        result = serializer.serialize(input_dict)
+        result = result.replace('\n', '').replace(' ', '')
+        self.assertEqual(result, expected_json)
+
+
+class TextDeserializerTest(test.TestCase):
+    def test_dispatch_default(self):
+        deserializer = wsgi.TextDeserializer()
+        self.assertEqual(deserializer.deserialize({}, 'update'), {})
+
+
+class JSONDeserializerTest(test.TestCase):
+    def test_json(self):
+        data = """{"a": {
+                "a1": "1",
+                "a2": "2",
+                "bs": ["1", "2", "3", {"c": {"c1": "1"}}],
+                "d": {"e": "1"},
+                "f": "1"}}"""
+        as_dict = {
+            'body': {
+                'a': {
+                    'a1': '1',
+                    'a2': '2',
+                    'bs': ['1', '2', '3', {'c': {'c1': '1'}}],
+                    'd': {'e': '1'},
+                    'f': '1',
+                },
+            },
+        }
+        deserializer = wsgi.JSONDeserializer()
+        self.assertEqual(deserializer.deserialize(data), as_dict)
+
+
+class XMLDeserializerTest(test.TestCase):
+    def test_xml(self):
+        xml = """
+            <a a1="1" a2="2">
+              <bs><b>1</b><b>2</b><b>3</b><b><c c1="1"/></b></bs>
+              <d><e>1</e></d>
+              <f>1</f>
+            </a>
+            """.strip()
+        as_dict = {
+            'body': {
+                'a': {
+                    'a1': '1',
+                    'a2': '2',
+                    'bs': ['1', '2', '3', {'c': {'c1': '1'}}],
+                    'd': {'e': '1'},
+                    'f': '1',
+                },
+            },
+        }
+        metadata = {'plurals': {'bs': 'b', 'ts': 't'}}
+        deserializer = wsgi.XMLDeserializer(metadata=metadata)
+        self.assertEqual(deserializer.deserialize(xml), as_dict)
+
+    def test_xml_empty(self):
+        xml = """<a></a>"""
+        as_dict = {"body": {"a": {}}}
+        deserializer = wsgi.XMLDeserializer()
+        self.assertEqual(deserializer.deserialize(xml), as_dict)
+
+
+class ResourceTest(test.TestCase):
+    def test_resource_call(self):
+        class Controller(object):
+            def index(self, req):
+                return 'off'
+
+        req = webob.Request.blank('/tests')
+        app = fakes.TestRouter(Controller())
+        response = req.get_response(app)
+        self.assertEqual(response.body, 'off')
+        self.assertEqual(response.status_int, 200)
+
+    def test_resource_not_authorized(self):
+        class Controller(object):
+            def index(self, req):
+                raise exception.NotAuthorized()
+
+        req = webob.Request.blank('/tests')
+        app = fakes.TestRouter(Controller())
+        response = req.get_response(app)
+        self.assertEqual(response.status_int, 403)
+
+    def test_dispatch(self):
+        class Controller(object):
+            def index(self, req, pants=None):
+                return pants
+
+        controller = Controller()
+        resource = wsgi.Resource(controller)
+        method, extensions = resource.get_method(None, 'index', None, '')
+        actual = resource.dispatch(method, None, {'pants': 'off'})
+        expected = 'off'
+        self.assertEqual(actual, expected)
+
+    def test_get_method_undefined_controller_action(self):
+        class Controller(object):
+            def index(self, req, pants=None):
+                return pants
+
+        controller = Controller()
+        resource = wsgi.Resource(controller)
+        self.assertRaises(AttributeError, resource.get_method,
+                          None, 'create', None, '')
+
+    def test_get_method_action_json(self):
+        class Controller(wsgi.Controller):
+            @wsgi.action('fooAction')
+            def _action_foo(self, req, id, body):
+                return body
+
+        controller = Controller()
+        resource = wsgi.Resource(controller)
+        method, extensions = resource.get_method(None, 'action',
+                                                 'application/json',
+                                                 '{"fooAction": true}')
+        self.assertEqual(controller._action_foo, method)
+
+    def test_get_method_action_xml(self):
+        class Controller(wsgi.Controller):
+            @wsgi.action('fooAction')
+            def _action_foo(self, req, id, body):
+                return body
+
+        controller = Controller()
+        resource = wsgi.Resource(controller)
+        method, extensions = resource.get_method(None, 'action',
+                                                 'application/xml',
+                                                 '<fooAction>true</fooAction>')
+        self.assertEqual(controller._action_foo, method)
+
+    def test_get_method_action_bad_body(self):
+        class Controller(wsgi.Controller):
@wsgi.action('fooAction') + def _action_foo(self, req, id, body): + return body + + controller = Controller() + resource = wsgi.Resource(controller) + self.assertRaises(exception.MalformedRequestBody, resource.get_method, + None, 'action', 'application/json', '{}') + + def test_get_method_unknown_controller_action(self): + class Controller(wsgi.Controller): + @wsgi.action('fooAction') + def _action_foo(self, req, id, body): + return body + + controller = Controller() + resource = wsgi.Resource(controller) + self.assertRaises(KeyError, resource.get_method, + None, 'action', 'application/json', + '{"barAction": true}') + + def test_get_method_action_method(self): + class Controller(): + def action(self, req, pants=None): + return pants + + controller = Controller() + resource = wsgi.Resource(controller) + method, extensions = resource.get_method(None, 'action', + 'application/xml', + 'true= len(self.ext_list)) + + # Make sure that at least Fox in Sox is correct. + (fox_ext, ) = [x for x in exts if x.get('alias') == 'FOXNSOX'] + self.assertEqual(fox_ext.get('name'), 'Fox In Socks') + self.assertEqual( + fox_ext.get('namespace'), + 'http://www.fox.in.socks/api/ext/pie/v1.0') + self.assertEqual(fox_ext.get('updated'), '2011-01-22T13:25:27-06:00') + self.assertEqual( + fox_ext.findtext('{0}description'.format(NS)), + 'The Fox In Socks Extension') + + xmlutil.validate_schema(root, 'extensions') + + def test_get_extension_xml(self): + app = router.APIRouter() + request = webob.Request.blank("/fake/extensions/FOXNSOX") + request.accept = "application/xml" + response = request.get_response(app) + self.assertEqual(200, response.status_int) + xml = response.body + + root = etree.XML(xml) + self.assertEqual(root.tag.split('extension')[0], NS) + self.assertEqual(root.get('alias'), 'FOXNSOX') + self.assertEqual(root.get('name'), 'Fox In Socks') + self.assertEqual( + root.get('namespace'), + 'http://www.fox.in.socks/api/ext/pie/v1.0') + self.assertEqual(root.get('updated'), '2011-01-22T13:25:27-06:00') + self.assertEqual( + root.findtext('{0}description'.format(NS)), + 'The Fox In Socks Extension') + + xmlutil.validate_schema(root, 'extension') diff --git a/cinder/tests/api/test_router.py b/cinder/tests/api/test_router.py new file mode 100644 index 0000000000..9a3f3da2a8 --- /dev/null +++ b/cinder/tests/api/test_router.py @@ -0,0 +1,158 @@ +# Copyright 2011 Denali Systems, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
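Before the router tests below, a compact recap of the action-dispatch pattern the wsgi.Resource tests above all follow; this is a self-contained sketch, and the 'sampleAction' name is made up for illustration:

    from cinder.api.openstack import wsgi

    class SampleController(wsgi.Controller):
        @wsgi.action('sampleAction')
        def _sample(self, req, id, body):
            # echo the decoded action body back to the caller
            return body

    resource = wsgi.Resource(SampleController())
    # get_method() selects _sample because the request body names
    # 'sampleAction'; an unknown key raises KeyError and an empty body
    # raises exception.MalformedRequestBody, as the tests above assert.
    method, extensions = resource.get_method(None, 'action',
                                             'application/json',
                                             '{"sampleAction": true}')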
+ + +from cinder.api.openstack import wsgi +from cinder.api.v1 import router +from cinder.api.v1 import snapshots +from cinder.api.v1 import volumes +from cinder.api import versions +from cinder import flags +from cinder.openstack.common import log as logging +from cinder import test +from cinder.tests.api import fakes + +FLAGS = flags.FLAGS + +LOG = logging.getLogger(__name__) + + +class FakeController(object): + def __init__(self, ext_mgr=None): + self.ext_mgr = ext_mgr + + def index(self, req): + return {} + + def detail(self, req): + return {} + + +def create_resource(ext_mgr): + return wsgi.Resource(FakeController(ext_mgr)) + + +class VolumeRouterTestCase(test.TestCase): + def setUp(self): + super(VolumeRouterTestCase, self).setUp() + # NOTE(vish): versions is just returning text so, no need to stub. + self.stubs.Set(snapshots, 'create_resource', create_resource) + self.stubs.Set(volumes, 'create_resource', create_resource) + self.app = router.APIRouter() + + def test_versions(self): + req = fakes.HTTPRequest.blank('') + req.method = 'GET' + req.content_type = 'application/json' + response = req.get_response(self.app) + self.assertEqual(302, response.status_int) + req = fakes.HTTPRequest.blank('/') + req.method = 'GET' + req.content_type = 'application/json' + response = req.get_response(self.app) + self.assertEqual(200, response.status_int) + + def test_versions_multi(self): + req = fakes.HTTPRequest.blank('/') + req.method = 'GET' + req.content_type = 'application/json' + resource = versions.Versions() + result = resource.dispatch(resource.multi, req, {}) + ids = [v['id'] for v in result['choices']] + self.assertEqual(set(ids), set(['v1.0', 'v2.0'])) + + def test_versions_multi_disable_v1(self): + self.flags(enable_v1_api=False) + req = fakes.HTTPRequest.blank('/') + req.method = 'GET' + req.content_type = 'application/json' + resource = versions.Versions() + result = resource.dispatch(resource.multi, req, {}) + ids = [v['id'] for v in result['choices']] + self.assertEqual(set(ids), set(['v2.0'])) + + def test_versions_multi_disable_v2(self): + self.flags(enable_v2_api=False) + req = fakes.HTTPRequest.blank('/') + req.method = 'GET' + req.content_type = 'application/json' + resource = versions.Versions() + result = resource.dispatch(resource.multi, req, {}) + ids = [v['id'] for v in result['choices']] + self.assertEqual(set(ids), set(['v1.0'])) + + def test_versions_index(self): + req = fakes.HTTPRequest.blank('/') + req.method = 'GET' + req.content_type = 'application/json' + resource = versions.Versions() + result = resource.dispatch(resource.index, req, {}) + ids = [v['id'] for v in result['versions']] + self.assertEqual(set(ids), set(['v1.0', 'v2.0'])) + + def test_versions_index_disable_v1(self): + self.flags(enable_v1_api=False) + req = fakes.HTTPRequest.blank('/') + req.method = 'GET' + req.content_type = 'application/json' + resource = versions.Versions() + result = resource.dispatch(resource.index, req, {}) + ids = [v['id'] for v in result['versions']] + self.assertEqual(set(ids), set(['v2.0'])) + + def test_versions_index_disable_v2(self): + self.flags(enable_v2_api=False) + req = fakes.HTTPRequest.blank('/') + req.method = 'GET' + req.content_type = 'application/json' + resource = versions.Versions() + result = resource.dispatch(resource.index, req, {}) + ids = [v['id'] for v in result['versions']] + self.assertEqual(set(ids), set(['v1.0'])) + + def test_volumes(self): + req = fakes.HTTPRequest.blank('/fake/volumes') + req.method = 'GET' + req.content_type = 
'application/json' + response = req.get_response(self.app) + self.assertEqual(200, response.status_int) + + def test_volumes_detail(self): + req = fakes.HTTPRequest.blank('/fake/volumes/detail') + req.method = 'GET' + req.content_type = 'application/json' + response = req.get_response(self.app) + self.assertEqual(200, response.status_int) + + def test_types(self): + req = fakes.HTTPRequest.blank('/fake/types') + req.method = 'GET' + req.content_type = 'application/json' + response = req.get_response(self.app) + self.assertEqual(200, response.status_int) + + def test_snapshots(self): + req = fakes.HTTPRequest.blank('/fake/snapshots') + req.method = 'GET' + req.content_type = 'application/json' + response = req.get_response(self.app) + self.assertEqual(200, response.status_int) + + def test_snapshots_detail(self): + req = fakes.HTTPRequest.blank('/fake/snapshots/detail') + req.method = 'GET' + req.content_type = 'application/json' + response = req.get_response(self.app) + self.assertEqual(200, response.status_int) diff --git a/cinder/tests/api/test_wsgi.py b/cinder/tests/api/test_wsgi.py new file mode 100644 index 0000000000..c68f8158eb --- /dev/null +++ b/cinder/tests/api/test_wsgi.py @@ -0,0 +1,67 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# Copyright 2010 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Test WSGI basics and provide some helper functions for other WSGI tests. +""" + +from cinder import test + +import routes +import webob + +from cinder import wsgi + + +class Test(test.TestCase): + + def test_debug(self): + + class Application(wsgi.Application): + """Dummy application to test debug.""" + + def __call__(self, environ, start_response): + start_response("200", [("X-Test", "checking")]) + return ['Test result'] + + application = wsgi.Debug(Application()) + result = webob.Request.blank('/').get_response(application) + self.assertEqual(result.body, "Test result") + + def test_router(self): + + class Application(wsgi.Application): + """Test application to call from router.""" + + def __call__(self, environ, start_response): + start_response("200", []) + return ['Router result'] + + class Router(wsgi.Router): + """Test router.""" + + def __init__(self): + mapper = routes.Mapper() + mapper.connect("/test", controller=Application()) + super(Router, self).__init__(mapper) + + result = webob.Request.blank('/test').get_response(Router()) + self.assertEqual(result.body, "Router result") + result = webob.Request.blank('/bad').get_response(Router()) + self.assertNotEqual(result.body, "Router result") diff --git a/cinder/tests/api/test_xmlutil.py b/cinder/tests/api/test_xmlutil.py new file mode 100644 index 0000000000..4e145e741d --- /dev/null +++ b/cinder/tests/api/test_xmlutil.py @@ -0,0 +1,697 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from lxml import etree + +from cinder.api import xmlutil +from cinder import test + + +class SelectorTest(test.TestCase): + obj_for_test = {'test': {'name': 'test', + 'values': [1, 2, 3], + 'attrs': {'foo': 1, + 'bar': 2, + 'baz': 3, }, }, } + + def test_empty_selector(self): + sel = xmlutil.Selector() + self.assertEqual(len(sel.chain), 0) + self.assertEqual(sel(self.obj_for_test), self.obj_for_test) + + def test_dict_selector(self): + sel = xmlutil.Selector('test') + self.assertEqual(len(sel.chain), 1) + self.assertEqual(sel.chain[0], 'test') + self.assertEqual(sel(self.obj_for_test), + self.obj_for_test['test']) + + def test_datum_selector(self): + sel = xmlutil.Selector('test', 'name') + self.assertEqual(len(sel.chain), 2) + self.assertEqual(sel.chain[0], 'test') + self.assertEqual(sel.chain[1], 'name') + self.assertEqual(sel(self.obj_for_test), 'test') + + def test_list_selector(self): + sel = xmlutil.Selector('test', 'values', 0) + self.assertEqual(len(sel.chain), 3) + self.assertEqual(sel.chain[0], 'test') + self.assertEqual(sel.chain[1], 'values') + self.assertEqual(sel.chain[2], 0) + self.assertEqual(sel(self.obj_for_test), 1) + + def test_items_selector(self): + sel = xmlutil.Selector('test', 'attrs', xmlutil.get_items) + self.assertEqual(len(sel.chain), 3) + self.assertEqual(sel.chain[2], xmlutil.get_items) + for key, val in sel(self.obj_for_test): + self.assertEqual(self.obj_for_test['test']['attrs'][key], val) + + def test_missing_key_selector(self): + sel = xmlutil.Selector('test2', 'attrs') + self.assertEqual(sel(self.obj_for_test), None) + self.assertRaises(KeyError, sel, self.obj_for_test, True) + + def test_constant_selector(self): + sel = xmlutil.ConstantSelector('Foobar') + self.assertEqual(sel.value, 'Foobar') + self.assertEqual(sel(self.obj_for_test), 'Foobar') + + +class TemplateElementTest(test.TestCase): + def test_element_initial_attributes(self): + # Create a template element with some attributes + elem = xmlutil.TemplateElement('test', attrib=dict(a=1, b=2, c=3), + c=4, d=5, e=6) + + # Verify all the attributes are as expected + expected = dict(a=1, b=2, c=4, d=5, e=6) + for k, v in expected.items(): + self.assertEqual(elem.attrib[k].chain[0], v) + + def test_element_get_attributes(self): + expected = dict(a=1, b=2, c=3) + + # Create a template element with some attributes + elem = xmlutil.TemplateElement('test', attrib=expected) + + # Verify that get() retrieves the attributes + for k, v in expected.items(): + self.assertEqual(elem.get(k).chain[0], v) + + def test_element_set_attributes(self): + attrs = dict(a=None, b='foo', c=xmlutil.Selector('foo', 'bar')) + + # Create a bare template element with no attributes + elem = xmlutil.TemplateElement('test') + + # Set the attribute values + for k, v in attrs.items(): + elem.set(k, v) + + # Now verify what got set + self.assertEqual(len(elem.attrib['a'].chain), 1) + self.assertEqual(elem.attrib['a'].chain[0], 'a') + self.assertEqual(len(elem.attrib['b'].chain), 1) + 
self.assertEqual(elem.attrib['b'].chain[0], 'foo') + self.assertEqual(elem.attrib['c'], attrs['c']) + + def test_element_attribute_keys(self): + attrs = dict(a=1, b=2, c=3, d=4) + expected = set(attrs.keys()) + + # Create a template element with some attributes + elem = xmlutil.TemplateElement('test', attrib=attrs) + + # Now verify keys + self.assertEqual(set(elem.keys()), expected) + + def test_element_attribute_items(self): + expected = dict(a=xmlutil.Selector(1), + b=xmlutil.Selector(2), + c=xmlutil.Selector(3)) + keys = set(expected.keys()) + + # Create a template element with some attributes + elem = xmlutil.TemplateElement('test', attrib=expected) + + # Now verify items + for k, v in elem.items(): + self.assertEqual(expected[k], v) + keys.remove(k) + + # Did we visit all keys? + self.assertEqual(len(keys), 0) + + def test_element_selector_none(self): + # Create a template element with no selector + elem = xmlutil.TemplateElement('test') + + self.assertEqual(len(elem.selector.chain), 0) + + def test_element_selector_string(self): + # Create a template element with a string selector + elem = xmlutil.TemplateElement('test', selector='test') + + self.assertEqual(len(elem.selector.chain), 1) + self.assertEqual(elem.selector.chain[0], 'test') + + def test_element_selector(self): + sel = xmlutil.Selector('a', 'b') + + # Create a template element with an explicit selector + elem = xmlutil.TemplateElement('test', selector=sel) + + self.assertEqual(elem.selector, sel) + + def test_element_subselector_none(self): + # Create a template element with no subselector + elem = xmlutil.TemplateElement('test') + + self.assertEqual(elem.subselector, None) + + def test_element_subselector_string(self): + # Create a template element with a string subselector + elem = xmlutil.TemplateElement('test', subselector='test') + + self.assertEqual(len(elem.subselector.chain), 1) + self.assertEqual(elem.subselector.chain[0], 'test') + + def test_element_subselector(self): + sel = xmlutil.Selector('a', 'b') + + # Create a template element with an explicit subselector + elem = xmlutil.TemplateElement('test', subselector=sel) + + self.assertEqual(elem.subselector, sel) + + def test_element_append_child(self): + # Create an element + elem = xmlutil.TemplateElement('test') + + # Make sure the element starts off empty + self.assertEqual(len(elem), 0) + + # Create a child element + child = xmlutil.TemplateElement('child') + + # Append the child to the parent + elem.append(child) + + # Verify that the child was added + self.assertEqual(len(elem), 1) + self.assertEqual(elem[0], child) + self.assertEqual('child' in elem, True) + self.assertEqual(elem['child'], child) + + # Ensure that multiple children of the same name are rejected + child2 = xmlutil.TemplateElement('child') + self.assertRaises(KeyError, elem.append, child2) + + def test_element_extend_children(self): + # Create an element + elem = xmlutil.TemplateElement('test') + + # Make sure the element starts off empty + self.assertEqual(len(elem), 0) + + # Create a few children + children = [xmlutil.TemplateElement('child1'), + xmlutil.TemplateElement('child2'), + xmlutil.TemplateElement('child3'), ] + + # Extend the parent by those children + elem.extend(children) + + # Verify that the children were added + self.assertEqual(len(elem), 3) + for idx in range(len(elem)): + self.assertEqual(children[idx], elem[idx]) + self.assertEqual(children[idx].tag in elem, True) + self.assertEqual(elem[children[idx].tag], children[idx]) + + # Ensure that multiple children of the same 
name are rejected + children2 = [xmlutil.TemplateElement('child4'), + xmlutil.TemplateElement('child1'), ] + self.assertRaises(KeyError, elem.extend, children2) + + # Also ensure that child4 was not added + self.assertEqual(len(elem), 3) + self.assertEqual(elem[-1].tag, 'child3') + + def test_element_insert_child(self): + # Create an element + elem = xmlutil.TemplateElement('test') + + # Make sure the element starts off empty + self.assertEqual(len(elem), 0) + + # Create a few children + children = [xmlutil.TemplateElement('child1'), + xmlutil.TemplateElement('child2'), + xmlutil.TemplateElement('child3'), ] + + # Extend the parent by those children + elem.extend(children) + + # Create a child to insert + child = xmlutil.TemplateElement('child4') + + # Insert it + elem.insert(1, child) + + # Ensure the child was inserted in the right place + self.assertEqual(len(elem), 4) + children.insert(1, child) + for idx in range(len(elem)): + self.assertEqual(children[idx], elem[idx]) + self.assertEqual(children[idx].tag in elem, True) + self.assertEqual(elem[children[idx].tag], children[idx]) + + # Ensure that multiple children of the same name are rejected + child2 = xmlutil.TemplateElement('child2') + self.assertRaises(KeyError, elem.insert, 2, child2) + + def test_element_remove_child(self): + # Create an element + elem = xmlutil.TemplateElement('test') + + # Make sure the element starts off empty + self.assertEqual(len(elem), 0) + + # Create a few children + children = [xmlutil.TemplateElement('child1'), + xmlutil.TemplateElement('child2'), + xmlutil.TemplateElement('child3'), ] + + # Extend the parent by those children + elem.extend(children) + + # Create a test child to remove + child = xmlutil.TemplateElement('child2') + + # Try to remove it + self.assertRaises(ValueError, elem.remove, child) + + # Ensure that no child was removed + self.assertEqual(len(elem), 3) + + # Now remove a legitimate child + elem.remove(children[1]) + + # Ensure that the child was removed + self.assertEqual(len(elem), 2) + self.assertEqual(elem[0], children[0]) + self.assertEqual(elem[1], children[2]) + self.assertEqual('child2' in elem, False) + + # Ensure the child cannot be retrieved by name + def get_key(elem, key): + return elem[key] + self.assertRaises(KeyError, get_key, elem, 'child2') + + def test_element_text(self): + # Create an element + elem = xmlutil.TemplateElement('test') + + # Ensure that it has no text + self.assertEqual(elem.text, None) + + # Try setting it to a string and ensure it becomes a selector + elem.text = 'test' + self.assertEqual(hasattr(elem.text, 'chain'), True) + self.assertEqual(len(elem.text.chain), 1) + self.assertEqual(elem.text.chain[0], 'test') + + # Try resetting the text to None + elem.text = None + self.assertEqual(elem.text, None) + + # Now make up a selector and try setting the text to that + sel = xmlutil.Selector() + elem.text = sel + self.assertEqual(elem.text, sel) + + # Finally, try deleting the text and see what happens + del elem.text + self.assertEqual(elem.text, None) + + def test_apply_attrs(self): + # Create a template element + attrs = dict(attr1=xmlutil.ConstantSelector(1), + attr2=xmlutil.ConstantSelector(2)) + tmpl_elem = xmlutil.TemplateElement('test', attrib=attrs) + + # Create an etree element + elem = etree.Element('test') + + # Apply the template to the element + tmpl_elem.apply(elem, None) + + # Now, verify the correct attributes were set + for k, v in elem.items(): + self.assertEqual(str(attrs[k].value), v) + + def test_apply_text(self): + # Create a 
template element + tmpl_elem = xmlutil.TemplateElement('test') + tmpl_elem.text = xmlutil.ConstantSelector(1) + + # Create an etree element + elem = etree.Element('test') + + # Apply the template to the element + tmpl_elem.apply(elem, None) + + # Now, verify the text was set + self.assertEqual(str(tmpl_elem.text.value), elem.text) + + def test__render(self): + attrs = dict(attr1=xmlutil.ConstantSelector(1), + attr2=xmlutil.ConstantSelector(2), + attr3=xmlutil.ConstantSelector(3)) + + # Create a master template element + master_elem = xmlutil.TemplateElement('test', attr1=attrs['attr1']) + + # Create a couple of slave template element + slave_elems = [xmlutil.TemplateElement('test', attr2=attrs['attr2']), + xmlutil.TemplateElement('test', attr3=attrs['attr3']), ] + + # Try the render + elem = master_elem._render(None, None, slave_elems, None) + + # Verify the particulars of the render + self.assertEqual(elem.tag, 'test') + self.assertEqual(len(elem.nsmap), 0) + for k, v in elem.items(): + self.assertEqual(str(attrs[k].value), v) + + # Create a parent for the element to be rendered + parent = etree.Element('parent') + + # Try the render again... + elem = master_elem._render(parent, None, slave_elems, dict(a='foo')) + + # Verify the particulars of the render + self.assertEqual(len(parent), 1) + self.assertEqual(parent[0], elem) + self.assertEqual(len(elem.nsmap), 1) + self.assertEqual(elem.nsmap['a'], 'foo') + + def test_render(self): + # Create a template element + tmpl_elem = xmlutil.TemplateElement('test') + tmpl_elem.text = xmlutil.Selector() + + # Create the object we're going to render + obj = ['elem1', 'elem2', 'elem3', 'elem4'] + + # Try a render with no object + elems = tmpl_elem.render(None, None) + self.assertEqual(len(elems), 0) + + # Try a render with one object + elems = tmpl_elem.render(None, 'foo') + self.assertEqual(len(elems), 1) + self.assertEqual(elems[0][0].text, 'foo') + self.assertEqual(elems[0][1], 'foo') + + # Now, try rendering an object with multiple entries + parent = etree.Element('parent') + elems = tmpl_elem.render(parent, obj) + self.assertEqual(len(elems), 4) + + # Check the results + for idx in range(len(obj)): + self.assertEqual(elems[idx][0].text, obj[idx]) + self.assertEqual(elems[idx][1], obj[idx]) + + def test_subelement(self): + # Try the SubTemplateElement constructor + parent = xmlutil.SubTemplateElement(None, 'parent') + self.assertEqual(parent.tag, 'parent') + self.assertEqual(len(parent), 0) + + # Now try it with a parent element + child = xmlutil.SubTemplateElement(parent, 'child') + self.assertEqual(child.tag, 'child') + self.assertEqual(len(parent), 1) + self.assertEqual(parent[0], child) + + def test_wrap(self): + # These are strange methods, but they make things easier + elem = xmlutil.TemplateElement('test') + self.assertEqual(elem.unwrap(), elem) + self.assertEqual(elem.wrap().root, elem) + + def test_dyntag(self): + obj = ['a', 'b', 'c'] + + # Create a template element with a dynamic tag + tmpl_elem = xmlutil.TemplateElement(xmlutil.Selector()) + + # Try the render + parent = etree.Element('parent') + elems = tmpl_elem.render(parent, obj) + + # Verify the particulars of the render + self.assertEqual(len(elems), len(obj)) + for idx in range(len(obj)): + self.assertEqual(elems[idx][0].tag, obj[idx]) + + +class TemplateTest(test.TestCase): + def test_wrap(self): + # These are strange methods, but they make things easier + elem = xmlutil.TemplateElement('test') + tmpl = xmlutil.Template(elem) + self.assertEqual(tmpl.unwrap(), elem) + 
self.assertEqual(tmpl.wrap(), tmpl) + + def test__siblings(self): + # Set up a basic template + elem = xmlutil.TemplateElement('test') + tmpl = xmlutil.Template(elem) + + # Check that we get the right siblings + siblings = tmpl._siblings() + self.assertEqual(len(siblings), 1) + self.assertEqual(siblings[0], elem) + + def test__nsmap(self): + # Set up a basic template + elem = xmlutil.TemplateElement('test') + tmpl = xmlutil.Template(elem, nsmap=dict(a="foo")) + + # Check out that we get the right namespace dictionary + nsmap = tmpl._nsmap() + self.assertNotEqual(id(nsmap), id(tmpl.nsmap)) + self.assertEqual(len(nsmap), 1) + self.assertEqual(nsmap['a'], 'foo') + + def test_master_attach(self): + # Set up a master template + elem = xmlutil.TemplateElement('test') + tmpl = xmlutil.MasterTemplate(elem, 1) + + # Make sure it has a root but no slaves + self.assertEqual(tmpl.root, elem) + self.assertEqual(len(tmpl.slaves), 0) + + # Try to attach an invalid slave + bad_elem = xmlutil.TemplateElement('test2') + self.assertRaises(ValueError, tmpl.attach, bad_elem) + self.assertEqual(len(tmpl.slaves), 0) + + # Try to attach an invalid and a valid slave + good_elem = xmlutil.TemplateElement('test') + self.assertRaises(ValueError, tmpl.attach, good_elem, bad_elem) + self.assertEqual(len(tmpl.slaves), 0) + + # Try to attach an inapplicable template + class InapplicableTemplate(xmlutil.Template): + def apply(self, master): + return False + inapp_tmpl = InapplicableTemplate(good_elem) + tmpl.attach(inapp_tmpl) + self.assertEqual(len(tmpl.slaves), 0) + + # Now try attaching an applicable template + tmpl.attach(good_elem) + self.assertEqual(len(tmpl.slaves), 1) + self.assertEqual(tmpl.slaves[0].root, good_elem) + + def test_master_copy(self): + # Construct a master template + elem = xmlutil.TemplateElement('test') + tmpl = xmlutil.MasterTemplate(elem, 1, nsmap=dict(a='foo')) + + # Give it a slave + slave = xmlutil.TemplateElement('test') + tmpl.attach(slave) + + # Construct a copy + copy = tmpl.copy() + + # Check to see if we actually managed a copy + self.assertNotEqual(tmpl, copy) + self.assertEqual(tmpl.root, copy.root) + self.assertEqual(tmpl.version, copy.version) + self.assertEqual(id(tmpl.nsmap), id(copy.nsmap)) + self.assertNotEqual(id(tmpl.slaves), id(copy.slaves)) + self.assertEqual(len(tmpl.slaves), len(copy.slaves)) + self.assertEqual(tmpl.slaves[0], copy.slaves[0]) + + def test_slave_apply(self): + # Construct a master template + elem = xmlutil.TemplateElement('test') + master = xmlutil.MasterTemplate(elem, 3) + + # Construct a slave template with applicable minimum version + slave = xmlutil.SlaveTemplate(elem, 2) + self.assertEqual(slave.apply(master), True) + + # Construct a slave template with equal minimum version + slave = xmlutil.SlaveTemplate(elem, 3) + self.assertEqual(slave.apply(master), True) + + # Construct a slave template with inapplicable minimum version + slave = xmlutil.SlaveTemplate(elem, 4) + self.assertEqual(slave.apply(master), False) + + # Construct a slave template with applicable version range + slave = xmlutil.SlaveTemplate(elem, 2, 4) + self.assertEqual(slave.apply(master), True) + + # Construct a slave template with low version range + slave = xmlutil.SlaveTemplate(elem, 1, 2) + self.assertEqual(slave.apply(master), False) + + # Construct a slave template with high version range + slave = xmlutil.SlaveTemplate(elem, 4, 5) + self.assertEqual(slave.apply(master), False) + + # Construct a slave template with matching version range + slave = xmlutil.SlaveTemplate(elem, 3, 
3) + self.assertEqual(slave.apply(master), True) + + def test__serialize(self): + # Our test object to serialize + obj = {'test': {'name': 'foobar', + 'values': [1, 2, 3, 4], + 'attrs': {'a': 1, + 'b': 2, + 'c': 3, + 'd': 4, }, + 'image': {'name': 'image_foobar', 'id': 42, }, }, } + + # Set up our master template + root = xmlutil.TemplateElement('test', selector='test', + name='name') + value = xmlutil.SubTemplateElement(root, 'value', selector='values') + value.text = xmlutil.Selector() + attrs = xmlutil.SubTemplateElement(root, 'attrs', selector='attrs') + xmlutil.SubTemplateElement(attrs, 'attr', selector=xmlutil.get_items, + key=0, value=1) + master = xmlutil.MasterTemplate(root, 1, nsmap=dict(f='foo')) + + # Set up our slave template + root_slave = xmlutil.TemplateElement('test', selector='test') + image = xmlutil.SubTemplateElement(root_slave, 'image', + selector='image', id='id') + image.text = xmlutil.Selector('name') + slave = xmlutil.SlaveTemplate(root_slave, 1, nsmap=dict(b='bar')) + + # Attach the slave to the master... + master.attach(slave) + + # Try serializing our object + siblings = master._siblings() + nsmap = master._nsmap() + result = master._serialize(None, obj, siblings, nsmap) + + # Now we get to manually walk the element tree... + self.assertEqual(result.tag, 'test') + self.assertEqual(len(result.nsmap), 2) + self.assertEqual(result.nsmap['f'], 'foo') + self.assertEqual(result.nsmap['b'], 'bar') + self.assertEqual(result.get('name'), obj['test']['name']) + for idx, val in enumerate(obj['test']['values']): + self.assertEqual(result[idx].tag, 'value') + self.assertEqual(result[idx].text, str(val)) + idx += 1 + self.assertEqual(result[idx].tag, 'attrs') + for attr in result[idx]: + self.assertEqual(attr.tag, 'attr') + self.assertEqual(attr.get('value'), + str(obj['test']['attrs'][attr.get('key')])) + idx += 1 + self.assertEqual(result[idx].tag, 'image') + self.assertEqual(result[idx].get('id'), + str(obj['test']['image']['id'])) + self.assertEqual(result[idx].text, obj['test']['image']['name']) + + +class MasterTemplateBuilder(xmlutil.TemplateBuilder): + def construct(self): + elem = xmlutil.TemplateElement('test') + return xmlutil.MasterTemplate(elem, 1) + + +class SlaveTemplateBuilder(xmlutil.TemplateBuilder): + def construct(self): + elem = xmlutil.TemplateElement('test') + return xmlutil.SlaveTemplate(elem, 1) + + +class TemplateBuilderTest(test.TestCase): + def test_master_template_builder(self): + # Make sure the template hasn't been built yet + self.assertEqual(MasterTemplateBuilder._tmpl, None) + + # Now, construct the template + tmpl1 = MasterTemplateBuilder() + + # Make sure that there is a template cached... + self.assertNotEqual(MasterTemplateBuilder._tmpl, None) + + # Make sure it wasn't what was returned... + self.assertNotEqual(MasterTemplateBuilder._tmpl, tmpl1) + + # Make sure it doesn't get rebuilt + cached = MasterTemplateBuilder._tmpl + tmpl2 = MasterTemplateBuilder() + self.assertEqual(MasterTemplateBuilder._tmpl, cached) + + # Make sure we're always getting fresh copies + self.assertNotEqual(tmpl1, tmpl2) + + # Make sure we can override the copying behavior + tmpl3 = MasterTemplateBuilder(False) + self.assertEqual(MasterTemplateBuilder._tmpl, tmpl3) + + def test_slave_template_builder(self): + # Make sure the template hasn't been built yet + self.assertEqual(SlaveTemplateBuilder._tmpl, None) + + # Now, construct the template + tmpl1 = SlaveTemplateBuilder() + + # Make sure there is a template cached... 
+        self.assertNotEqual(SlaveTemplateBuilder._tmpl, None)
+
+        # Make sure it was what was returned...
+        self.assertEqual(SlaveTemplateBuilder._tmpl, tmpl1)
+
+        # Make sure it doesn't get rebuilt
+        tmpl2 = SlaveTemplateBuilder()
+        self.assertEqual(SlaveTemplateBuilder._tmpl, tmpl1)
+
+        # Make sure we're always getting the cached copy
+        self.assertEqual(tmpl1, tmpl2)
+
+
+class MiscellaneousXMLUtilTests(test.TestCase):
+    def test_make_flat_dict(self):
+        expected_xml = ("<?xml version='1.0' encoding='UTF-8'?>\n"
+                        '<wrapper><a>foo</a><b>bar</b></wrapper>')
+        root = xmlutil.make_flat_dict('wrapper')
+        tmpl = xmlutil.MasterTemplate(root, 1)
+        result = tmpl.serialize(dict(wrapper=dict(a='foo', b='bar')))
+        self.assertEqual(result, expected_xml)
diff --git a/cinder/tests/api/v1/__init__.py b/cinder/tests/api/v1/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/cinder/tests/api/v1/stubs.py b/cinder/tests/api/v1/stubs.py
new file mode 100644
index 0000000000..72d91dbf1b
--- /dev/null
+++ b/cinder/tests/api/v1/stubs.py
@@ -0,0 +1,131 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import datetime
+
+from cinder import exception as exc
+
+FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
+FAKE_UUIDS = {}
+
+
+def stub_volume(id, **kwargs):
+    volume = {
+        'id': id,
+        'user_id': 'fakeuser',
+        'project_id': 'fakeproject',
+        'host': 'fakehost',
+        'size': 1,
+        'availability_zone': 'fakeaz',
+        'instance_uuid': 'fakeuuid',
+        'mountpoint': '/',
+        'status': 'fakestatus',
+        'attach_status': 'attached',
+        'bootable': 'false',
+        'name': 'vol name',
+        'display_name': 'displayname',
+        'display_description': 'displaydesc',
+        'created_at': datetime.datetime(1, 1, 1, 1, 1, 1),
+        'snapshot_id': None,
+        'source_volid': None,
+        'volume_type_id': '3e196c20-3c06-11e2-81c1-0800200c9a66',
+        'volume_metadata': [],
+        'volume_type': {'name': 'vol_type_name'}}
+
+    volume.update(kwargs)
+    return volume
+
+
+def stub_volume_create(self, context, size, name, description, snapshot,
+                       **param):
+    vol = stub_volume('1')
+    vol['size'] = size
+    vol['display_name'] = name
+    vol['display_description'] = description
+    vol['source_volid'] = None
+    try:
+        vol['snapshot_id'] = snapshot['id']
+    except (KeyError, TypeError):
+        vol['snapshot_id'] = None
+    vol['availability_zone'] = param.get('availability_zone', 'fakeaz')
+    return vol
+
+
+def stub_volume_create_from_image(self, context, size, name, description,
+                                  snapshot, volume_type, metadata,
+                                  availability_zone):
+    vol = stub_volume('1')
+    vol['status'] = 'creating'
+    vol['size'] = size
+    vol['display_name'] = name
+    vol['display_description'] = description
+    vol['availability_zone'] = 'cinder'
+    return vol
+
+
+def stub_volume_update(self, context, *args, **param):
+    pass
+
+
+def stub_volume_delete(self, context, *args, **param):
+    pass
+
+
+def stub_volume_get(self, context, volume_id):
+    return stub_volume(volume_id)
+
+
+def stub_volume_get_notfound(self, context, volume_id):
+    raise exc.NotFound
+
+
+def 
stub_volume_get_all(context, search_opts=None): + return [stub_volume(100, project_id='fake'), + stub_volume(101, project_id='superfake'), + stub_volume(102, project_id='superduperfake')] + + +def stub_volume_get_all_by_project(self, context, search_opts=None): + return [stub_volume_get(self, context, '1')] + + +def stub_snapshot(id, **kwargs): + snapshot = {'id': id, + 'volume_id': 12, + 'status': 'available', + 'volume_size': 100, + 'created_at': None, + 'display_name': 'Default name', + 'display_description': 'Default description', + 'project_id': 'fake'} + + snapshot.update(kwargs) + return snapshot + + +def stub_snapshot_get_all(self): + return [stub_snapshot(100, project_id='fake'), + stub_snapshot(101, project_id='superfake'), + stub_snapshot(102, project_id='superduperfake')] + + +def stub_snapshot_get_all_by_project(self, context): + return [stub_snapshot(1)] + + +def stub_snapshot_update(self, context, *args, **param): + pass diff --git a/cinder/tests/api/v1/test_limits.py b/cinder/tests/api/v1/test_limits.py new file mode 100644 index 0000000000..74110bb872 --- /dev/null +++ b/cinder/tests/api/v1/test_limits.py @@ -0,0 +1,895 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Tests dealing with HTTP rate-limiting. +""" + +import httplib +import StringIO +from xml.dom import minidom + +from lxml import etree +import webob + +from cinder.api.v1 import limits +from cinder.api import views +from cinder.api import xmlutil +import cinder.context +from cinder.openstack.common import jsonutils +from cinder import test + + +TEST_LIMITS = [ + limits.Limit("GET", "/delayed", "^/delayed", 1, limits.PER_MINUTE), + limits.Limit("POST", "*", ".*", 7, limits.PER_MINUTE), + limits.Limit("POST", "/volumes", "^/volumes", 3, limits.PER_MINUTE), + limits.Limit("PUT", "*", "", 10, limits.PER_MINUTE), + limits.Limit("PUT", "/volumes", "^/volumes", 5, limits.PER_MINUTE), +] +NS = { + 'atom': 'http://www.w3.org/2005/Atom', + 'ns': 'http://docs.openstack.org/common/api/v1.0' +} + + +class BaseLimitTestSuite(test.TestCase): + """Base test suite which provides relevant stubs and time abstraction.""" + + def setUp(self): + super(BaseLimitTestSuite, self).setUp() + self.time = 0.0 + self.stubs.Set(limits.Limit, "_get_time", self._get_time) + self.absolute_limits = {} + + def stub_get_project_quotas(context, project_id, usages=True): + return dict((k, dict(limit=v)) + for k, v in self.absolute_limits.items()) + + self.stubs.Set(cinder.quota.QUOTAS, "get_project_quotas", + stub_get_project_quotas) + + def _get_time(self): + """Return the "time" according to this test suite.""" + return self.time + + +class LimitsControllerTest(BaseLimitTestSuite): + """ + Tests for `limits.LimitsController` class. 
+ """ + + def setUp(self): + """Run before each test.""" + super(LimitsControllerTest, self).setUp() + self.controller = limits.create_resource() + + def _get_index_request(self, accept_header="application/json"): + """Helper to set routing arguments.""" + request = webob.Request.blank("/") + request.accept = accept_header + request.environ["wsgiorg.routing_args"] = (None, { + "action": "index", + "controller": "", + }) + context = cinder.context.RequestContext('testuser', 'testproject') + request.environ["cinder.context"] = context + return request + + def _populate_limits(self, request): + """Put limit info into a request.""" + _limits = [ + limits.Limit("GET", "*", ".*", 10, 60).display(), + limits.Limit("POST", "*", ".*", 5, 60 * 60).display(), + limits.Limit("GET", "changes-since*", "changes-since", + 5, 60).display(), + ] + request.environ["cinder.limits"] = _limits + return request + + def test_empty_index_json(self): + """Test getting empty limit details in JSON.""" + request = self._get_index_request() + response = request.get_response(self.controller) + expected = { + "limits": { + "rate": [], + "absolute": {}, + }, + } + body = jsonutils.loads(response.body) + self.assertEqual(expected, body) + + def test_index_json(self): + """Test getting limit details in JSON.""" + request = self._get_index_request() + request = self._populate_limits(request) + self.absolute_limits = { + 'gigabytes': 512, + 'volumes': 5, + } + response = request.get_response(self.controller) + expected = { + "limits": { + "rate": [ + { + "regex": ".*", + "uri": "*", + "limit": [ + { + "verb": "GET", + "next-available": "1970-01-01T00:00:00Z", + "unit": "MINUTE", + "value": 10, + "remaining": 10, + }, + { + "verb": "POST", + "next-available": "1970-01-01T00:00:00Z", + "unit": "HOUR", + "value": 5, + "remaining": 5, + }, + ], + }, + { + "regex": "changes-since", + "uri": "changes-since*", + "limit": [ + { + "verb": "GET", + "next-available": "1970-01-01T00:00:00Z", + "unit": "MINUTE", + "value": 5, + "remaining": 5, + }, + ], + }, + + ], + "absolute": {"maxTotalVolumeGigabytes": 512, + "maxTotalVolumes": 5, }, + }, + } + body = jsonutils.loads(response.body) + self.assertEqual(expected, body) + + def _populate_limits_diff_regex(self, request): + """Put limit info into a request.""" + _limits = [ + limits.Limit("GET", "*", ".*", 10, 60).display(), + limits.Limit("GET", "*", "*.*", 10, 60).display(), + ] + request.environ["cinder.limits"] = _limits + return request + + def test_index_diff_regex(self): + """Test getting limit details in JSON.""" + request = self._get_index_request() + request = self._populate_limits_diff_regex(request) + response = request.get_response(self.controller) + expected = { + "limits": { + "rate": [ + { + "regex": ".*", + "uri": "*", + "limit": [ + { + "verb": "GET", + "next-available": "1970-01-01T00:00:00Z", + "unit": "MINUTE", + "value": 10, + "remaining": 10, + }, + ], + }, + { + "regex": "*.*", + "uri": "*", + "limit": [ + { + "verb": "GET", + "next-available": "1970-01-01T00:00:00Z", + "unit": "MINUTE", + "value": 10, + "remaining": 10, + }, + ], + }, + + ], + "absolute": {}, + }, + } + body = jsonutils.loads(response.body) + self.assertEqual(expected, body) + + def _test_index_absolute_limits_json(self, expected): + request = self._get_index_request() + response = request.get_response(self.controller) + body = jsonutils.loads(response.body) + self.assertEqual(expected, body['limits']['absolute']) + + def test_index_ignores_extra_absolute_limits_json(self): + self.absolute_limits = 
{'unknown_limit': 9001} + self._test_index_absolute_limits_json({}) + + +class TestLimiter(limits.Limiter): + pass + + +class LimitMiddlewareTest(BaseLimitTestSuite): + """ + Tests for the `limits.RateLimitingMiddleware` class. + """ + + @webob.dec.wsgify + def _empty_app(self, request): + """Do-nothing WSGI app.""" + pass + + def setUp(self): + """Prepare middleware for use through fake WSGI app.""" + super(LimitMiddlewareTest, self).setUp() + _limits = '(GET, *, .*, 1, MINUTE)' + self.app = limits.RateLimitingMiddleware(self._empty_app, _limits, + "%s.TestLimiter" % + self.__class__.__module__) + + def test_limit_class(self): + """Test that middleware selected correct limiter class.""" + assert isinstance(self.app._limiter, TestLimiter) + + def test_good_request(self): + """Test successful GET request through middleware.""" + request = webob.Request.blank("/") + response = request.get_response(self.app) + self.assertEqual(200, response.status_int) + + def test_limited_request_json(self): + """Test a rate-limited (413) GET request through middleware.""" + request = webob.Request.blank("/") + response = request.get_response(self.app) + self.assertEqual(200, response.status_int) + + request = webob.Request.blank("/") + response = request.get_response(self.app) + self.assertEqual(response.status_int, 413) + + self.assertTrue('Retry-After' in response.headers) + retry_after = int(response.headers['Retry-After']) + self.assertAlmostEqual(retry_after, 60, 1) + + body = jsonutils.loads(response.body) + expected = "Only 1 GET request(s) can be made to * every minute." + value = body["overLimitFault"]["details"].strip() + self.assertEqual(value, expected) + + def test_limited_request_xml(self): + """Test a rate-limited (413) response as XML""" + request = webob.Request.blank("/") + response = request.get_response(self.app) + self.assertEqual(200, response.status_int) + + request = webob.Request.blank("/") + request.accept = "application/xml" + response = request.get_response(self.app) + self.assertEqual(response.status_int, 413) + + root = minidom.parseString(response.body).childNodes[0] + expected = "Only 1 GET request(s) can be made to * every minute." + + details = root.getElementsByTagName("details") + self.assertEqual(details.length, 1) + + value = details.item(0).firstChild.data.strip() + self.assertEqual(value, expected) + + +class LimitTest(BaseLimitTestSuite): + """ + Tests for the `limits.Limit` class. + """ + + def test_GET_no_delay(self): + """Test a limit handles 1 GET per second.""" + limit = limits.Limit("GET", "*", ".*", 1, 1) + delay = limit("GET", "/anything") + self.assertEqual(None, delay) + self.assertEqual(0, limit.next_request) + self.assertEqual(0, limit.last_request) + + def test_GET_delay(self): + """Test two calls to 1 GET per second limit.""" + limit = limits.Limit("GET", "*", ".*", 1, 1) + delay = limit("GET", "/anything") + self.assertEqual(None, delay) + + delay = limit("GET", "/anything") + self.assertEqual(1, delay) + self.assertEqual(1, limit.next_request) + self.assertEqual(0, limit.last_request) + + self.time += 4 + + delay = limit("GET", "/anything") + self.assertEqual(None, delay) + self.assertEqual(4, limit.next_request) + self.assertEqual(4, limit.last_request) + + +class ParseLimitsTest(BaseLimitTestSuite): + """ + Tests for the default limits parser in the in-memory + `limits.Limiter` class. 
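+
+    Rules use the form '(verb, uri, regex, value, unit)'; for example,
+    '(POST, /volumes, ^/volumes, 3, MINUTE)' allows three POSTs to
+    /volumes per minute.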
+ """ + + def test_invalid(self): + """Test that parse_limits() handles invalid input correctly.""" + self.assertRaises(ValueError, limits.Limiter.parse_limits, + ';;;;;') + + def test_bad_rule(self): + """Test that parse_limits() handles bad rules correctly.""" + self.assertRaises(ValueError, limits.Limiter.parse_limits, + 'GET, *, .*, 20, minute') + + def test_missing_arg(self): + """Test that parse_limits() handles missing args correctly.""" + self.assertRaises(ValueError, limits.Limiter.parse_limits, + '(GET, *, .*, 20)') + + def test_bad_value(self): + """Test that parse_limits() handles bad values correctly.""" + self.assertRaises(ValueError, limits.Limiter.parse_limits, + '(GET, *, .*, foo, minute)') + + def test_bad_unit(self): + """Test that parse_limits() handles bad units correctly.""" + self.assertRaises(ValueError, limits.Limiter.parse_limits, + '(GET, *, .*, 20, lightyears)') + + def test_multiple_rules(self): + """Test that parse_limits() handles multiple rules correctly.""" + try: + l = limits.Limiter.parse_limits('(get, *, .*, 20, minute);' + '(PUT, /foo*, /foo.*, 10, hour);' + '(POST, /bar*, /bar.*, 5, second);' + '(Say, /derp*, /derp.*, 1, day)') + except ValueError, e: + assert False, str(e) + + # Make sure the number of returned limits are correct + self.assertEqual(len(l), 4) + + # Check all the verbs... + expected = ['GET', 'PUT', 'POST', 'SAY'] + self.assertEqual([t.verb for t in l], expected) + + # ...the URIs... + expected = ['*', '/foo*', '/bar*', '/derp*'] + self.assertEqual([t.uri for t in l], expected) + + # ...the regexes... + expected = ['.*', '/foo.*', '/bar.*', '/derp.*'] + self.assertEqual([t.regex for t in l], expected) + + # ...the values... + expected = [20, 10, 5, 1] + self.assertEqual([t.value for t in l], expected) + + # ...and the units... + expected = [limits.PER_MINUTE, limits.PER_HOUR, + limits.PER_SECOND, limits.PER_DAY] + self.assertEqual([t.unit for t in l], expected) + + +class LimiterTest(BaseLimitTestSuite): + """ + Tests for the in-memory `limits.Limiter` class. + """ + + def setUp(self): + """Run before each test.""" + super(LimiterTest, self).setUp() + userlimits = {'user:user3': ''} + self.limiter = limits.Limiter(TEST_LIMITS, **userlimits) + + def _check(self, num, verb, url, username=None): + """Check and yield results from checks.""" + for x in xrange(num): + yield self.limiter.check_for_delay(verb, url, username)[0] + + def _check_sum(self, num, verb, url, username=None): + """Check and sum results from checks.""" + results = self._check(num, verb, url, username) + return sum(item for item in results if item) + + def test_no_delay_GET(self): + """ + Simple test to ensure no delay on a single call for a limit verb we + didn"t set. + """ + delay = self.limiter.check_for_delay("GET", "/anything") + self.assertEqual(delay, (None, None)) + + def test_no_delay_PUT(self): + """ + Simple test to ensure no delay on a single call for a known limit. + """ + delay = self.limiter.check_for_delay("PUT", "/anything") + self.assertEqual(delay, (None, None)) + + def test_delay_PUT(self): + """ + Ensure the 11th PUT will result in a delay of 6.0 seconds until + the next request will be granced. + """ + expected = [None] * 10 + [6.0] + results = list(self._check(11, "PUT", "/anything")) + + self.assertEqual(expected, results) + + def test_delay_POST(self): + """ + Ensure the 8th POST will result in a delay of 6.0 seconds until + the next request will be granced. 
+ """ + expected = [None] * 7 + results = list(self._check(7, "POST", "/anything")) + self.assertEqual(expected, results) + + expected = 60.0 / 7.0 + results = self._check_sum(1, "POST", "/anything") + self.failUnlessAlmostEqual(expected, results, 8) + + def test_delay_GET(self): + """ + Ensure the 11th GET will result in NO delay. + """ + expected = [None] * 11 + results = list(self._check(11, "GET", "/anything")) + + self.assertEqual(expected, results) + + def test_delay_PUT_volumes(self): + """ + Ensure PUT on /volumes limits at 5 requests, and PUT elsewhere is still + OK after 5 requests...but then after 11 total requests, PUT limiting + kicks in. + """ + # First 6 requests on PUT /volumes + expected = [None] * 5 + [12.0] + results = list(self._check(6, "PUT", "/volumes")) + self.assertEqual(expected, results) + + # Next 5 request on PUT /anything + expected = [None] * 4 + [6.0] + results = list(self._check(5, "PUT", "/anything")) + self.assertEqual(expected, results) + + def test_delay_PUT_wait(self): + """ + Ensure after hitting the limit and then waiting for the correct + amount of time, the limit will be lifted. + """ + expected = [None] * 10 + [6.0] + results = list(self._check(11, "PUT", "/anything")) + self.assertEqual(expected, results) + + # Advance time + self.time += 6.0 + + expected = [None, 6.0] + results = list(self._check(2, "PUT", "/anything")) + self.assertEqual(expected, results) + + def test_multiple_delays(self): + """ + Ensure multiple requests still get a delay. + """ + expected = [None] * 10 + [6.0] * 10 + results = list(self._check(20, "PUT", "/anything")) + self.assertEqual(expected, results) + + self.time += 1.0 + + expected = [5.0] * 10 + results = list(self._check(10, "PUT", "/anything")) + self.assertEqual(expected, results) + + def test_user_limit(self): + """ + Test user-specific limits. + """ + self.assertEqual(self.limiter.levels['user3'], []) + + def test_multiple_users(self): + """ + Tests involving multiple users. + """ + # User1 + expected = [None] * 10 + [6.0] * 10 + results = list(self._check(20, "PUT", "/anything", "user1")) + self.assertEqual(expected, results) + + # User2 + expected = [None] * 10 + [6.0] * 5 + results = list(self._check(15, "PUT", "/anything", "user2")) + self.assertEqual(expected, results) + + # User3 + expected = [None] * 20 + results = list(self._check(20, "PUT", "/anything", "user3")) + self.assertEqual(expected, results) + + self.time += 1.0 + + # User1 again + expected = [5.0] * 10 + results = list(self._check(10, "PUT", "/anything", "user1")) + self.assertEqual(expected, results) + + self.time += 1.0 + + # User1 again + expected = [4.0] * 5 + results = list(self._check(5, "PUT", "/anything", "user2")) + self.assertEqual(expected, results) + + +class WsgiLimiterTest(BaseLimitTestSuite): + """ + Tests for `limits.WsgiLimiter` class. + """ + + def setUp(self): + """Run before each test.""" + super(WsgiLimiterTest, self).setUp() + self.app = limits.WsgiLimiter(TEST_LIMITS) + + def _request_data(self, verb, path): + """Get data decribing a limit request verb/path.""" + return jsonutils.dumps({"verb": verb, "path": path}) + + def _request(self, verb, url, username=None): + """Make sure that POSTing to the given url causes the given username + to perform the given action. Make the internal rate limiter return + delay and make sure that the WSGI app returns the correct response. 
+ """ + if username: + request = webob.Request.blank("/%s" % username) + else: + request = webob.Request.blank("/") + + request.method = "POST" + request.body = self._request_data(verb, url) + response = request.get_response(self.app) + + if "X-Wait-Seconds" in response.headers: + self.assertEqual(response.status_int, 403) + return response.headers["X-Wait-Seconds"] + + self.assertEqual(response.status_int, 204) + + def test_invalid_methods(self): + """Only POSTs should work.""" + requests = [] + for method in ["GET", "PUT", "DELETE", "HEAD", "OPTIONS"]: + request = webob.Request.blank("/", method=method) + response = request.get_response(self.app) + self.assertEqual(response.status_int, 405) + + def test_good_url(self): + delay = self._request("GET", "/something") + self.assertEqual(delay, None) + + def test_escaping(self): + delay = self._request("GET", "/something/jump%20up") + self.assertEqual(delay, None) + + def test_response_to_delays(self): + delay = self._request("GET", "/delayed") + self.assertEqual(delay, None) + + delay = self._request("GET", "/delayed") + self.assertEqual(delay, '60.00') + + def test_response_to_delays_usernames(self): + delay = self._request("GET", "/delayed", "user1") + self.assertEqual(delay, None) + + delay = self._request("GET", "/delayed", "user2") + self.assertEqual(delay, None) + + delay = self._request("GET", "/delayed", "user1") + self.assertEqual(delay, '60.00') + + delay = self._request("GET", "/delayed", "user2") + self.assertEqual(delay, '60.00') + + +class FakeHttplibSocket(object): + """ + Fake `httplib.HTTPResponse` replacement. + """ + + def __init__(self, response_string): + """Initialize new `FakeHttplibSocket`.""" + self._buffer = StringIO.StringIO(response_string) + + def makefile(self, _mode, _other): + """Returns the socket's internal buffer.""" + return self._buffer + + +class FakeHttplibConnection(object): + """ + Fake `httplib.HTTPConnection`. + """ + + def __init__(self, app, host): + """ + Initialize `FakeHttplibConnection`. + """ + self.app = app + self.host = host + + def request(self, method, path, body="", headers=None): + """ + Requests made via this connection actually get translated and routed + into our WSGI app, we then wait for the response and turn it back into + an `httplib.HTTPResponse`. + """ + if not headers: + headers = {} + + req = webob.Request.blank(path) + req.method = method + req.headers = headers + req.host = self.host + req.body = body + + resp = str(req.get_response(self.app)) + resp = "HTTP/1.0 %s" % resp + sock = FakeHttplibSocket(resp) + self.http_response = httplib.HTTPResponse(sock) + self.http_response.begin() + + def getresponse(self): + """Return our generated response from the request.""" + return self.http_response + + +def wire_HTTPConnection_to_WSGI(host, app): + """Monkeypatches HTTPConnection so that if you try to connect to host, you + are instead routed straight to the given WSGI app. + + After calling this method, when any code calls + + httplib.HTTPConnection(host) + + the connection object will be a fake. Its requests will be sent directly + to the given WSGI app rather than through a socket. + + Code connecting to hosts other than host will not be affected. + + This method may be called multiple times to map different hosts to + different apps. + + This method returns the original HTTPConnection object, so that the caller + can restore the default HTTPConnection interface (for all hosts). 
+ """ + class HTTPConnectionDecorator(object): + """Wraps the real HTTPConnection class so that when you instantiate + the class you might instead get a fake instance.""" + + def __init__(self, wrapped): + self.wrapped = wrapped + + def __call__(self, connection_host, *args, **kwargs): + if connection_host == host: + return FakeHttplibConnection(app, host) + else: + return self.wrapped(connection_host, *args, **kwargs) + + oldHTTPConnection = httplib.HTTPConnection + httplib.HTTPConnection = HTTPConnectionDecorator(httplib.HTTPConnection) + return oldHTTPConnection + + +class WsgiLimiterProxyTest(BaseLimitTestSuite): + """ + Tests for the `limits.WsgiLimiterProxy` class. + """ + + def setUp(self): + """ + Do some nifty HTTP/WSGI magic which allows for WSGI to be called + directly by something like the `httplib` library. + """ + super(WsgiLimiterProxyTest, self).setUp() + self.app = limits.WsgiLimiter(TEST_LIMITS) + self.oldHTTPConnection = ( + wire_HTTPConnection_to_WSGI("169.254.0.1:80", self.app)) + self.proxy = limits.WsgiLimiterProxy("169.254.0.1:80") + + def test_200(self): + """Successful request test.""" + delay = self.proxy.check_for_delay("GET", "/anything") + self.assertEqual(delay, (None, None)) + + def test_403(self): + """Forbidden request test.""" + delay = self.proxy.check_for_delay("GET", "/delayed") + self.assertEqual(delay, (None, None)) + + delay, error = self.proxy.check_for_delay("GET", "/delayed") + error = error.strip() + + expected = ("60.00", "403 Forbidden\n\nOnly 1 GET request(s) can be " + "made to /delayed every minute.") + + self.assertEqual((delay, error), expected) + + def tearDown(self): + # restore original HTTPConnection object + httplib.HTTPConnection = self.oldHTTPConnection + + +class LimitsViewBuilderTest(test.TestCase): + def setUp(self): + super(LimitsViewBuilderTest, self).setUp() + self.view_builder = views.limits.ViewBuilder() + self.rate_limits = [{"URI": "*", + "regex": ".*", + "value": 10, + "verb": "POST", + "remaining": 2, + "unit": "MINUTE", + "resetTime": 1311272226}, + {"URI": "*/volumes", + "regex": "^/volumes", + "value": 50, + "verb": "POST", + "remaining": 10, + "unit": "DAY", + "resetTime": 1311272226}] + self.absolute_limits = {"metadata_items": 1, + "injected_files": 5, + "injected_file_content_bytes": 5} + + def test_build_limits(self): + tdate = "2011-07-21T18:17:06Z" + expected_limits = \ + {"limits": {"rate": [{"uri": "*", + "regex": ".*", + "limit": [{"value": 10, + "verb": "POST", + "remaining": 2, + "unit": "MINUTE", + "next-available": tdate}]}, + {"uri": "*/volumes", + "regex": "^/volumes", + "limit": [{"value": 50, + "verb": "POST", + "remaining": 10, + "unit": "DAY", + "next-available": tdate}]}], + "absolute": {"maxServerMeta": 1, + "maxImageMeta": 1, + "maxPersonality": 5, + "maxPersonalitySize": 5}}} + + output = self.view_builder.build(self.rate_limits, + self.absolute_limits) + self.assertDictMatch(output, expected_limits) + + def test_build_limits_empty_limits(self): + expected_limits = {"limits": {"rate": [], + "absolute": {}}} + + abs_limits = {} + rate_limits = [] + output = self.view_builder.build(rate_limits, abs_limits) + self.assertDictMatch(output, expected_limits) + + +class LimitsXMLSerializationTest(test.TestCase): + def test_xml_declaration(self): + serializer = limits.LimitsTemplate() + + fixture = {"limits": { + "rate": [], + "absolute": {}}} + + output = serializer.serialize(fixture) + has_dec = output.startswith("") + self.assertTrue(has_dec) + + def test_index(self): + serializer = 
limits.LimitsTemplate() + fixture = { + "limits": { + "rate": [{ + "uri": "*", + "regex": ".*", + "limit": [{ + "value": 10, + "verb": "POST", + "remaining": 2, + "unit": "MINUTE", + "next-available": "2011-12-15T22:42:45Z"}]}, + {"uri": "*/servers", + "regex": "^/servers", + "limit": [{ + "value": 50, + "verb": "POST", + "remaining": 10, + "unit": "DAY", + "next-available": "2011-12-15T22:42:45Z"}]}], + "absolute": {"maxServerMeta": 1, + "maxImageMeta": 1, + "maxPersonality": 5, + "maxPersonalitySize": 10240}}} + + output = serializer.serialize(fixture) + root = etree.XML(output) + xmlutil.validate_schema(root, 'limits') + + #verify absolute limits + absolutes = root.xpath('ns:absolute/ns:limit', namespaces=NS) + self.assertEqual(len(absolutes), 4) + for limit in absolutes: + name = limit.get('name') + value = limit.get('value') + self.assertEqual(value, str(fixture['limits']['absolute'][name])) + + #verify rate limits + rates = root.xpath('ns:rates/ns:rate', namespaces=NS) + self.assertEqual(len(rates), 2) + for i, rate in enumerate(rates): + for key in ['uri', 'regex']: + self.assertEqual(rate.get(key), + str(fixture['limits']['rate'][i][key])) + rate_limits = rate.xpath('ns:limit', namespaces=NS) + self.assertEqual(len(rate_limits), 1) + for j, limit in enumerate(rate_limits): + for key in ['verb', 'value', 'remaining', 'unit', + 'next-available']: + self.assertEqual( + limit.get(key), + str(fixture['limits']['rate'][i]['limit'][j][key])) + + def test_index_no_limits(self): + serializer = limits.LimitsTemplate() + + fixture = {"limits": { + "rate": [], + "absolute": {}}} + + output = serializer.serialize(fixture) + root = etree.XML(output) + xmlutil.validate_schema(root, 'limits') + + #verify absolute limits + absolutes = root.xpath('ns:absolute/ns:limit', namespaces=NS) + self.assertEqual(len(absolutes), 0) + + #verify rate limits + rates = root.xpath('ns:rates/ns:rate', namespaces=NS) + self.assertEqual(len(rates), 0) diff --git a/cinder/tests/api/v1/test_snapshot_metadata.py b/cinder/tests/api/v1/test_snapshot_metadata.py new file mode 100644 index 0000000000..5720e88dc9 --- /dev/null +++ b/cinder/tests/api/v1/test_snapshot_metadata.py @@ -0,0 +1,458 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
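+
+# Note: the tests below swap selected cinder.db functions for the local
+# stubs defined in this module, using stubout (self.stubs.Set, provided by
+# cinder.test.TestCase). A minimal sketch of the pattern, illustrative
+# only and not used directly:
+#
+#     def return_one_item(context, snapshot_id):
+#         return {"key1": "value1"}
+#
+#     self.stubs.Set(cinder.db, 'snapshot_metadata_get', return_one_item)
+#     # cinder.db.snapshot_metadata_get(...) now returns the stubbed
+#     # mapping until the stubs are reset in tearDown.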
+ +import uuid + +from oslo.config import cfg +import webob + +from cinder.api import extensions +from cinder.api.v1 import snapshot_metadata +from cinder.api.v1 import snapshots +import cinder.db +from cinder import exception +from cinder.openstack.common import jsonutils +from cinder import test +from cinder.tests.api import fakes + + +CONF = cfg.CONF + + +def return_create_snapshot_metadata_max(context, + snapshot_id, + metadata, + delete): + return stub_max_snapshot_metadata() + + +def return_create_snapshot_metadata(context, snapshot_id, metadata, delete): + return stub_snapshot_metadata() + + +def return_snapshot_metadata(context, snapshot_id): + if not isinstance(snapshot_id, str) or not len(snapshot_id) == 36: + msg = 'id %s must be a uuid in return snapshot metadata' % snapshot_id + raise Exception(msg) + return stub_snapshot_metadata() + + +def return_empty_snapshot_metadata(context, snapshot_id): + return {} + + +def delete_snapshot_metadata(context, snapshot_id, key): + pass + + +def stub_snapshot_metadata(): + metadata = { + "key1": "value1", + "key2": "value2", + "key3": "value3", + } + return metadata + + +def stub_max_snapshot_metadata(): + metadata = {"metadata": {}} + for num in range(CONF.quota_metadata_items): + metadata['metadata']['key%i' % num] = "blah" + return metadata + + +def return_snapshot(context, snapshot_id): + return {'id': '0cc3346e-9fef-4445-abe6-5d2b2690ec64', + 'name': 'fake', + 'status': 'available', + 'metadata': {}} + + +def return_volume(context, volume_id): + return {'id': 'fake-vol-id', + 'size': 100, + 'name': 'fake', + 'host': 'fake-host', + 'status': 'available', + 'metadata': {}} + + +def return_snapshot_nonexistent(context, snapshot_id): + raise exception.SnapshotNotFound('bogus test message') + + +def fake_update_snapshot_metadata(self, context, snapshot, diff): + pass + + +class SnapshotMetaDataTest(test.TestCase): + + def setUp(self): + super(SnapshotMetaDataTest, self).setUp() + self.volume_api = cinder.volume.api.API() + fakes.stub_out_key_pair_funcs(self.stubs) + self.stubs.Set(cinder.db, 'volume_get', return_volume) + self.stubs.Set(cinder.db, 'snapshot_get', return_snapshot) + self.stubs.Set(cinder.db, 'snapshot_metadata_get', + return_snapshot_metadata) + + self.stubs.Set(self.volume_api, 'update_snapshot_metadata', + fake_update_snapshot_metadata) + + self.ext_mgr = extensions.ExtensionManager() + self.ext_mgr.extensions = {} + self.snapshot_controller = snapshots.SnapshotsController(self.ext_mgr) + self.controller = snapshot_metadata.Controller() + self.id = str(uuid.uuid4()) + self.url = '/v1/fake/snapshots/%s/metadata' % self.id + + snap = {"volume_size": 100, + "volume_id": "fake-vol-id", + "display_name": "Volume Test Name", + "display_description": "Volume Test Desc", + "availability_zone": "zone1:host1", + "host": "fake-host", + "metadata": {}} + body = {"snapshot": snap} + req = fakes.HTTPRequest.blank('/v1/snapshots') + self.snapshot_controller.create(req, body) + + def test_index(self): + req = fakes.HTTPRequest.blank(self.url) + res_dict = self.controller.index(req, self.id) + + expected = { + 'metadata': { + 'key1': 'value1', + 'key2': 'value2', + 'key3': 'value3', + }, + } + self.assertEqual(expected, res_dict) + + def test_index_nonexistent_snapshot(self): + self.stubs.Set(cinder.db, 'snapshot_metadata_get', + return_snapshot_nonexistent) + req = fakes.HTTPRequest.blank(self.url) + self.assertRaises(webob.exc.HTTPNotFound, + self.controller.index, req, self.url) + + def test_index_no_data(self): + 
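+        # The metadata stub returns {}, so index should come back with an
+        # empty 'metadata' mapping rather than a 404.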
self.stubs.Set(cinder.db, 'snapshot_metadata_get', + return_empty_snapshot_metadata) + req = fakes.HTTPRequest.blank(self.url) + res_dict = self.controller.index(req, self.id) + expected = {'metadata': {}} + self.assertEqual(expected, res_dict) + + def test_show(self): + req = fakes.HTTPRequest.blank(self.url + '/key2') + res_dict = self.controller.show(req, self.id, 'key2') + expected = {'meta': {'key2': 'value2'}} + self.assertEqual(expected, res_dict) + + def test_show_nonexistent_snapshot(self): + self.stubs.Set(cinder.db, 'snapshot_metadata_get', + return_snapshot_nonexistent) + req = fakes.HTTPRequest.blank(self.url + '/key2') + self.assertRaises(webob.exc.HTTPNotFound, + self.controller.show, req, self.id, 'key2') + + def test_show_meta_not_found(self): + self.stubs.Set(cinder.db, 'snapshot_metadata_get', + return_empty_snapshot_metadata) + req = fakes.HTTPRequest.blank(self.url + '/key6') + self.assertRaises(webob.exc.HTTPNotFound, + self.controller.show, req, self.id, 'key6') + + def test_delete(self): + self.stubs.Set(cinder.db, 'snapshot_metadata_get', + return_snapshot_metadata) + self.stubs.Set(cinder.db, 'snapshot_metadata_delete', + delete_snapshot_metadata) + req = fakes.HTTPRequest.blank(self.url + '/key2') + req.method = 'DELETE' + res = self.controller.delete(req, self.id, 'key2') + + self.assertEqual(200, res.status_int) + + def test_delete_nonexistent_snapshot(self): + self.stubs.Set(cinder.db, 'snapshot_get', + return_snapshot_nonexistent) + req = fakes.HTTPRequest.blank(self.url + '/key1') + req.method = 'DELETE' + self.assertRaises(webob.exc.HTTPNotFound, + self.controller.delete, req, self.id, 'key1') + + def test_delete_meta_not_found(self): + self.stubs.Set(cinder.db, 'snapshot_metadata_get', + return_empty_snapshot_metadata) + req = fakes.HTTPRequest.blank(self.url + '/key6') + req.method = 'DELETE' + self.assertRaises(webob.exc.HTTPNotFound, + self.controller.delete, req, self.id, 'key6') + + def test_create(self): + self.stubs.Set(cinder.db, 'snapshot_metadata_get', + return_empty_snapshot_metadata) + self.stubs.Set(cinder.db, 'snapshot_metadata_update', + return_create_snapshot_metadata) + + req = fakes.HTTPRequest.blank('/v1/snapshot_metadata') + req.method = 'POST' + req.content_type = "application/json" + body = {"metadata": {"key9": "value9"}} + req.body = jsonutils.dumps(body) + res_dict = self.controller.create(req, self.id, body) + self.assertEqual(body, res_dict) + + def test_create_empty_body(self): + self.stubs.Set(cinder.db, 'snapshot_metadata_update', + return_create_snapshot_metadata) + req = fakes.HTTPRequest.blank(self.url) + req.method = 'POST' + req.headers["content-type"] = "application/json" + + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.create, req, self.id, None) + + def test_create_item_empty_key(self): + self.stubs.Set(cinder.db, 'snapshot_metadata_update', + return_create_snapshot_metadata) + req = fakes.HTTPRequest.blank(self.url + '/key1') + req.method = 'PUT' + body = {"meta": {"": "value1"}} + req.body = jsonutils.dumps(body) + req.headers["content-type"] = "application/json" + + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.create, req, self.id, body) + + def test_create_item_key_too_long(self): + self.stubs.Set(cinder.db, 'snapshot_metadata_update', + return_create_snapshot_metadata) + req = fakes.HTTPRequest.blank(self.url + '/key1') + req.method = 'PUT' + body = {"meta": {("a" * 260): "value1"}} + req.body = jsonutils.dumps(body) + req.headers["content-type"] = "application/json" + + 
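+        # A missing body is rejected with 400 before the (stubbed) db
+        # update is ever consulted.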
self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.create, + req, self.id, body) + + def test_create_nonexistent_snapshot(self): + self.stubs.Set(cinder.db, 'snapshot_get', + return_snapshot_nonexistent) + self.stubs.Set(cinder.db, 'snapshot_metadata_get', + return_snapshot_metadata) + self.stubs.Set(cinder.db, 'snapshot_metadata_update', + return_create_snapshot_metadata) + + req = fakes.HTTPRequest.blank('/v1/snapshot_metadata') + req.method = 'POST' + req.content_type = "application/json" + body = {"metadata": {"key9": "value9"}} + req.body = jsonutils.dumps(body) + self.assertRaises(webob.exc.HTTPNotFound, + self.controller.create, req, self.id, body) + + def test_update_all(self): + self.stubs.Set(cinder.db, 'snapshot_metadata_update', + return_create_snapshot_metadata) + req = fakes.HTTPRequest.blank(self.url) + req.method = 'PUT' + req.content_type = "application/json" + expected = { + 'metadata': { + 'key10': 'value10', + 'key99': 'value99', + }, + } + req.body = jsonutils.dumps(expected) + res_dict = self.controller.update_all(req, self.id, expected) + + self.assertEqual(expected, res_dict) + + def test_update_all_empty_container(self): + self.stubs.Set(cinder.db, 'snapshot_metadata_update', + return_create_snapshot_metadata) + req = fakes.HTTPRequest.blank(self.url) + req.method = 'PUT' + req.content_type = "application/json" + expected = {'metadata': {}} + req.body = jsonutils.dumps(expected) + res_dict = self.controller.update_all(req, self.id, expected) + + self.assertEqual(expected, res_dict) + + def test_update_all_malformed_container(self): + self.stubs.Set(cinder.db, 'snapshot_metadata_update', + return_create_snapshot_metadata) + req = fakes.HTTPRequest.blank(self.url) + req.method = 'PUT' + req.content_type = "application/json" + expected = {'meta': {}} + req.body = jsonutils.dumps(expected) + + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.update_all, req, self.id, expected) + + def test_update_all_malformed_data(self): + self.stubs.Set(cinder.db, 'snapshot_metadata_update', + return_create_snapshot_metadata) + req = fakes.HTTPRequest.blank(self.url) + req.method = 'PUT' + req.content_type = "application/json" + expected = {'metadata': ['asdf']} + req.body = jsonutils.dumps(expected) + + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.update_all, req, self.id, expected) + + def test_update_all_nonexistent_snapshot(self): + self.stubs.Set(cinder.db, 'snapshot_get', return_snapshot_nonexistent) + req = fakes.HTTPRequest.blank(self.url) + req.method = 'PUT' + req.content_type = "application/json" + body = {'metadata': {'key10': 'value10'}} + req.body = jsonutils.dumps(body) + + self.assertRaises(webob.exc.HTTPNotFound, + self.controller.update_all, req, '100', body) + + def test_update_item(self): + self.stubs.Set(cinder.db, 'snapshot_metadata_update', + return_create_snapshot_metadata) + req = fakes.HTTPRequest.blank(self.url + '/key1') + req.method = 'PUT' + body = {"meta": {"key1": "value1"}} + req.body = jsonutils.dumps(body) + req.headers["content-type"] = "application/json" + res_dict = self.controller.update(req, self.id, 'key1', body) + expected = {'meta': {'key1': 'value1'}} + self.assertEqual(expected, res_dict) + + def test_update_item_nonexistent_snapshot(self): + self.stubs.Set(cinder.db, 'snapshot_get', + return_snapshot_nonexistent) + req = fakes.HTTPRequest.blank( + '/v1.1/fake/snapshots/asdf/metadata/key1') + req.method = 'PUT' + body = {"meta": {"key1": "value1"}} + req.body = jsonutils.dumps(body) + 
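+        # snapshot_get is stubbed to raise SnapshotNotFound, which the
+        # controller is expected to surface as a 404.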
req.headers["content-type"] = "application/json" + + self.assertRaises(webob.exc.HTTPNotFound, + self.controller.update, req, self.id, 'key1', body) + + def test_update_item_empty_body(self): + self.stubs.Set(cinder.db, 'snapshot_metadata_update', + return_create_snapshot_metadata) + req = fakes.HTTPRequest.blank(self.url + '/key1') + req.method = 'PUT' + req.headers["content-type"] = "application/json" + + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.update, req, self.id, 'key1', None) + + def test_update_item_empty_key(self): + self.stubs.Set(cinder.db, 'snapshot_metadata_update', + return_create_snapshot_metadata) + req = fakes.HTTPRequest.blank(self.url + '/key1') + req.method = 'PUT' + body = {"meta": {"": "value1"}} + req.body = jsonutils.dumps(body) + req.headers["content-type"] = "application/json" + + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.update, req, self.id, '', body) + + def test_update_item_key_too_long(self): + self.stubs.Set(cinder.db, 'snapshot_metadata_update', + return_create_snapshot_metadata) + req = fakes.HTTPRequest.blank(self.url + '/key1') + req.method = 'PUT' + body = {"meta": {("a" * 260): "value1"}} + req.body = jsonutils.dumps(body) + req.headers["content-type"] = "application/json" + + self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, + self.controller.update, + req, self.id, ("a" * 260), body) + + def test_update_item_value_too_long(self): + self.stubs.Set(cinder.db, 'snapshot_metadata_update', + return_create_snapshot_metadata) + req = fakes.HTTPRequest.blank(self.url + '/key1') + req.method = 'PUT' + body = {"meta": {"key1": ("a" * 260)}} + req.body = jsonutils.dumps(body) + req.headers["content-type"] = "application/json" + + self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, + self.controller.update, + req, self.id, "key1", body) + + def test_update_item_too_many_keys(self): + self.stubs.Set(cinder.db, 'snapshot_metadata_update', + return_create_snapshot_metadata) + req = fakes.HTTPRequest.blank(self.url + '/key1') + req.method = 'PUT' + body = {"meta": {"key1": "value1", "key2": "value2"}} + req.body = jsonutils.dumps(body) + req.headers["content-type"] = "application/json" + + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.update, req, self.id, 'key1', body) + + def test_update_item_body_uri_mismatch(self): + self.stubs.Set(cinder.db, 'snapshot_metadata_update', + return_create_snapshot_metadata) + req = fakes.HTTPRequest.blank(self.url + '/bad') + req.method = 'PUT' + body = {"meta": {"key1": "value1"}} + req.body = jsonutils.dumps(body) + req.headers["content-type"] = "application/json" + + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.update, req, self.id, 'bad', body) + + def test_invalid_metadata_items_on_create(self): + self.stubs.Set(cinder.db, 'snapshot_metadata_update', + return_create_snapshot_metadata) + req = fakes.HTTPRequest.blank(self.url) + req.method = 'POST' + req.headers["content-type"] = "application/json" + + #test for long key + data = {"metadata": {"a" * 260: "value1"}} + req.body = jsonutils.dumps(data) + self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, + self.controller.create, req, self.id, data) + + #test for long value + data = {"metadata": {"key": "v" * 260}} + req.body = jsonutils.dumps(data) + self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, + self.controller.create, req, self.id, data) + + #test for empty key. 
+ data = {"metadata": {"": "value1"}} + req.body = jsonutils.dumps(data) + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.create, req, self.id, data) diff --git a/cinder/tests/api/v1/test_snapshots.py b/cinder/tests/api/v1/test_snapshots.py new file mode 100644 index 0000000000..6a13179ac3 --- /dev/null +++ b/cinder/tests/api/v1/test_snapshots.py @@ -0,0 +1,417 @@ +# Copyright 2011 Denali Systems, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import datetime + +from lxml import etree +import webob + +from cinder.api.v1 import snapshots +from cinder import db +from cinder import exception +from cinder import flags +from cinder.openstack.common import log as logging +from cinder import test +from cinder.tests.api import fakes +from cinder.tests.api.v1 import stubs +from cinder import volume + + +FLAGS = flags.FLAGS +LOG = logging.getLogger(__name__) + +UUID = '00000000-0000-0000-0000-000000000001' +INVALID_UUID = '00000000-0000-0000-0000-000000000002' + + +def _get_default_snapshot_param(): + return {'id': UUID, + 'volume_id': 12, + 'status': 'available', + 'volume_size': 100, + 'created_at': None, + 'display_name': 'Default name', + 'display_description': 'Default description', } + + +def stub_snapshot_create(self, context, + volume_id, name, + description, metadata): + snapshot = _get_default_snapshot_param() + snapshot['volume_id'] = volume_id + snapshot['display_name'] = name + snapshot['display_description'] = description + snapshot['metadata'] = metadata + return snapshot + + +def stub_snapshot_delete(self, context, snapshot): + if snapshot['id'] != UUID: + raise exception.NotFound + + +def stub_snapshot_get(self, context, snapshot_id): + if snapshot_id != UUID: + raise exception.NotFound + + param = _get_default_snapshot_param() + return param + + +def stub_snapshot_get_all(self, context, search_opts=None): + param = _get_default_snapshot_param() + return [param] + + +class SnapshotApiTest(test.TestCase): + def setUp(self): + super(SnapshotApiTest, self).setUp() + self.controller = snapshots.SnapshotsController() + + self.stubs.Set(db, 'snapshot_get_all_by_project', + stubs.stub_snapshot_get_all_by_project) + self.stubs.Set(db, 'snapshot_get_all', + stubs.stub_snapshot_get_all) + + def test_snapshot_create(self): + self.stubs.Set(volume.api.API, "create_snapshot", stub_snapshot_create) + self.stubs.Set(volume.api.API, 'get', stubs.stub_volume_get) + snapshot = {"volume_id": '12', + "force": False, + "display_name": "Snapshot Test Name", + "display_description": "Snapshot Test Desc"} + body = dict(snapshot=snapshot) + req = fakes.HTTPRequest.blank('/v1/snapshots') + resp_dict = self.controller.create(req, body) + + self.assertTrue('snapshot' in resp_dict) + self.assertEqual(resp_dict['snapshot']['display_name'], + snapshot['display_name']) + self.assertEqual(resp_dict['snapshot']['display_description'], + snapshot['display_description']) + + def test_snapshot_create_force(self): + self.stubs.Set(volume.api.API, + "create_snapshot_force", + 
stub_snapshot_create) + self.stubs.Set(volume.api.API, 'get', stubs.stub_volume_get) + snapshot = {"volume_id": '12', + "force": True, + "display_name": "Snapshot Test Name", + "display_description": "Snapshot Test Desc"} + body = dict(snapshot=snapshot) + req = fakes.HTTPRequest.blank('/v1/snapshots') + resp_dict = self.controller.create(req, body) + + self.assertTrue('snapshot' in resp_dict) + self.assertEqual(resp_dict['snapshot']['display_name'], + snapshot['display_name']) + self.assertEqual(resp_dict['snapshot']['display_description'], + snapshot['display_description']) + + snapshot = {"volume_id": "12", + "force": "**&&^^%%$$##@@", + "display_name": "Snapshot Test Name", + "display_description": "Snapshot Test Desc"} + body = dict(snapshot=snapshot) + req = fakes.HTTPRequest.blank('/v1/snapshots') + self.assertRaises(exception.InvalidParameterValue, + self.controller.create, + req, + body) + + def test_snapshot_update(self): + self.stubs.Set(volume.api.API, "get_snapshot", stub_snapshot_get) + self.stubs.Set(volume.api.API, "update_snapshot", + stubs.stub_snapshot_update) + updates = {"display_name": "Updated Test Name", } + body = {"snapshot": updates} + req = fakes.HTTPRequest.blank('/v1/snapshots/%s' % UUID) + res_dict = self.controller.update(req, UUID, body) + expected = {'snapshot': { + 'id': UUID, + 'volume_id': 12, + 'status': 'available', + 'size': 100, + 'created_at': None, + 'display_name': 'Updated Test Name', + 'display_description': 'Default description', + 'metadata': {}, + }} + self.assertEquals(expected, res_dict) + + def test_snapshot_update_missing_body(self): + body = {} + req = fakes.HTTPRequest.blank('/v1/snapshots/%s' % UUID) + self.assertRaises(webob.exc.HTTPUnprocessableEntity, + self.controller.update, req, UUID, body) + + def test_snapshot_update_invalid_body(self): + body = {'display_name': 'missing top level snapshot key'} + req = fakes.HTTPRequest.blank('/v1/snapshots/%s' % UUID) + self.assertRaises(webob.exc.HTTPUnprocessableEntity, + self.controller.update, req, UUID, body) + + def test_snapshot_update_not_found(self): + self.stubs.Set(volume.api.API, "get_snapshot", stub_snapshot_get) + updates = { + "display_name": "Updated Test Name", + } + body = {"snapshot": updates} + req = fakes.HTTPRequest.blank('/v1/snapshots/not-the-uuid') + self.assertRaises(webob.exc.HTTPNotFound, self.controller.update, req, + 'not-the-uuid', body) + + def test_snapshot_delete(self): + self.stubs.Set(volume.api.API, "get_snapshot", stub_snapshot_get) + self.stubs.Set(volume.api.API, "delete_snapshot", stub_snapshot_delete) + + snapshot_id = UUID + req = fakes.HTTPRequest.blank('/v1/snapshots/%s' % snapshot_id) + resp = self.controller.delete(req, snapshot_id) + self.assertEqual(resp.status_int, 202) + + def test_snapshot_delete_invalid_id(self): + self.stubs.Set(volume.api.API, "delete_snapshot", stub_snapshot_delete) + snapshot_id = INVALID_UUID + req = fakes.HTTPRequest.blank('/v1/snapshots/%s' % snapshot_id) + self.assertRaises(webob.exc.HTTPNotFound, + self.controller.delete, + req, + snapshot_id) + + def test_snapshot_show(self): + self.stubs.Set(volume.api.API, "get_snapshot", stub_snapshot_get) + req = fakes.HTTPRequest.blank('/v1/snapshots/%s' % UUID) + resp_dict = self.controller.show(req, UUID) + + self.assertTrue('snapshot' in resp_dict) + self.assertEqual(resp_dict['snapshot']['id'], UUID) + + def test_snapshot_show_invalid_id(self): + snapshot_id = INVALID_UUID + req = fakes.HTTPRequest.blank('/v1/snapshots/%s' % snapshot_id) + 
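+        # INVALID_UUID is unknown to the stubs, so show() must 404.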
self.assertRaises(webob.exc.HTTPNotFound, + self.controller.show, + req, + snapshot_id) + + def test_snapshot_detail(self): + self.stubs.Set(volume.api.API, + "get_all_snapshots", + stub_snapshot_get_all) + req = fakes.HTTPRequest.blank('/v1/snapshots/detail') + resp_dict = self.controller.detail(req) + + self.assertTrue('snapshots' in resp_dict) + resp_snapshots = resp_dict['snapshots'] + self.assertEqual(len(resp_snapshots), 1) + + resp_snapshot = resp_snapshots.pop() + self.assertEqual(resp_snapshot['id'], UUID) + + def test_snapshot_list_by_status(self): + def stub_snapshot_get_all_by_project(context, project_id): + return [ + stubs.stub_snapshot(1, display_name='backup1', + status='available'), + stubs.stub_snapshot(2, display_name='backup2', + status='available'), + stubs.stub_snapshot(3, display_name='backup3', + status='creating'), + ] + self.stubs.Set(db, 'snapshot_get_all_by_project', + stub_snapshot_get_all_by_project) + + # no status filter + req = fakes.HTTPRequest.blank('/v1/snapshots') + resp = self.controller.index(req) + self.assertEqual(len(resp['snapshots']), 3) + # single match + req = fakes.HTTPRequest.blank('/v1/snapshots?status=creating') + resp = self.controller.index(req) + self.assertEqual(len(resp['snapshots']), 1) + self.assertEqual(resp['snapshots'][0]['status'], 'creating') + # multiple match + req = fakes.HTTPRequest.blank('/v1/snapshots?status=available') + resp = self.controller.index(req) + self.assertEqual(len(resp['snapshots']), 2) + for snapshot in resp['snapshots']: + self.assertEquals(snapshot['status'], 'available') + # no match + req = fakes.HTTPRequest.blank('/v1/snapshots?status=error') + resp = self.controller.index(req) + self.assertEqual(len(resp['snapshots']), 0) + + def test_snapshot_list_by_volume(self): + def stub_snapshot_get_all_by_project(context, project_id): + return [ + stubs.stub_snapshot(1, volume_id='vol1', status='creating'), + stubs.stub_snapshot(2, volume_id='vol1', status='available'), + stubs.stub_snapshot(3, volume_id='vol2', status='available'), + ] + self.stubs.Set(db, 'snapshot_get_all_by_project', + stub_snapshot_get_all_by_project) + + # single match + req = fakes.HTTPRequest.blank('/v1/snapshots?volume_id=vol2') + resp = self.controller.index(req) + self.assertEqual(len(resp['snapshots']), 1) + self.assertEqual(resp['snapshots'][0]['volume_id'], 'vol2') + # multiple match + req = fakes.HTTPRequest.blank('/v1/snapshots?volume_id=vol1') + resp = self.controller.index(req) + self.assertEqual(len(resp['snapshots']), 2) + for snapshot in resp['snapshots']: + self.assertEqual(snapshot['volume_id'], 'vol1') + # multiple filters + req = fakes.HTTPRequest.blank('/v1/snapshots?volume_id=vol1' + '&status=available') + resp = self.controller.index(req) + self.assertEqual(len(resp['snapshots']), 1) + self.assertEqual(resp['snapshots'][0]['volume_id'], 'vol1') + self.assertEqual(resp['snapshots'][0]['status'], 'available') + + def test_snapshot_list_by_name(self): + def stub_snapshot_get_all_by_project(context, project_id): + return [ + stubs.stub_snapshot(1, display_name='backup1'), + stubs.stub_snapshot(2, display_name='backup2'), + stubs.stub_snapshot(3, display_name='backup3'), + ] + self.stubs.Set(db, 'snapshot_get_all_by_project', + stub_snapshot_get_all_by_project) + + # no display_name filter + req = fakes.HTTPRequest.blank('/v1/snapshots') + resp = self.controller.index(req) + self.assertEqual(len(resp['snapshots']), 3) + # filter by one name + req = fakes.HTTPRequest.blank('/v1/snapshots?display_name=backup2') + resp = 
self.controller.index(req) + self.assertEqual(len(resp['snapshots']), 1) + self.assertEquals(resp['snapshots'][0]['display_name'], 'backup2') + # filter no match + req = fakes.HTTPRequest.blank('/v1/snapshots?display_name=backup4') + resp = self.controller.index(req) + self.assertEqual(len(resp['snapshots']), 0) + + def test_admin_list_snapshots_limited_to_project(self): + req = fakes.HTTPRequest.blank('/v1/fake/snapshots', + use_admin_context=True) + res = self.controller.index(req) + + self.assertTrue('snapshots' in res) + self.assertEqual(1, len(res['snapshots'])) + + def test_admin_list_snapshots_all_tenants(self): + req = fakes.HTTPRequest.blank('/v1/fake/snapshots?all_tenants=1', + use_admin_context=True) + res = self.controller.index(req) + self.assertTrue('snapshots' in res) + self.assertEqual(3, len(res['snapshots'])) + + def test_all_tenants_non_admin_gets_all_tenants(self): + req = fakes.HTTPRequest.blank('/v1/fake/snapshots?all_tenants=1') + res = self.controller.index(req) + self.assertTrue('snapshots' in res) + self.assertEqual(1, len(res['snapshots'])) + + def test_non_admin_get_by_project(self): + req = fakes.HTTPRequest.blank('/v1/fake/snapshots') + res = self.controller.index(req) + self.assertTrue('snapshots' in res) + self.assertEqual(1, len(res['snapshots'])) + + +class SnapshotSerializerTest(test.TestCase): + def _verify_snapshot(self, snap, tree): + self.assertEqual(tree.tag, 'snapshot') + + for attr in ('id', 'status', 'size', 'created_at', + 'display_name', 'display_description', 'volume_id'): + self.assertEqual(str(snap[attr]), tree.get(attr)) + + def test_snapshot_show_create_serializer(self): + serializer = snapshots.SnapshotTemplate() + raw_snapshot = dict( + id='snap_id', + status='snap_status', + size=1024, + created_at=datetime.datetime.now(), + display_name='snap_name', + display_description='snap_desc', + volume_id='vol_id', ) + text = serializer.serialize(dict(snapshot=raw_snapshot)) + + print text + tree = etree.fromstring(text) + + self._verify_snapshot(raw_snapshot, tree) + + def test_snapshot_index_detail_serializer(self): + serializer = snapshots.SnapshotsTemplate() + raw_snapshots = [dict(id='snap1_id', + status='snap1_status', + size=1024, + created_at=datetime.datetime.now(), + display_name='snap1_name', + display_description='snap1_desc', + volume_id='vol1_id', ), + dict(id='snap2_id', + status='snap2_status', + size=1024, + created_at=datetime.datetime.now(), + display_name='snap2_name', + display_description='snap2_desc', + volume_id='vol2_id', )] + text = serializer.serialize(dict(snapshots=raw_snapshots)) + + print text + tree = etree.fromstring(text) + + self.assertEqual('snapshots', tree.tag) + self.assertEqual(len(raw_snapshots), len(tree)) + for idx, child in enumerate(tree): + self._verify_snapshot(raw_snapshots[idx], child) + + +class SnapshotsUnprocessableEntityTestCase(test.TestCase): + + """ + Tests of places we throw 422 Unprocessable Entity from + """ + + def setUp(self): + super(SnapshotsUnprocessableEntityTestCase, self).setUp() + self.controller = snapshots.SnapshotsController() + + def _unprocessable_snapshot_create(self, body): + req = fakes.HTTPRequest.blank('/v2/fake/snapshots') + req.method = 'POST' + + self.assertRaises(webob.exc.HTTPUnprocessableEntity, + self.controller.create, req, body) + + def test_create_no_body(self): + self._unprocessable_snapshot_create(body=None) + + def test_create_missing_snapshot(self): + body = {'foo': {'a': 'b'}} + self._unprocessable_snapshot_create(body=body) + + def 
test_create_malformed_entity(self):
+        body = {'snapshot': 'string'}
+        self._unprocessable_snapshot_create(body=body)
diff --git a/cinder/tests/api/v1/test_types.py b/cinder/tests/api/v1/test_types.py
new file mode 100644
index 0000000000..97a10e7917
--- /dev/null
+++ b/cinder/tests/api/v1/test_types.py
@@ -0,0 +1,194 @@
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from lxml import etree
+import webob
+
+from cinder.api.v1 import types
+from cinder.api.views import types as views_types
+from cinder import exception
+from cinder.openstack.common import timeutils
+from cinder import test
+from cinder.tests.api import fakes
+from cinder.volume import volume_types
+
+
+def stub_volume_type(id):
+    specs = {
+        "key1": "value1",
+        "key2": "value2",
+        "key3": "value3",
+        "key4": "value4",
+        "key5": "value5"}
+    return dict(id=id, name='vol_type_%s' % str(id), extra_specs=specs)
+
+
+def return_volume_types_get_all_types(context):
+    return dict(vol_type_1=stub_volume_type(1),
+                vol_type_2=stub_volume_type(2),
+                vol_type_3=stub_volume_type(3))
+
+
+def return_empty_volume_types_get_all_types(context):
+    return {}
+
+
+def return_volume_types_get_volume_type(context, id):
+    if id == "777":
+        raise exception.VolumeTypeNotFound(volume_type_id=id)
+    return stub_volume_type(int(id))
+
+
+def return_volume_types_get_by_name(context, name):
+    if name == "777":
+        raise exception.VolumeTypeNotFoundByName(volume_type_name=name)
+    return stub_volume_type(int(name.split("_")[2]))
+
+
+class VolumeTypesApiTest(test.TestCase):
+    def setUp(self):
+        super(VolumeTypesApiTest, self).setUp()
+        self.controller = types.VolumeTypesController()
+
+    def test_volume_types_index(self):
+        self.stubs.Set(volume_types, 'get_all_types',
+                       return_volume_types_get_all_types)
+
+        req = fakes.HTTPRequest.blank('/v1/fake/types')
+        res_dict = self.controller.index(req)
+
+        self.assertEqual(3, len(res_dict['volume_types']))
+
+        expected_names = ['vol_type_1', 'vol_type_2', 'vol_type_3']
+        actual_names = map(lambda e: e['name'], res_dict['volume_types'])
+        self.assertEqual(set(actual_names), set(expected_names))
+        for entry in res_dict['volume_types']:
+            self.assertEqual('value1', entry['extra_specs']['key1'])
+
+    def test_volume_types_index_no_data(self):
+        self.stubs.Set(volume_types, 'get_all_types',
+                       return_empty_volume_types_get_all_types)
+
+        req = fakes.HTTPRequest.blank('/v1/fake/types')
+        res_dict = self.controller.index(req)
+
+        self.assertEqual(0, len(res_dict['volume_types']))
+
+    def test_volume_types_show(self):
+        self.stubs.Set(volume_types, 'get_volume_type',
+                       return_volume_types_get_volume_type)
+
+        req = fakes.HTTPRequest.blank('/v1/fake/types/1')
+        res_dict = self.controller.show(req, 1)
+
+        self.assertEqual(1, len(res_dict))
+        self.assertEqual('1', res_dict['volume_type']['id'])
+        self.assertEqual('vol_type_1', res_dict['volume_type']['name'])
+
+    def test_volume_types_show_not_found(self):
+        self.stubs.Set(volume_types, 'get_volume_type',
return_volume_types_get_volume_type) + + req = fakes.HTTPRequest.blank('/v1/fake/types/777') + self.assertRaises(webob.exc.HTTPNotFound, self.controller.show, + req, '777') + + def test_view_builder_show(self): + view_builder = views_types.ViewBuilder() + + now = timeutils.isotime() + raw_volume_type = dict(name='new_type', + deleted=False, + created_at=now, + updated_at=now, + extra_specs={}, + deleted_at=None, + id=42) + + request = fakes.HTTPRequest.blank("/v1") + output = view_builder.show(request, raw_volume_type) + + self.assertTrue('volume_type' in output) + expected_volume_type = dict(name='new_type', + extra_specs={}, + id=42) + self.assertDictMatch(output['volume_type'], expected_volume_type) + + def test_view_builder_list(self): + view_builder = views_types.ViewBuilder() + + now = timeutils.isotime() + raw_volume_types = [] + for i in range(0, 10): + raw_volume_types.append(dict(name='new_type', + deleted=False, + created_at=now, + updated_at=now, + extra_specs={}, + deleted_at=None, + id=42 + i)) + + request = fakes.HTTPRequest.blank("/v1") + output = view_builder.index(request, raw_volume_types) + + self.assertTrue('volume_types' in output) + for i in range(0, 10): + expected_volume_type = dict(name='new_type', + extra_specs={}, + id=42 + i) + self.assertDictMatch(output['volume_types'][i], + expected_volume_type) + + +class VolumeTypesSerializerTest(test.TestCase): + def _verify_volume_type(self, vtype, tree): + self.assertEqual('volume_type', tree.tag) + self.assertEqual(vtype['name'], tree.get('name')) + self.assertEqual(str(vtype['id']), tree.get('id')) + self.assertEqual(1, len(tree)) + extra_specs = tree[0] + self.assertEqual('extra_specs', extra_specs.tag) + seen = set(vtype['extra_specs'].keys()) + for child in extra_specs: + self.assertTrue(child.tag in seen) + self.assertEqual(vtype['extra_specs'][child.tag], child.text) + seen.remove(child.tag) + self.assertEqual(len(seen), 0) + + def test_index_serializer(self): + serializer = types.VolumeTypesTemplate() + + # Just getting some input data + vtypes = return_volume_types_get_all_types(None) + text = serializer.serialize({'volume_types': vtypes.values()}) + + tree = etree.fromstring(text) + + self.assertEqual('volume_types', tree.tag) + self.assertEqual(len(vtypes), len(tree)) + for child in tree: + name = child.get('name') + self.assertTrue(name in vtypes) + self._verify_volume_type(vtypes[name], child) + + def test_voltype_serializer(self): + serializer = types.VolumeTypeTemplate() + + vtype = stub_volume_type(1) + text = serializer.serialize(dict(volume_type=vtype)) + + tree = etree.fromstring(text) + + self._verify_volume_type(vtype, tree) diff --git a/cinder/tests/api/v1/test_volume_metadata.py b/cinder/tests/api/v1/test_volume_metadata.py new file mode 100644 index 0000000000..4dc09a8744 --- /dev/null +++ b/cinder/tests/api/v1/test_volume_metadata.py @@ -0,0 +1,441 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +import uuid + +from oslo.config import cfg +import webob + +from cinder.api import extensions +from cinder.api.v1 import volume_metadata +from cinder.api.v1 import volumes +import cinder.db +from cinder import exception +from cinder.openstack.common import jsonutils +from cinder import test +from cinder.tests.api import fakes + + +CONF = cfg.CONF + + +def return_create_volume_metadata_max(context, volume_id, metadata, delete): + return stub_max_volume_metadata() + + +def return_create_volume_metadata(context, volume_id, metadata, delete): + return stub_volume_metadata() + + +def return_volume_metadata(context, volume_id): + if not isinstance(volume_id, str) or not len(volume_id) == 36: + msg = 'id %s must be a uuid in return volume metadata' % volume_id + raise Exception(msg) + return stub_volume_metadata() + + +def return_empty_volume_metadata(context, volume_id): + return {} + + +def delete_volume_metadata(context, volume_id, key): + pass + + +def stub_volume_metadata(): + metadata = { + "key1": "value1", + "key2": "value2", + "key3": "value3", + } + return metadata + + +def stub_max_volume_metadata(): + metadata = {"metadata": {}} + for num in range(CONF.quota_metadata_items): + metadata['metadata']['key%i' % num] = "blah" + return metadata + + +def return_volume(context, volume_id): + return {'id': '0cc3346e-9fef-4445-abe6-5d2b2690ec64', + 'name': 'fake', + 'metadata': {}} + + +def return_volume_nonexistent(context, volume_id): + raise exception.VolumeNotFound('bogus test message') + + +def fake_update_volume_metadata(self, context, volume, diff): + pass + + +class VolumeMetaDataTest(test.TestCase): + + def setUp(self): + super(VolumeMetaDataTest, self).setUp() + self.volume_api = cinder.volume.api.API() + fakes.stub_out_key_pair_funcs(self.stubs) + self.stubs.Set(cinder.db, 'volume_get', return_volume) + self.stubs.Set(cinder.db, 'volume_metadata_get', + return_volume_metadata) + + self.stubs.Set(self.volume_api, 'update_volume_metadata', + fake_update_volume_metadata) + + self.ext_mgr = extensions.ExtensionManager() + self.ext_mgr.extensions = {} + self.volume_controller = volumes.VolumeController(self.ext_mgr) + self.controller = volume_metadata.Controller() + self.id = str(uuid.uuid4()) + self.url = '/v1/fake/volumes/%s/metadata' % self.id + + vol = {"size": 100, + "display_name": "Volume Test Name", + "display_description": "Volume Test Desc", + "availability_zone": "zone1:host1", + "metadata": {}} + body = {"volume": vol} + req = fakes.HTTPRequest.blank('/v1/volumes') + self.volume_controller.create(req, body) + + def test_index(self): + req = fakes.HTTPRequest.blank(self.url) + res_dict = self.controller.index(req, self.id) + + expected = { + 'metadata': { + 'key1': 'value1', + 'key2': 'value2', + 'key3': 'value3', + }, + } + self.assertEqual(expected, res_dict) + + def test_index_nonexistent_volume(self): + self.stubs.Set(cinder.db, 'volume_metadata_get', + return_volume_nonexistent) + req = fakes.HTTPRequest.blank(self.url) + self.assertRaises(webob.exc.HTTPNotFound, + self.controller.index, req, self.id) + + def test_index_no_data(self): + self.stubs.Set(cinder.db, 'volume_metadata_get', + return_empty_volume_metadata) + req = fakes.HTTPRequest.blank(self.url) + res_dict = self.controller.index(req, self.id) + expected = {'metadata': {}} + self.assertEqual(expected, res_dict) + + def test_show(self): + req = fakes.HTTPRequest.blank(self.url + '/key2') + res_dict =
self.controller.show(req, self.id, 'key2') + expected = {'meta': {'key2': 'value2'}} + self.assertEqual(expected, res_dict) + + def test_show_nonexistent_volume(self): + self.stubs.Set(cinder.db, 'volume_metadata_get', + return_volume_nonexistent) + req = fakes.HTTPRequest.blank(self.url + '/key2') + self.assertRaises(webob.exc.HTTPNotFound, + self.controller.show, req, self.id, 'key2') + + def test_show_meta_not_found(self): + self.stubs.Set(cinder.db, 'volume_metadata_get', + return_empty_volume_metadata) + req = fakes.HTTPRequest.blank(self.url + '/key6') + self.assertRaises(webob.exc.HTTPNotFound, + self.controller.show, req, self.id, 'key6') + + def test_delete(self): + self.stubs.Set(cinder.db, 'volume_metadata_get', + return_volume_metadata) + self.stubs.Set(cinder.db, 'volume_metadata_delete', + delete_volume_metadata) + req = fakes.HTTPRequest.blank(self.url + '/key2') + req.method = 'DELETE' + res = self.controller.delete(req, self.id, 'key2') + + self.assertEqual(200, res.status_int) + + def test_delete_nonexistent_volume(self): + self.stubs.Set(cinder.db, 'volume_get', + return_volume_nonexistent) + req = fakes.HTTPRequest.blank(self.url + '/key1') + req.method = 'DELETE' + self.assertRaises(webob.exc.HTTPNotFound, + self.controller.delete, req, self.id, 'key1') + + def test_delete_meta_not_found(self): + self.stubs.Set(cinder.db, 'volume_metadata_get', + return_empty_volume_metadata) + req = fakes.HTTPRequest.blank(self.url + '/key6') + req.method = 'DELETE' + self.assertRaises(webob.exc.HTTPNotFound, + self.controller.delete, req, self.id, 'key6') + + def test_create(self): + self.stubs.Set(cinder.db, 'volume_metadata_get', + return_empty_volume_metadata) + self.stubs.Set(cinder.db, 'volume_metadata_update', + return_create_volume_metadata) + + req = fakes.HTTPRequest.blank('/v1/volume_metadata') + req.method = 'POST' + req.content_type = "application/json" + body = {"metadata": {"key9": "value9"}} + req.body = jsonutils.dumps(body) + res_dict = self.controller.create(req, self.id, body) + self.assertEqual(body, res_dict) + + def test_create_empty_body(self): + self.stubs.Set(cinder.db, 'volume_metadata_update', + return_create_volume_metadata) + req = fakes.HTTPRequest.blank(self.url) + req.method = 'POST' + req.headers["content-type"] = "application/json" + + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.create, req, self.id, None) + + def test_create_item_empty_key(self): + self.stubs.Set(cinder.db, 'volume_metadata_update', + return_create_volume_metadata) + req = fakes.HTTPRequest.blank(self.url + '/key1') + req.method = 'PUT' + body = {"meta": {"": "value1"}} + req.body = jsonutils.dumps(body) + req.headers["content-type"] = "application/json" + + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.create, req, self.id, body) + + def test_create_item_key_too_long(self): + self.stubs.Set(cinder.db, 'volume_metadata_update', + return_create_volume_metadata) + req = fakes.HTTPRequest.blank(self.url + '/key1') + req.method = 'PUT' + body = {"meta": {("a" * 260): "value1"}} + req.body = jsonutils.dumps(body) + req.headers["content-type"] = "application/json" + + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.create, + req, self.id, body) + + def test_create_nonexistent_volume(self): + self.stubs.Set(cinder.db, 'volume_get', + return_volume_nonexistent) + self.stubs.Set(cinder.db, 'volume_metadata_get', + return_volume_metadata) + self.stubs.Set(cinder.db, 'volume_metadata_update', + return_create_volume_metadata) + + req = 
fakes.HTTPRequest.blank('/v1/volume_metadata') + req.method = 'POST' + req.content_type = "application/json" + body = {"metadata": {"key9": "value9"}} + req.body = jsonutils.dumps(body) + self.assertRaises(webob.exc.HTTPNotFound, + self.controller.create, req, self.id, body) + + def test_update_all(self): + self.stubs.Set(cinder.db, 'volume_metadata_update', + return_create_volume_metadata) + req = fakes.HTTPRequest.blank(self.url) + req.method = 'PUT' + req.content_type = "application/json" + expected = { + 'metadata': { + 'key10': 'value10', + 'key99': 'value99', + }, + } + req.body = jsonutils.dumps(expected) + res_dict = self.controller.update_all(req, self.id, expected) + + self.assertEqual(expected, res_dict) + + def test_update_all_empty_container(self): + self.stubs.Set(cinder.db, 'volume_metadata_update', + return_create_volume_metadata) + req = fakes.HTTPRequest.blank(self.url) + req.method = 'PUT' + req.content_type = "application/json" + expected = {'metadata': {}} + req.body = jsonutils.dumps(expected) + res_dict = self.controller.update_all(req, self.id, expected) + + self.assertEqual(expected, res_dict) + + def test_update_all_malformed_container(self): + self.stubs.Set(cinder.db, 'volume_metadata_update', + return_create_volume_metadata) + req = fakes.HTTPRequest.blank(self.url) + req.method = 'PUT' + req.content_type = "application/json" + expected = {'meta': {}} + req.body = jsonutils.dumps(expected) + + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.update_all, req, self.id, expected) + + def test_update_all_malformed_data(self): + self.stubs.Set(cinder.db, 'volume_metadata_update', + return_create_volume_metadata) + req = fakes.HTTPRequest.blank(self.url) + req.method = 'PUT' + req.content_type = "application/json" + expected = {'metadata': ['asdf']} + req.body = jsonutils.dumps(expected) + + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.update_all, req, self.id, expected) + + def test_update_all_nonexistent_volume(self): + self.stubs.Set(cinder.db, 'volume_get', return_volume_nonexistent) + req = fakes.HTTPRequest.blank(self.url) + req.method = 'PUT' + req.content_type = "application/json" + body = {'metadata': {'key10': 'value10'}} + req.body = jsonutils.dumps(body) + + self.assertRaises(webob.exc.HTTPNotFound, + self.controller.update_all, req, '100', body) + + def test_update_item(self): + self.stubs.Set(cinder.db, 'volume_metadata_update', + return_create_volume_metadata) + req = fakes.HTTPRequest.blank(self.url + '/key1') + req.method = 'PUT' + body = {"meta": {"key1": "value1"}} + req.body = jsonutils.dumps(body) + req.headers["content-type"] = "application/json" + res_dict = self.controller.update(req, self.id, 'key1', body) + expected = {'meta': {'key1': 'value1'}} + self.assertEqual(expected, res_dict) + + def test_update_item_nonexistent_volume(self): + self.stubs.Set(cinder.db, 'volume_get', + return_volume_nonexistent) + req = fakes.HTTPRequest.blank('/v1.1/fake/volumes/asdf/metadata/key1') + req.method = 'PUT' + body = {"meta": {"key1": "value1"}} + req.body = jsonutils.dumps(body) + req.headers["content-type"] = "application/json" + + self.assertRaises(webob.exc.HTTPNotFound, + self.controller.update, req, self.id, 'key1', body) + + def test_update_item_empty_body(self): + self.stubs.Set(cinder.db, 'volume_metadata_update', + return_create_volume_metadata) + req = fakes.HTTPRequest.blank(self.url + '/key1') + req.method = 'PUT' + req.headers["content-type"] = "application/json" + + self.assertRaises(webob.exc.HTTPBadRequest, 
+ self.controller.update, req, self.id, 'key1', None) + + def test_update_item_empty_key(self): + self.stubs.Set(cinder.db, 'volume_metadata_update', + return_create_volume_metadata) + req = fakes.HTTPRequest.blank(self.url + '/key1') + req.method = 'PUT' + body = {"meta": {"": "value1"}} + req.body = jsonutils.dumps(body) + req.headers["content-type"] = "application/json" + + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.update, req, self.id, '', body) + + def test_update_item_key_too_long(self): + self.stubs.Set(cinder.db, 'volume_metadata_update', + return_create_volume_metadata) + req = fakes.HTTPRequest.blank(self.url + '/key1') + req.method = 'PUT' + body = {"meta": {("a" * 260): "value1"}} + req.body = jsonutils.dumps(body) + req.headers["content-type"] = "application/json" + + self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, + self.controller.update, + req, self.id, ("a" * 260), body) + + def test_update_item_value_too_long(self): + self.stubs.Set(cinder.db, 'volume_metadata_update', + return_create_volume_metadata) + req = fakes.HTTPRequest.blank(self.url + '/key1') + req.method = 'PUT' + body = {"meta": {"key1": ("a" * 260)}} + req.body = jsonutils.dumps(body) + req.headers["content-type"] = "application/json" + + self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, + self.controller.update, + req, self.id, "key1", body) + + def test_update_item_too_many_keys(self): + self.stubs.Set(cinder.db, 'volume_metadata_update', + return_create_volume_metadata) + req = fakes.HTTPRequest.blank(self.url + '/key1') + req.method = 'PUT' + body = {"meta": {"key1": "value1", "key2": "value2"}} + req.body = jsonutils.dumps(body) + req.headers["content-type"] = "application/json" + + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.update, req, self.id, 'key1', body) + + def test_update_item_body_uri_mismatch(self): + self.stubs.Set(cinder.db, 'volume_metadata_update', + return_create_volume_metadata) + req = fakes.HTTPRequest.blank(self.url + '/bad') + req.method = 'PUT' + body = {"meta": {"key1": "value1"}} + req.body = jsonutils.dumps(body) + req.headers["content-type"] = "application/json" + + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.update, req, self.id, 'bad', body) + + def test_invalid_metadata_items_on_create(self): + self.stubs.Set(cinder.db, 'volume_metadata_update', + return_create_volume_metadata) + req = fakes.HTTPRequest.blank(self.url) + req.method = 'POST' + req.headers["content-type"] = "application/json" + + #test for long key + data = {"metadata": {"a" * 260: "value1"}} + req.body = jsonutils.dumps(data) + self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, + self.controller.create, req, self.id, data) + + #test for long value + data = {"metadata": {"key": "v" * 260}} + req.body = jsonutils.dumps(data) + self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, + self.controller.create, req, self.id, data) + + #test for empty key. + data = {"metadata": {"": "value1"}} + req.body = jsonutils.dumps(data) + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.create, req, self.id, data) diff --git a/cinder/tests/api/v1/test_volumes.py b/cinder/tests/api/v1/test_volumes.py new file mode 100644 index 0000000000..b199ca40e8 --- /dev/null +++ b/cinder/tests/api/v1/test_volumes.py @@ -0,0 +1,764 @@ +# Copyright 2013 Josh Durgin +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import datetime + +from lxml import etree +import webob + +from cinder.api import extensions +from cinder.api.v1 import volumes +from cinder import context +from cinder import db +from cinder import exception +from cinder import flags +from cinder import test +from cinder.tests.api import fakes +from cinder.tests.api.v2 import stubs +from cinder.tests.image import fake as fake_image +from cinder.volume import api as volume_api + + +FLAGS = flags.FLAGS +NS = '{http://docs.openstack.org/volume/api/v1}' + +TEST_SNAPSHOT_UUID = '00000000-0000-0000-0000-000000000001' + + +def stub_snapshot_get(self, context, snapshot_id): + if snapshot_id != TEST_SNAPSHOT_UUID: + raise exception.NotFound + + return {'id': snapshot_id, + 'volume_id': 12, + 'status': 'available', + 'volume_size': 100, + 'created_at': None, + 'display_name': 'Default name', + 'display_description': 'Default description', } + + +class VolumeApiTest(test.TestCase): + def setUp(self): + super(VolumeApiTest, self).setUp() + self.ext_mgr = extensions.ExtensionManager() + self.ext_mgr.extensions = {} + fake_image.stub_out_image_service(self.stubs) + self.controller = volumes.VolumeController(self.ext_mgr) + + self.stubs.Set(db, 'volume_get_all', stubs.stub_volume_get_all) + self.stubs.Set(db, 'volume_get_all_by_project', + stubs.stub_volume_get_all_by_project) + self.stubs.Set(volume_api.API, 'get', stubs.stub_volume_get) + self.stubs.Set(volume_api.API, 'delete', stubs.stub_volume_delete) + + def test_volume_create(self): + self.stubs.Set(volume_api.API, "create", stubs.stub_volume_create) + + vol = {"size": 100, + "display_name": "Volume Test Name", + "display_description": "Volume Test Desc", + "availability_zone": "zone1:host1"} + body = {"volume": vol} + req = fakes.HTTPRequest.blank('/v1/volumes') + res_dict = self.controller.create(req, body) + expected = {'volume': {'status': 'fakestatus', + 'display_description': 'Volume Test Desc', + 'availability_zone': 'zone1:host1', + 'display_name': 'Volume Test Name', + 'attachments': [{'device': '/', + 'server_id': 'fakeuuid', + 'id': '1', + 'volume_id': '1'}], + 'bootable': 'false', + 'volume_type': 'vol_type_name', + 'snapshot_id': None, + 'source_volid': None, + 'metadata': {}, + 'id': '1', + 'created_at': datetime.datetime(1, 1, 1, + 1, 1, 1), + 'size': 100}} + self.assertEqual(res_dict, expected) + + def test_volume_create_with_type(self): + vol_type = FLAGS.default_volume_type + db.volume_type_create(context.get_admin_context(), + dict(name=vol_type, extra_specs={})) + db_vol_type = db.volume_type_get_by_name(context.get_admin_context(), + vol_type) + + vol = {"size": 100, + "display_name": "Volume Test Name", + "display_description": "Volume Test Desc", + "availability_zone": "zone1:host1", + "volume_type": db_vol_type['name'], } + body = {"volume": vol} + req = fakes.HTTPRequest.blank('/v1/volumes') + res_dict = self.controller.create(req, body) + self.assertEquals(res_dict['volume']['volume_type'], + db_vol_type['name']) + + def test_volume_creation_fails_with_bad_size(self): + vol = {"size": '', + "display_name": "Volume Test Name", + "display_description": "Volume 
Test Desc", + "availability_zone": "zone1:host1"} + body = {"volume": vol} + req = fakes.HTTPRequest.blank('/v1/volumes') + self.assertRaises(exception.InvalidInput, + self.controller.create, + req, + body) + + def test_volume_create_with_image_id(self): + self.stubs.Set(volume_api.API, "create", stubs.stub_volume_create) + self.ext_mgr.extensions = {'os-image-create': 'fake'} + test_id = "c905cedb-7281-47e4-8a62-f26bc5fc4c77" + vol = {"size": '1', + "display_name": "Volume Test Name", + "display_description": "Volume Test Desc", + "availability_zone": "nova", + "imageRef": test_id} + expected = {'volume': {'status': 'fakestatus', + 'display_description': 'Volume Test Desc', + 'availability_zone': 'nova', + 'display_name': 'Volume Test Name', + 'attachments': [{'device': '/', + 'server_id': 'fakeuuid', + 'id': '1', + 'volume_id': '1'}], + 'bootable': 'false', + 'volume_type': 'vol_type_name', + 'image_id': test_id, + 'snapshot_id': None, + 'source_volid': None, + 'metadata': {}, + 'id': '1', + 'created_at': datetime.datetime(1, 1, 1, + 1, 1, 1), + 'size': '1'}} + body = {"volume": vol} + req = fakes.HTTPRequest.blank('/v1/volumes') + res_dict = self.controller.create(req, body) + self.assertEqual(res_dict, expected) + + def test_volume_create_with_image_id_is_integer(self): + self.stubs.Set(volume_api.API, "create", stubs.stub_volume_create) + self.ext_mgr.extensions = {'os-image-create': 'fake'} + vol = {"size": '1', + "display_name": "Volume Test Name", + "display_description": "Volume Test Desc", + "availability_zone": "cinder", + "imageRef": 1234} + body = {"volume": vol} + req = fakes.HTTPRequest.blank('/v1/volumes') + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.create, + req, + body) + + def test_volume_create_with_image_id_not_uuid_format(self): + self.stubs.Set(volume_api.API, "create", stubs.stub_volume_create) + self.ext_mgr.extensions = {'os-image-create': 'fake'} + vol = {"size": '1', + "display_name": "Volume Test Name", + "display_description": "Volume Test Desc", + "availability_zone": "cinder", + "imageRef": '12345'} + body = {"volume": vol} + req = fakes.HTTPRequest.blank('/v1/volumes') + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.create, + req, + body) + + def test_volume_update(self): + self.stubs.Set(volume_api.API, "update", stubs.stub_volume_update) + updates = { + "display_name": "Updated Test Name", + } + body = {"volume": updates} + req = fakes.HTTPRequest.blank('/v1/volumes/1') + res_dict = self.controller.update(req, '1', body) + expected = {'volume': { + 'status': 'fakestatus', + 'display_description': 'displaydesc', + 'availability_zone': 'fakeaz', + 'display_name': 'Updated Test Name', + 'attachments': [{ + 'id': '1', + 'volume_id': '1', + 'server_id': 'fakeuuid', + 'device': '/', + }], + 'bootable': 'false', + 'volume_type': 'vol_type_name', + 'snapshot_id': None, + 'source_volid': None, + 'metadata': {}, + 'id': '1', + 'created_at': datetime.datetime(1, 1, 1, 1, 1, 1), + 'size': 1, + }} + self.assertEquals(res_dict, expected) + + def test_volume_update_metadata(self): + self.stubs.Set(volume_api.API, "update", stubs.stub_volume_update) + updates = { + "metadata": {"qos_max_iops": 2000} + } + body = {"volume": updates} + req = fakes.HTTPRequest.blank('/v1/volumes/1') + res_dict = self.controller.update(req, '1', body) + expected = {'volume': { + 'status': 'fakestatus', + 'display_description': 'displaydesc', + 'availability_zone': 'fakeaz', + 'display_name': 'displayname', + 'attachments': [{ + 'id': '1', + 'volume_id': 
'1', + 'server_id': 'fakeuuid', + 'device': '/', + }], + 'bootable': 'false', + 'volume_type': 'vol_type_name', + 'snapshot_id': None, + 'source_volid': None, + 'metadata': {"qos_max_iops": 2000}, + 'id': '1', + 'created_at': datetime.datetime(1, 1, 1, 1, 1, 1), + 'size': 1, + }} + self.assertEquals(res_dict, expected) + + def test_update_empty_body(self): + body = {} + req = fakes.HTTPRequest.blank('/v1/volumes/1') + self.assertRaises(webob.exc.HTTPUnprocessableEntity, + self.controller.update, + req, '1', body) + + def test_update_invalid_body(self): + body = {'display_name': 'missing top level volume key'} + req = fakes.HTTPRequest.blank('/v1/volumes/1') + self.assertRaises(webob.exc.HTTPUnprocessableEntity, + self.controller.update, + req, '1', body) + + def test_update_not_found(self): + self.stubs.Set(volume_api.API, "get", stubs.stub_volume_get_notfound) + updates = { + "display_name": "Updated Test Name", + } + body = {"volume": updates} + req = fakes.HTTPRequest.blank('/v1/volumes/1') + self.assertRaises(webob.exc.HTTPNotFound, + self.controller.update, + req, '1', body) + + def test_volume_list(self): + self.stubs.Set(volume_api.API, 'get_all', + stubs.stub_volume_get_all_by_project) + + req = fakes.HTTPRequest.blank('/v1/volumes') + res_dict = self.controller.index(req) + expected = {'volumes': [{'status': 'fakestatus', + 'display_description': 'displaydesc', + 'availability_zone': 'fakeaz', + 'display_name': 'displayname', + 'attachments': [{'device': '/', + 'server_id': 'fakeuuid', + 'id': '1', + 'volume_id': '1'}], + 'bootable': 'false', + 'volume_type': 'vol_type_name', + 'snapshot_id': None, + 'source_volid': None, + 'metadata': {}, + 'id': '1', + 'created_at': datetime.datetime(1, 1, 1, + 1, 1, 1), + 'size': 1}]} + self.assertEqual(res_dict, expected) + + def test_volume_list_detail(self): + self.stubs.Set(volume_api.API, 'get_all', + stubs.stub_volume_get_all_by_project) + req = fakes.HTTPRequest.blank('/v1/volumes/detail') + res_dict = self.controller.index(req) + expected = {'volumes': [{'status': 'fakestatus', + 'display_description': 'displaydesc', + 'availability_zone': 'fakeaz', + 'display_name': 'displayname', + 'attachments': [{'device': '/', + 'server_id': 'fakeuuid', + 'id': '1', + 'volume_id': '1'}], + 'bootable': 'false', + 'volume_type': 'vol_type_name', + 'snapshot_id': None, + 'source_volid': None, + 'metadata': {}, + 'id': '1', + 'created_at': datetime.datetime(1, 1, 1, + 1, 1, 1), + 'size': 1}]} + self.assertEqual(res_dict, expected) + + def test_volume_list_by_name(self): + def stub_volume_get_all_by_project(context, project_id, marker, limit, + sort_key, sort_dir): + return [ + stubs.stub_volume(1, display_name='vol1'), + stubs.stub_volume(2, display_name='vol2'), + stubs.stub_volume(3, display_name='vol3'), + ] + self.stubs.Set(db, 'volume_get_all_by_project', + stub_volume_get_all_by_project) + + # no display_name filter + req = fakes.HTTPRequest.blank('/v1/volumes') + resp = self.controller.index(req) + self.assertEqual(len(resp['volumes']), 3) + # filter on display_name + req = fakes.HTTPRequest.blank('/v1/volumes?display_name=vol2') + resp = self.controller.index(req) + self.assertEqual(len(resp['volumes']), 1) + self.assertEqual(resp['volumes'][0]['display_name'], 'vol2') + # filter no match + req = fakes.HTTPRequest.blank('/v1/volumes?display_name=vol4') + resp = self.controller.index(req) + self.assertEqual(len(resp['volumes']), 0) + + def test_volume_list_by_status(self): + def stub_volume_get_all_by_project(context, project_id, marker, limit, 
+ sort_key, sort_dir): + return [ + stubs.stub_volume(1, display_name='vol1', status='available'), + stubs.stub_volume(2, display_name='vol2', status='available'), + stubs.stub_volume(3, display_name='vol3', status='in-use'), + ] + self.stubs.Set(db, 'volume_get_all_by_project', + stub_volume_get_all_by_project) + # no status filter + req = fakes.HTTPRequest.blank('/v1/volumes') + resp = self.controller.index(req) + self.assertEqual(len(resp['volumes']), 3) + # single match + req = fakes.HTTPRequest.blank('/v1/volumes?status=in-use') + resp = self.controller.index(req) + self.assertEqual(len(resp['volumes']), 1) + self.assertEqual(resp['volumes'][0]['status'], 'in-use') + # multiple match + req = fakes.HTTPRequest.blank('/v1/volumes?status=available') + resp = self.controller.index(req) + self.assertEqual(len(resp['volumes']), 2) + for volume in resp['volumes']: + self.assertEqual(volume['status'], 'available') + # multiple filters + req = fakes.HTTPRequest.blank('/v1/volumes?status=available&' + 'display_name=vol1') + resp = self.controller.index(req) + self.assertEqual(len(resp['volumes']), 1) + self.assertEqual(resp['volumes'][0]['display_name'], 'vol1') + self.assertEqual(resp['volumes'][0]['status'], 'available') + # no match + req = fakes.HTTPRequest.blank('/v1/volumes?status=in-use&' + 'display_name=vol1') + resp = self.controller.index(req) + self.assertEqual(len(resp['volumes']), 0) + + def test_volume_show(self): + req = fakes.HTTPRequest.blank('/v1/volumes/1') + res_dict = self.controller.show(req, '1') + expected = {'volume': {'status': 'fakestatus', + 'display_description': 'displaydesc', + 'availability_zone': 'fakeaz', + 'display_name': 'displayname', + 'attachments': [{'device': '/', + 'server_id': 'fakeuuid', + 'id': '1', + 'volume_id': '1'}], + 'bootable': 'false', + 'volume_type': 'vol_type_name', + 'snapshot_id': None, + 'source_volid': None, + 'metadata': {}, + 'id': '1', + 'created_at': datetime.datetime(1, 1, 1, + 1, 1, 1), + 'size': 1}} + self.assertEqual(res_dict, expected) + + def test_volume_show_no_attachments(self): + def stub_volume_get(self, context, volume_id): + return stubs.stub_volume(volume_id, attach_status='detached') + + self.stubs.Set(volume_api.API, 'get', stub_volume_get) + + req = fakes.HTTPRequest.blank('/v1/volumes/1') + res_dict = self.controller.show(req, '1') + expected = {'volume': {'status': 'fakestatus', + 'display_description': 'displaydesc', + 'availability_zone': 'fakeaz', + 'display_name': 'displayname', + 'attachments': [], + 'bootable': 'false', + 'volume_type': 'vol_type_name', + 'snapshot_id': None, + 'source_volid': None, + 'metadata': {}, + 'id': '1', + 'created_at': datetime.datetime(1, 1, 1, + 1, 1, 1), + 'size': 1}} + self.assertEqual(res_dict, expected) + + def test_volume_show_bootable(self): + def stub_volume_get(self, context, volume_id): + return (stubs.stub_volume(volume_id, + volume_glance_metadata=dict(foo='bar'))) + + self.stubs.Set(volume_api.API, 'get', stub_volume_get) + + req = fakes.HTTPRequest.blank('/v1/volumes/1') + res_dict = self.controller.show(req, '1') + expected = {'volume': {'status': 'fakestatus', + 'display_description': 'displaydesc', + 'availability_zone': 'fakeaz', + 'display_name': 'displayname', + 'attachments': [{'device': '/', + 'server_id': 'fakeuuid', + 'id': '1', + 'volume_id': '1'}], + 'bootable': 'true', + 'volume_type': 'vol_type_name', + 'snapshot_id': None, + 'source_volid': None, + 'metadata': {}, + 'id': '1', + 'created_at': datetime.datetime(1, 1, 1, + 1, 1, 1), + 'size': 1}} + 
self.assertEqual(res_dict, expected) + + def test_volume_show_no_volume(self): + self.stubs.Set(volume_api.API, "get", stubs.stub_volume_get_notfound) + + req = fakes.HTTPRequest.blank('/v1/volumes/1') + self.assertRaises(webob.exc.HTTPNotFound, + self.controller.show, + req, + 1) + + def test_volume_delete(self): + req = fakes.HTTPRequest.blank('/v1/volumes/1') + resp = self.controller.delete(req, 1) + self.assertEqual(resp.status_int, 202) + + def test_volume_delete_no_volume(self): + self.stubs.Set(volume_api.API, "get", stubs.stub_volume_get_notfound) + + req = fakes.HTTPRequest.blank('/v1/volumes/1') + self.assertRaises(webob.exc.HTTPNotFound, + self.controller.delete, + req, + 1) + + def test_admin_list_volumes_limited_to_project(self): + req = fakes.HTTPRequest.blank('/v1/fake/volumes', + use_admin_context=True) + res = self.controller.index(req) + + self.assertTrue('volumes' in res) + self.assertEqual(1, len(res['volumes'])) + + def test_admin_list_volumes_all_tenants(self): + req = fakes.HTTPRequest.blank('/v1/fake/volumes?all_tenants=1', + use_admin_context=True) + res = self.controller.index(req) + self.assertTrue('volumes' in res) + self.assertEqual(3, len(res['volumes'])) + + def test_all_tenants_non_admin_gets_all_tenants(self): + req = fakes.HTTPRequest.blank('/v1/fake/volumes?all_tenants=1') + res = self.controller.index(req) + self.assertTrue('volumes' in res) + self.assertEqual(1, len(res['volumes'])) + + def test_non_admin_get_by_project(self): + req = fakes.HTTPRequest.blank('/v1/fake/volumes') + res = self.controller.index(req) + self.assertTrue('volumes' in res) + self.assertEqual(1, len(res['volumes'])) + + +class VolumeSerializerTest(test.TestCase): + def _verify_volume_attachment(self, attach, tree): + for attr in ('id', 'volume_id', 'server_id', 'device'): + self.assertEqual(str(attach[attr]), tree.get(attr)) + + def _verify_volume(self, vol, tree): + self.assertEqual(tree.tag, NS + 'volume') + + for attr in ('id', 'status', 'size', 'availability_zone', 'created_at', + 'display_name', 'display_description', 'volume_type', + 'snapshot_id'): + self.assertEqual(str(vol[attr]), tree.get(attr)) + + for child in tree: + print child.tag + self.assertTrue(child.tag in (NS + 'attachments', NS + 'metadata')) + if child.tag == 'attachments': + self.assertEqual(1, len(child)) + self.assertEqual('attachment', child[0].tag) + self._verify_volume_attachment(vol['attachments'][0], child[0]) + elif child.tag == 'metadata': + not_seen = set(vol['metadata'].keys()) + for gr_child in child: + self.assertTrue(gr_child.get("key") in not_seen) + self.assertEqual(str(vol['metadata'][gr_child.get("key")]), + gr_child.text) + not_seen.remove(gr_child.get('key')) + self.assertEqual(0, len(not_seen)) + + def test_volume_show_create_serializer(self): + serializer = volumes.VolumeTemplate() + raw_volume = dict( + id='vol_id', + status='vol_status', + size=1024, + availability_zone='vol_availability', + created_at=datetime.datetime.now(), + attachments=[dict(id='vol_id', + volume_id='vol_id', + server_id='instance_uuid', + device='/foo')], + display_name='vol_name', + display_description='vol_desc', + volume_type='vol_type', + snapshot_id='snap_id', + source_volid='source_volid', + metadata=dict(foo='bar', + baz='quux', ), ) + text = serializer.serialize(dict(volume=raw_volume)) + + print text + tree = etree.fromstring(text) + + self._verify_volume(raw_volume, tree) + + def test_volume_index_detail_serializer(self): + serializer = volumes.VolumesTemplate() + raw_volumes = [dict(id='vol1_id', + 
status='vol1_status', + size=1024, + availability_zone='vol1_availability', + created_at=datetime.datetime.now(), + attachments=[dict(id='vol1_id', + volume_id='vol1_id', + server_id='instance_uuid', + device='/foo1')], + display_name='vol1_name', + display_description='vol1_desc', + volume_type='vol1_type', + snapshot_id='snap1_id', + source_volid=None, + metadata=dict(foo='vol1_foo', + bar='vol1_bar', ), ), + dict(id='vol2_id', + status='vol2_status', + size=1024, + availability_zone='vol2_availability', + created_at=datetime.datetime.now(), + attachments=[dict(id='vol2_id', + volume_id='vol2_id', + server_id='instance_uuid', + device='/foo2')], + display_name='vol2_name', + display_description='vol2_desc', + volume_type='vol2_type', + snapshot_id='snap2_id', + source_volid=None, + metadata=dict(foo='vol2_foo', + bar='vol2_bar', ), )] + text = serializer.serialize(dict(volumes=raw_volumes)) + + print text + tree = etree.fromstring(text) + + self.assertEqual(NS + 'volumes', tree.tag) + self.assertEqual(len(raw_volumes), len(tree)) + for idx, child in enumerate(tree): + self._verify_volume(raw_volumes[idx], child) + + +class TestVolumeCreateRequestXMLDeserializer(test.TestCase): + + def setUp(self): + super(TestVolumeCreateRequestXMLDeserializer, self).setUp() + self.deserializer = volumes.CreateDeserializer() + + def test_minimal_volume(self): + self_request = """ +<volume xmlns="http://docs.openstack.org/volume/api/v1" size="1"></volume>""" + request = self.deserializer.deserialize(self_request) + expected = {"volume": {"size": "1", }, } + self.assertEquals(request['body'], expected) + + def test_display_name(self): + self_request = """ +<volume xmlns="http://docs.openstack.org/volume/api/v1" size="1" display_name="Volume-xml"></volume>""" + request = self.deserializer.deserialize(self_request) + expected = { + "volume": { + "size": "1", + "display_name": "Volume-xml", + }, + } + self.assertEquals(request['body'], expected) + + def test_display_description(self): + self_request = """ +<volume xmlns="http://docs.openstack.org/volume/api/v1" size="1" display_name="Volume-xml" display_description="description"></volume>""" + request = self.deserializer.deserialize(self_request) + expected = { + "volume": { + "size": "1", + "display_name": "Volume-xml", + "display_description": "description", + }, + } + self.assertEquals(request['body'], expected) + + def test_volume_type(self): + self_request = """ +<volume xmlns="http://docs.openstack.org/volume/api/v1" size="1" display_name="Volume-xml" display_description="description" volume_type="289da7f8-6440-407c-9fb4-7db01ec49164"></volume>""" + request = self.deserializer.deserialize(self_request) + expected = { + "volume": { + "size": "1", + "display_name": "Volume-xml", + "display_description": "description", + "volume_type": "289da7f8-6440-407c-9fb4-7db01ec49164", + }, + } + self.assertEquals(request['body'], expected) + + def test_availability_zone(self): + self_request = """ +<volume xmlns="http://docs.openstack.org/volume/api/v1" size="1" display_name="Volume-xml" display_description="description" volume_type="289da7f8-6440-407c-9fb4-7db01ec49164" availability_zone="us-east1"></volume>""" + request = self.deserializer.deserialize(self_request) + expected = { + "volume": { + "size": "1", + "display_name": "Volume-xml", + "display_description": "description", + "volume_type": "289da7f8-6440-407c-9fb4-7db01ec49164", + "availability_zone": "us-east1", + }, + } + self.assertEquals(request['body'], expected) + + def test_metadata(self): + self_request = """ +<volume xmlns="http://docs.openstack.org/volume/api/v1" display_name="Volume-xml" size="1"> + <metadata><meta key="Type">work</meta></metadata></volume>""" + request = self.deserializer.deserialize(self_request) + expected = { + "volume": { + "display_name": "Volume-xml", + "size": "1", + "metadata": { + "Type": "work", + }, + }, + } + self.assertEquals(request['body'], expected) + + def test_full_volume(self): + self_request = """ +<volume xmlns="http://docs.openstack.org/volume/api/v1" size="1" display_name="Volume-xml" display_description="description" volume_type="289da7f8-6440-407c-9fb4-7db01ec49164" availability_zone="us-east1"> + <metadata><meta key="Type">work</meta></metadata></volume>""" + request = self.deserializer.deserialize(self_request) + expected = { + "volume": { + "size": "1", + "display_name": "Volume-xml", + "display_description": "description", + "volume_type": "289da7f8-6440-407c-9fb4-7db01ec49164", + "availability_zone": "us-east1", + "metadata": { + "Type": "work", + }, + }, + } + self.assertEquals(request['body'], expected) +
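+# NOTE: a compact sketch of the behaviour the deserializer tests above
+# exercise -- attributes of the <volume> element are copied verbatim, as
+# strings, into the request body dict; roughly:
+#
+#     deserializer = volumes.CreateDeserializer()
+#     body = deserializer.deserialize('<volume size="1"></volume>')['body']
+#     assert body == {'volume': {'size': '1'}}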
+ +class VolumesUnprocessableEntityTestCase(test.TestCase): + + """ + Tests of places we throw 422 Unprocessable Entity from + """ + + def setUp(self): + super(VolumesUnprocessableEntityTestCase, self).setUp() + self.ext_mgr = extensions.ExtensionManager() + self.ext_mgr.extensions = {} + self.controller = volumes.VolumeController(self.ext_mgr) + + def _unprocessable_volume_create(self, body): + req = fakes.HTTPRequest.blank('/v2/fake/volumes') + req.method = 'POST' + + self.assertRaises(webob.exc.HTTPUnprocessableEntity, + self.controller.create, req, body) + + def test_create_no_body(self): + self._unprocessable_volume_create(body=None) + + def test_create_missing_volume(self): + body = {'foo': {'a': 'b'}} + self._unprocessable_volume_create(body=body) + + def test_create_malformed_entity(self): + body = {'volume': 'string'} + self._unprocessable_volume_create(body=body) diff --git a/cinder/tests/api/v2/__init__.py b/cinder/tests/api/v2/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/cinder/tests/api/v2/stubs.py b/cinder/tests/api/v2/stubs.py new file mode 100644 index 0000000000..c787ef78c2 --- /dev/null +++ b/cinder/tests/api/v2/stubs.py @@ -0,0 +1,133 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
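+
+# NOTE: the stub helpers below are meant to be swapped in for
+# cinder.volume.api.API and cinder.db entry points via self.stubs.Set(),
+# roughly:
+#
+#     self.stubs.Set(volume_api.API, 'get', stubs.stub_volume_get)
+#
+# after which the code under test sees the canned dicts built here instead
+# of real database rows.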
+ +import datetime + +from cinder import exception as exc + +FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa' +FAKE_UUIDS = {} + + +def stub_volume(id, **kwargs): + volume = { + 'id': id, + 'user_id': 'fakeuser', + 'project_id': 'fakeproject', + 'host': 'fakehost', + 'size': 1, + 'availability_zone': 'fakeaz', + 'instance_uuid': 'fakeuuid', + 'mountpoint': '/', + 'status': 'fakestatus', + 'attach_status': 'attached', + 'bootable': 'false', + 'name': 'vol name', + 'display_name': 'displayname', + 'display_description': 'displaydesc', + 'created_at': datetime.datetime(1, 1, 1, 1, 1, 1), + 'snapshot_id': None, + 'source_volid': None, + 'volume_type_id': '3e196c20-3c06-11e2-81c1-0800200c9a66', + 'volume_metadata': [], + 'volume_type': {'name': 'vol_type_name'}} + + volume.update(kwargs) + return volume + + +def stub_volume_create(self, context, size, name, description, snapshot, + **param): + vol = stub_volume('1') + vol['size'] = size + vol['display_name'] = name + vol['display_description'] = description + vol['source_volid'] = None + try: + vol['snapshot_id'] = snapshot['id'] + except (KeyError, TypeError): + vol['snapshot_id'] = None + vol['availability_zone'] = param.get('availability_zone', 'fakeaz') + return vol + + +def stub_volume_create_from_image(self, context, size, name, description, + snapshot, volume_type, metadata, + availability_zone): + vol = stub_volume('1') + vol['status'] = 'creating' + vol['size'] = size + vol['display_name'] = name + vol['display_description'] = description + vol['availability_zone'] = 'cinder' + return vol + + +def stub_volume_update(self, context, *args, **param): + pass + + +def stub_volume_delete(self, context, *args, **param): + pass + + +def stub_volume_get(self, context, volume_id): + return stub_volume(volume_id) + + +def stub_volume_get_notfound(self, context, volume_id): + raise exc.NotFound + + +def stub_volume_get_all(context, search_opts=None, marker=None, limit=None, + sort_key='created_at', sort_dir='desc'): + return [stub_volume(100, project_id='fake'), + stub_volume(101, project_id='superfake'), + stub_volume(102, project_id='superduperfake')] + + +def stub_volume_get_all_by_project(self, context, marker, limit, sort_key, + sort_dir, filters={}): + return [stub_volume_get(self, context, '1')] + + +def stub_snapshot(id, **kwargs): + snapshot = {'id': id, + 'volume_id': 12, + 'status': 'available', + 'volume_size': 100, + 'created_at': None, + 'display_name': 'Default name', + 'display_description': 'Default description', + 'project_id': 'fake'} + + snapshot.update(kwargs) + return snapshot + + +def stub_snapshot_get_all(self): + return [stub_snapshot(100, project_id='fake'), + stub_snapshot(101, project_id='superfake'), + stub_snapshot(102, project_id='superduperfake')] + + +def stub_snapshot_get_all_by_project(self, context): + return [stub_snapshot(1)] + + +def stub_snapshot_update(self, context, *args, **param): + pass diff --git a/cinder/tests/api/v2/test_limits.py b/cinder/tests/api/v2/test_limits.py new file mode 100644 index 0000000000..bfbff2249e --- /dev/null +++ b/cinder/tests/api/v2/test_limits.py @@ -0,0 +1,890 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Tests dealing with HTTP rate-limiting. +""" + +import httplib +import StringIO + +from lxml import etree +import webob +from xml.dom import minidom + +from cinder.api.v2 import limits +from cinder.api import views +from cinder.api import xmlutil +import cinder.context +from cinder.openstack.common import jsonutils +from cinder import test + + +TEST_LIMITS = [ + limits.Limit("GET", "/delayed", "^/delayed", 1, limits.PER_MINUTE), + limits.Limit("POST", "*", ".*", 7, limits.PER_MINUTE), + limits.Limit("POST", "/volumes", "^/volumes", 3, limits.PER_MINUTE), + limits.Limit("PUT", "*", "", 10, limits.PER_MINUTE), + limits.Limit("PUT", "/volumes", "^/volumes", 5, limits.PER_MINUTE), +] +NS = { + 'atom': 'http://www.w3.org/2005/Atom', + 'ns': 'http://docs.openstack.org/common/api/v1.0', +} + + +class BaseLimitTestSuite(test.TestCase): + """Base test suite which provides relevant stubs and time abstraction.""" + + def setUp(self): + super(BaseLimitTestSuite, self).setUp() + self.time = 0.0 + self.stubs.Set(limits.Limit, "_get_time", self._get_time) + self.absolute_limits = {} + + def stub_get_project_quotas(context, project_id, usages=True): + return dict((k, dict(limit=v)) + for k, v in self.absolute_limits.items()) + + self.stubs.Set(cinder.quota.QUOTAS, "get_project_quotas", + stub_get_project_quotas) + + def _get_time(self): + """Return the "time" according to this test suite.""" + return self.time + + +class LimitsControllerTest(BaseLimitTestSuite): + """ + Tests for `limits.LimitsController` class. 
+ """ + + def setUp(self): + """Run before each test.""" + super(LimitsControllerTest, self).setUp() + self.controller = limits.create_resource() + + def _get_index_request(self, accept_header="application/json"): + """Helper to set routing arguments.""" + request = webob.Request.blank("/") + request.accept = accept_header + request.environ["wsgiorg.routing_args"] = (None, { + "action": "index", + "controller": "", + }) + context = cinder.context.RequestContext('testuser', 'testproject') + request.environ["cinder.context"] = context + return request + + def _populate_limits(self, request): + """Put limit info into a request.""" + _limits = [ + limits.Limit("GET", "*", ".*", 10, 60).display(), + limits.Limit("POST", "*", ".*", 5, 60 * 60).display(), + limits.Limit("GET", "changes-since*", "changes-since", + 5, 60).display(), + ] + request.environ["cinder.limits"] = _limits + return request + + def test_empty_index_json(self): + """Test getting empty limit details in JSON.""" + request = self._get_index_request() + response = request.get_response(self.controller) + expected = { + "limits": { + "rate": [], + "absolute": {}, + }, + } + body = jsonutils.loads(response.body) + self.assertEqual(expected, body) + + def test_index_json(self): + """Test getting limit details in JSON.""" + request = self._get_index_request() + request = self._populate_limits(request) + self.absolute_limits = { + 'gigabytes': 512, + 'volumes': 5, + } + response = request.get_response(self.controller) + expected = { + "limits": { + "rate": [ + { + "regex": ".*", + "uri": "*", + "limit": [ + { + "verb": "GET", + "next-available": "1970-01-01T00:00:00Z", + "unit": "MINUTE", + "value": 10, + "remaining": 10, + }, + { + "verb": "POST", + "next-available": "1970-01-01T00:00:00Z", + "unit": "HOUR", + "value": 5, + "remaining": 5, + }, + ], + }, + { + "regex": "changes-since", + "uri": "changes-since*", + "limit": [ + { + "verb": "GET", + "next-available": "1970-01-01T00:00:00Z", + "unit": "MINUTE", + "value": 5, + "remaining": 5, + }, + ], + }, + + ], + "absolute": {"maxTotalVolumeGigabytes": 512, + "maxTotalVolumes": 5, }, + }, + } + body = jsonutils.loads(response.body) + self.assertEqual(expected, body) + + def _populate_limits_diff_regex(self, request): + """Put limit info into a request.""" + _limits = [ + limits.Limit("GET", "*", ".*", 10, 60).display(), + limits.Limit("GET", "*", "*.*", 10, 60).display(), + ] + request.environ["cinder.limits"] = _limits + return request + + def test_index_diff_regex(self): + """Test getting limit details in JSON.""" + request = self._get_index_request() + request = self._populate_limits_diff_regex(request) + response = request.get_response(self.controller) + expected = { + "limits": { + "rate": [ + { + "regex": ".*", + "uri": "*", + "limit": [ + { + "verb": "GET", + "next-available": "1970-01-01T00:00:00Z", + "unit": "MINUTE", + "value": 10, + "remaining": 10, + }, + ], + }, + { + "regex": "*.*", + "uri": "*", + "limit": [ + { + "verb": "GET", + "next-available": "1970-01-01T00:00:00Z", + "unit": "MINUTE", + "value": 10, + "remaining": 10, + }, + ], + }, + + ], + "absolute": {}, + }, + } + body = jsonutils.loads(response.body) + self.assertEqual(expected, body) + + def _test_index_absolute_limits_json(self, expected): + request = self._get_index_request() + response = request.get_response(self.controller) + body = jsonutils.loads(response.body) + self.assertEqual(expected, body['limits']['absolute']) + + def test_index_ignores_extra_absolute_limits_json(self): + self.absolute_limits = 
{'unknown_limit': 9001} + self._test_index_absolute_limits_json({}) + + +class TestLimiter(limits.Limiter): + pass + + +class LimitMiddlewareTest(BaseLimitTestSuite): + """ + Tests for the `limits.RateLimitingMiddleware` class. + """ + + @webob.dec.wsgify + def _empty_app(self, request): + """Do-nothing WSGI app.""" + pass + + def setUp(self): + """Prepare middleware for use through fake WSGI app.""" + super(LimitMiddlewareTest, self).setUp() + _limits = '(GET, *, .*, 1, MINUTE)' + self.app = limits.RateLimitingMiddleware(self._empty_app, _limits, + "%s.TestLimiter" % + self.__class__.__module__) + + def test_limit_class(self): + """Test that middleware selected correct limiter class.""" + assert isinstance(self.app._limiter, TestLimiter) + + def test_good_request(self): + """Test successful GET request through middleware.""" + request = webob.Request.blank("/") + response = request.get_response(self.app) + self.assertEqual(200, response.status_int) + + def test_limited_request_json(self): + """Test a rate-limited (413) GET request through middleware.""" + request = webob.Request.blank("/") + response = request.get_response(self.app) + self.assertEqual(200, response.status_int) + + request = webob.Request.blank("/") + response = request.get_response(self.app) + self.assertEqual(response.status_int, 413) + + self.assertTrue('Retry-After' in response.headers) + retry_after = int(response.headers['Retry-After']) + self.assertAlmostEqual(retry_after, 60, 1) + + body = jsonutils.loads(response.body) + expected = "Only 1 GET request(s) can be made to * every minute." + value = body["overLimitFault"]["details"].strip() + self.assertEqual(value, expected) + + def test_limited_request_xml(self): + """Test a rate-limited (413) response as XML""" + request = webob.Request.blank("/") + response = request.get_response(self.app) + self.assertEqual(200, response.status_int) + + request = webob.Request.blank("/") + request.accept = "application/xml" + response = request.get_response(self.app) + self.assertEqual(response.status_int, 413) + + root = minidom.parseString(response.body).childNodes[0] + expected = "Only 1 GET request(s) can be made to * every minute." + + details = root.getElementsByTagName("details") + self.assertEqual(details.length, 1) + + value = details.item(0).firstChild.data.strip() + self.assertEqual(value, expected) + + +class LimitTest(BaseLimitTestSuite): + """ + Tests for the `limits.Limit` class. + """ + + def test_GET_no_delay(self): + """Test a limit handles 1 GET per second.""" + limit = limits.Limit("GET", "*", ".*", 1, 1) + delay = limit("GET", "/anything") + self.assertEqual(None, delay) + self.assertEqual(0, limit.next_request) + self.assertEqual(0, limit.last_request) + + def test_GET_delay(self): + """Test two calls to 1 GET per second limit.""" + limit = limits.Limit("GET", "*", ".*", 1, 1) + delay = limit("GET", "/anything") + self.assertEqual(None, delay) + + delay = limit("GET", "/anything") + self.assertEqual(1, delay) + self.assertEqual(1, limit.next_request) + self.assertEqual(0, limit.last_request) + + self.time += 4 + + delay = limit("GET", "/anything") + self.assertEqual(None, delay) + self.assertEqual(4, limit.next_request) + self.assertEqual(4, limit.last_request) + + +class ParseLimitsTest(BaseLimitTestSuite): + """ + Tests for the default limits parser in the in-memory + `limits.Limiter` class. 
+ """ + + def test_invalid(self): + """Test that parse_limits() handles invalid input correctly.""" + self.assertRaises(ValueError, limits.Limiter.parse_limits, + ';;;;;') + + def test_bad_rule(self): + """Test that parse_limits() handles bad rules correctly.""" + self.assertRaises(ValueError, limits.Limiter.parse_limits, + 'GET, *, .*, 20, minute') + + def test_missing_arg(self): + """Test that parse_limits() handles missing args correctly.""" + self.assertRaises(ValueError, limits.Limiter.parse_limits, + '(GET, *, .*, 20)') + + def test_bad_value(self): + """Test that parse_limits() handles bad values correctly.""" + self.assertRaises(ValueError, limits.Limiter.parse_limits, + '(GET, *, .*, foo, minute)') + + def test_bad_unit(self): + """Test that parse_limits() handles bad units correctly.""" + self.assertRaises(ValueError, limits.Limiter.parse_limits, + '(GET, *, .*, 20, lightyears)') + + def test_multiple_rules(self): + """Test that parse_limits() handles multiple rules correctly.""" + try: + l = limits.Limiter.parse_limits('(get, *, .*, 20, minute);' + '(PUT, /foo*, /foo.*, 10, hour);' + '(POST, /bar*, /bar.*, 5, second);' + '(Say, /derp*, /derp.*, 1, day)') + except ValueError, e: + assert False, str(e) + + # Make sure the number of returned limits are correct + self.assertEqual(len(l), 4) + + # Check all the verbs... + expected = ['GET', 'PUT', 'POST', 'SAY'] + self.assertEqual([t.verb for t in l], expected) + + # ...the URIs... + expected = ['*', '/foo*', '/bar*', '/derp*'] + self.assertEqual([t.uri for t in l], expected) + + # ...the regexes... + expected = ['.*', '/foo.*', '/bar.*', '/derp.*'] + self.assertEqual([t.regex for t in l], expected) + + # ...the values... + expected = [20, 10, 5, 1] + self.assertEqual([t.value for t in l], expected) + + # ...and the units... + expected = [limits.PER_MINUTE, limits.PER_HOUR, + limits.PER_SECOND, limits.PER_DAY] + self.assertEqual([t.unit for t in l], expected) + + +class LimiterTest(BaseLimitTestSuite): + """ + Tests for the in-memory `limits.Limiter` class. + """ + + def setUp(self): + """Run before each test.""" + super(LimiterTest, self).setUp() + userlimits = {'user:user3': ''} + self.limiter = limits.Limiter(TEST_LIMITS, **userlimits) + + def _check(self, num, verb, url, username=None): + """Check and yield results from checks.""" + for x in xrange(num): + yield self.limiter.check_for_delay(verb, url, username)[0] + + def _check_sum(self, num, verb, url, username=None): + """Check and sum results from checks.""" + results = self._check(num, verb, url, username) + return sum(item for item in results if item) + + def test_no_delay_GET(self): + """ + Simple test to ensure no delay on a single call for a limit verb we + didn"t set. + """ + delay = self.limiter.check_for_delay("GET", "/anything") + self.assertEqual(delay, (None, None)) + + def test_no_delay_PUT(self): + """ + Simple test to ensure no delay on a single call for a known limit. + """ + delay = self.limiter.check_for_delay("PUT", "/anything") + self.assertEqual(delay, (None, None)) + + def test_delay_PUT(self): + """ + Ensure the 11th PUT will result in a delay of 6.0 seconds until + the next request will be granced. + """ + expected = [None] * 10 + [6.0] + results = list(self._check(11, "PUT", "/anything")) + + self.assertEqual(expected, results) + + def test_delay_POST(self): + """ + Ensure the 8th POST will result in a delay of 6.0 seconds until + the next request will be granced. 
+ """ + expected = [None] * 7 + results = list(self._check(7, "POST", "/anything")) + self.assertEqual(expected, results) + + expected = 60.0 / 7.0 + results = self._check_sum(1, "POST", "/anything") + self.failUnlessAlmostEqual(expected, results, 8) + + def test_delay_GET(self): + """ + Ensure the 11th GET will result in NO delay. + """ + expected = [None] * 11 + results = list(self._check(11, "GET", "/anything")) + + self.assertEqual(expected, results) + + def test_delay_PUT_volumes(self): + """ + Ensure PUT on /volumes limits at 5 requests, and PUT elsewhere is still + OK after 5 requests...but then after 11 total requests, PUT limiting + kicks in. + """ + # First 6 requests on PUT /volumes + expected = [None] * 5 + [12.0] + results = list(self._check(6, "PUT", "/volumes")) + self.assertEqual(expected, results) + + # Next 5 request on PUT /anything + expected = [None] * 4 + [6.0] + results = list(self._check(5, "PUT", "/anything")) + self.assertEqual(expected, results) + + def test_delay_PUT_wait(self): + """ + Ensure after hitting the limit and then waiting for the correct + amount of time, the limit will be lifted. + """ + expected = [None] * 10 + [6.0] + results = list(self._check(11, "PUT", "/anything")) + self.assertEqual(expected, results) + + # Advance time + self.time += 6.0 + + expected = [None, 6.0] + results = list(self._check(2, "PUT", "/anything")) + self.assertEqual(expected, results) + + def test_multiple_delays(self): + """ + Ensure multiple requests still get a delay. + """ + expected = [None] * 10 + [6.0] * 10 + results = list(self._check(20, "PUT", "/anything")) + self.assertEqual(expected, results) + + self.time += 1.0 + + expected = [5.0] * 10 + results = list(self._check(10, "PUT", "/anything")) + self.assertEqual(expected, results) + + def test_user_limit(self): + """ + Test user-specific limits. + """ + self.assertEqual(self.limiter.levels['user3'], []) + + def test_multiple_users(self): + """ + Tests involving multiple users. + """ + # User1 + expected = [None] * 10 + [6.0] * 10 + results = list(self._check(20, "PUT", "/anything", "user1")) + self.assertEqual(expected, results) + + # User2 + expected = [None] * 10 + [6.0] * 5 + results = list(self._check(15, "PUT", "/anything", "user2")) + self.assertEqual(expected, results) + + # User3 + expected = [None] * 20 + results = list(self._check(20, "PUT", "/anything", "user3")) + self.assertEqual(expected, results) + + self.time += 1.0 + + # User1 again + expected = [5.0] * 10 + results = list(self._check(10, "PUT", "/anything", "user1")) + self.assertEqual(expected, results) + + self.time += 1.0 + + # User1 again + expected = [4.0] * 5 + results = list(self._check(5, "PUT", "/anything", "user2")) + self.assertEqual(expected, results) + + +class WsgiLimiterTest(BaseLimitTestSuite): + """ + Tests for `limits.WsgiLimiter` class. + """ + + def setUp(self): + """Run before each test.""" + super(WsgiLimiterTest, self).setUp() + self.app = limits.WsgiLimiter(TEST_LIMITS) + + def _request_data(self, verb, path): + """Get data decribing a limit request verb/path.""" + return jsonutils.dumps({"verb": verb, "path": path}) + + def _request(self, verb, url, username=None): + """Make sure that POSTing to the given url causes the given username + to perform the given action. Make the internal rate limiter return + delay and make sure that the WSGI app returns the correct response. 
+ """ + if username: + request = webob.Request.blank("/%s" % username) + else: + request = webob.Request.blank("/") + + request.method = "POST" + request.body = self._request_data(verb, url) + response = request.get_response(self.app) + + if "X-Wait-Seconds" in response.headers: + self.assertEqual(response.status_int, 403) + return response.headers["X-Wait-Seconds"] + + self.assertEqual(response.status_int, 204) + + def test_invalid_methods(self): + """Only POSTs should work.""" + for method in ["GET", "PUT", "DELETE", "HEAD", "OPTIONS"]: + request = webob.Request.blank("/", method=method) + response = request.get_response(self.app) + self.assertEqual(response.status_int, 405) + + def test_good_url(self): + delay = self._request("GET", "/something") + self.assertEqual(delay, None) + + def test_escaping(self): + delay = self._request("GET", "/something/jump%20up") + self.assertEqual(delay, None) + + def test_response_to_delays(self): + delay = self._request("GET", "/delayed") + self.assertEqual(delay, None) + + delay = self._request("GET", "/delayed") + self.assertEqual(delay, '60.00') + + def test_response_to_delays_usernames(self): + delay = self._request("GET", "/delayed", "user1") + self.assertEqual(delay, None) + + delay = self._request("GET", "/delayed", "user2") + self.assertEqual(delay, None) + + delay = self._request("GET", "/delayed", "user1") + self.assertEqual(delay, '60.00') + + delay = self._request("GET", "/delayed", "user2") + self.assertEqual(delay, '60.00') + + +class FakeHttplibSocket(object): + """ + Fake `httplib.HTTPResponse` replacement. + """ + + def __init__(self, response_string): + """Initialize new `FakeHttplibSocket`.""" + self._buffer = StringIO.StringIO(response_string) + + def makefile(self, _mode, _other): + """Returns the socket's internal buffer.""" + return self._buffer + + +class FakeHttplibConnection(object): + """ + Fake `httplib.HTTPConnection`. + """ + + def __init__(self, app, host): + """ + Initialize `FakeHttplibConnection`. + """ + self.app = app + self.host = host + + def request(self, method, path, body="", headers=None): + """ + Requests made via this connection actually get translated and routed + into our WSGI app, we then wait for the response and turn it back into + an `httplib.HTTPResponse`. + """ + if not headers: + headers = {} + + req = webob.Request.blank(path) + req.method = method + req.headers = headers + req.host = self.host + req.body = body + + resp = str(req.get_response(self.app)) + resp = "HTTP/1.0 %s" % resp + sock = FakeHttplibSocket(resp) + self.http_response = httplib.HTTPResponse(sock) + self.http_response.begin() + + def getresponse(self): + """Return our generated response from the request.""" + return self.http_response + + +def wire_HTTPConnection_to_WSGI(host, app): + """Monkeypatches HTTPConnection so that if you try to connect to host, you + are instead routed straight to the given WSGI app. + + After calling this method, when any code calls + + httplib.HTTPConnection(host) + + the connection object will be a fake. Its requests will be sent directly + to the given WSGI app rather than through a socket. + + Code connecting to hosts other than host will not be affected. + + This method may be called multiple times to map different hosts to + different apps. + + This method returns the original HTTPConnection object, so that the caller + can restore the default HTTPConnection interface (for all hosts). 
+ """ + class HTTPConnectionDecorator(object): + """Wraps the real HTTPConnection class so that when you instantiate + the class you might instead get a fake instance.""" + + def __init__(self, wrapped): + self.wrapped = wrapped + + def __call__(self, connection_host, *args, **kwargs): + if connection_host == host: + return FakeHttplibConnection(app, host) + else: + return self.wrapped(connection_host, *args, **kwargs) + + oldHTTPConnection = httplib.HTTPConnection + httplib.HTTPConnection = HTTPConnectionDecorator(httplib.HTTPConnection) + return oldHTTPConnection + + +class WsgiLimiterProxyTest(BaseLimitTestSuite): + """ + Tests for the `limits.WsgiLimiterProxy` class. + """ + + def setUp(self): + """ + Do some nifty HTTP/WSGI magic which allows for WSGI to be called + directly by something like the `httplib` library. + """ + super(WsgiLimiterProxyTest, self).setUp() + self.app = limits.WsgiLimiter(TEST_LIMITS) + self.oldHTTPConnection = ( + wire_HTTPConnection_to_WSGI("169.254.0.1:80", self.app)) + self.proxy = limits.WsgiLimiterProxy("169.254.0.1:80") + + def test_200(self): + """Successful request test.""" + delay = self.proxy.check_for_delay("GET", "/anything") + self.assertEqual(delay, (None, None)) + + def test_403(self): + """Forbidden request test.""" + delay = self.proxy.check_for_delay("GET", "/delayed") + self.assertEqual(delay, (None, None)) + + delay, error = self.proxy.check_for_delay("GET", "/delayed") + error = error.strip() + + expected = ("60.00", "403 Forbidden\n\nOnly 1 GET request(s) can be " + "made to /delayed every minute.") + + self.assertEqual((delay, error), expected) + + def tearDown(self): + # restore original HTTPConnection object + httplib.HTTPConnection = self.oldHTTPConnection + + +class LimitsViewBuilderTest(test.TestCase): + def setUp(self): + super(LimitsViewBuilderTest, self).setUp() + self.view_builder = views.limits.ViewBuilder() + self.rate_limits = [{"URI": "*", + "regex": ".*", + "value": 10, + "verb": "POST", + "remaining": 2, + "unit": "MINUTE", + "resetTime": 1311272226}, + {"URI": "*/volumes", + "regex": "^/volumes", + "value": 50, + "verb": "POST", + "remaining": 10, + "unit": "DAY", + "resetTime": 1311272226}] + self.absolute_limits = {"metadata_items": 1, + "injected_files": 5, + "injected_file_content_bytes": 5} + + def test_build_limits(self): + tdate = "2011-07-21T18:17:06Z" + expected_limits = { + "limits": {"rate": [{"uri": "*", + "regex": ".*", + "limit": [{"value": 10, + "verb": "POST", + "remaining": 2, + "unit": "MINUTE", + "next-available": tdate}]}, + {"uri": "*/volumes", + "regex": "^/volumes", + "limit": [{"value": 50, + "verb": "POST", + "remaining": 10, + "unit": "DAY", + "next-available": tdate}]}], + "absolute": {"maxServerMeta": 1, + "maxImageMeta": 1, + "maxPersonality": 5, + "maxPersonalitySize": 5}}} + + output = self.view_builder.build(self.rate_limits, + self.absolute_limits) + self.assertDictMatch(output, expected_limits) + + def test_build_limits_empty_limits(self): + expected_limits = {"limits": {"rate": [], + "absolute": {}}} + + abs_limits = {} + rate_limits = [] + output = self.view_builder.build(rate_limits, abs_limits) + self.assertDictMatch(output, expected_limits) + + +class LimitsXMLSerializationTest(test.TestCase): + def test_xml_declaration(self): + serializer = limits.LimitsTemplate() + + fixture = {"limits": { + "rate": [], + "absolute": {}}} + + output = serializer.serialize(fixture) + has_dec = output.startswith("") + self.assertTrue(has_dec) + + def test_index(self): + tdate = "2011-12-15T22:42:45Z" 
+        serializer = limits.LimitsTemplate()
+        fixture = {"limits": {"rate": [{"uri": "*",
+                                        "regex": ".*",
+                                        "limit": [{"value": 10,
+                                                   "verb": "POST",
+                                                   "remaining": 2,
+                                                   "unit": "MINUTE",
+                                                   "next-available": tdate}]},
+                                       {"uri": "*/servers",
+                                        "regex": "^/servers",
+                                        "limit": [{"value": 50,
+                                                   "verb": "POST",
+                                                   "remaining": 10,
+                                                   "unit": "DAY",
+                                                   "next-available": tdate}]}],
+                              "absolute": {"maxServerMeta": 1,
+                                           "maxImageMeta": 1,
+                                           "maxPersonality": 5,
+                                           "maxPersonalitySize": 10240}}}
+
+        output = serializer.serialize(fixture)
+        root = etree.XML(output)
+        xmlutil.validate_schema(root, 'limits')
+
+        # verify absolute limits
+        absolutes = root.xpath('ns:absolute/ns:limit', namespaces=NS)
+        self.assertEqual(len(absolutes), 4)
+        for limit in absolutes:
+            name = limit.get('name')
+            value = limit.get('value')
+            self.assertEqual(value, str(fixture['limits']['absolute'][name]))
+
+        # verify rate limits
+        rates = root.xpath('ns:rates/ns:rate', namespaces=NS)
+        self.assertEqual(len(rates), 2)
+        for i, rate in enumerate(rates):
+            for key in ['uri', 'regex']:
+                self.assertEqual(rate.get(key),
+                                 str(fixture['limits']['rate'][i][key]))
+            rate_limits = rate.xpath('ns:limit', namespaces=NS)
+            self.assertEqual(len(rate_limits), 1)
+            for j, limit in enumerate(rate_limits):
+                for key in ['verb', 'value', 'remaining', 'unit',
+                            'next-available']:
+                    self.assertEqual(
+                        limit.get(key),
+                        str(fixture['limits']['rate'][i]['limit'][j][key]))
+
+    def test_index_no_limits(self):
+        serializer = limits.LimitsTemplate()
+
+        fixture = {"limits": {
+            "rate": [],
+            "absolute": {}}}
+
+        output = serializer.serialize(fixture)
+        root = etree.XML(output)
+        xmlutil.validate_schema(root, 'limits')
+
+        # verify absolute limits
+        absolutes = root.xpath('ns:absolute/ns:limit', namespaces=NS)
+        self.assertEqual(len(absolutes), 0)
+
+        # verify rate limits
+        rates = root.xpath('ns:rates/ns:rate', namespaces=NS)
+        self.assertEqual(len(rates), 0)
diff --git a/cinder/tests/api/v2/test_snapshot_metadata.py b/cinder/tests/api/v2/test_snapshot_metadata.py
new file mode 100644
index 0000000000..ae0333283a
--- /dev/null
+++ b/cinder/tests/api/v2/test_snapshot_metadata.py
@@ -0,0 +1,458 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
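+
+# The tests below all follow one pattern: replace the db-layer call with a
+# canned stub, build a fake request, invoke the controller method directly,
+# and compare the returned dict.  A rough sketch of that round trip, reusing
+# the stub names defined further down (the snapshot id itself is arbitrary):
+#
+#     self.stubs.Set(cinder.db, 'snapshot_metadata_get',
+#                    return_snapshot_metadata)
+#     req = fakes.HTTPRequest.blank('/v2/fake/snapshots/%s/metadata' % snap_id)
+#     res = self.controller.index(req, snap_id)
+#     # res == {'metadata': {'key1': 'value1', 'key2': 'value2',
+#     #                      'key3': 'value3'}}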
+
+import uuid
+
+from oslo.config import cfg
+import webob
+
+from cinder.api import extensions
+from cinder.api.v2 import snapshot_metadata
+from cinder.api.v2 import snapshots
+import cinder.db
+from cinder import exception
+from cinder.openstack.common import jsonutils
+from cinder import test
+from cinder.tests.api import fakes
+
+
+CONF = cfg.CONF
+
+
+def return_create_snapshot_metadata_max(context,
+                                        snapshot_id,
+                                        metadata,
+                                        delete):
+    return stub_max_snapshot_metadata()
+
+
+def return_create_snapshot_metadata(context, snapshot_id, metadata, delete):
+    return stub_snapshot_metadata()
+
+
+def return_snapshot_metadata(context, snapshot_id):
+    if not isinstance(snapshot_id, str) or len(snapshot_id) != 36:
+        msg = 'id %s must be a uuid in return snapshot metadata' % snapshot_id
+        raise Exception(msg)
+    return stub_snapshot_metadata()
+
+
+def return_empty_snapshot_metadata(context, snapshot_id):
+    return {}
+
+
+def delete_snapshot_metadata(context, snapshot_id, key):
+    pass
+
+
+def stub_snapshot_metadata():
+    metadata = {
+        "key1": "value1",
+        "key2": "value2",
+        "key3": "value3",
+    }
+    return metadata
+
+
+def stub_max_snapshot_metadata():
+    metadata = {"metadata": {}}
+    for num in range(CONF.quota_metadata_items):
+        metadata['metadata']['key%i' % num] = "blah"
+    return metadata
+
+
+def return_snapshot(context, snapshot_id):
+    return {'id': '0cc3346e-9fef-4445-abe6-5d2b2690ec64',
+            'name': 'fake',
+            'status': 'available',
+            'metadata': {}}
+
+
+def return_volume(context, volume_id):
+    return {'id': 'fake-vol-id',
+            'size': 100,
+            'name': 'fake',
+            'host': 'fake-host',
+            'status': 'available',
+            'metadata': {}}
+
+
+def return_snapshot_nonexistent(context, snapshot_id):
+    raise exception.SnapshotNotFound('bogus test message')
+
+
+def fake_update_snapshot_metadata(self, context, snapshot, diff):
+    pass
+
+
+class SnapshotMetaDataTest(test.TestCase):
+
+    def setUp(self):
+        super(SnapshotMetaDataTest, self).setUp()
+        self.volume_api = cinder.volume.api.API()
+        fakes.stub_out_key_pair_funcs(self.stubs)
+        self.stubs.Set(cinder.db, 'volume_get', return_volume)
+        self.stubs.Set(cinder.db, 'snapshot_get', return_snapshot)
+        self.stubs.Set(cinder.db, 'snapshot_metadata_get',
+                       return_snapshot_metadata)
+
+        self.stubs.Set(self.volume_api, 'update_snapshot_metadata',
+                       fake_update_snapshot_metadata)
+
+        self.ext_mgr = extensions.ExtensionManager()
+        self.ext_mgr.extensions = {}
+        self.snapshot_controller = snapshots.SnapshotsController(self.ext_mgr)
+        self.controller = snapshot_metadata.Controller()
+        self.id = str(uuid.uuid4())
+        self.url = '/v2/fake/snapshots/%s/metadata' % self.id
+
+        snap = {"volume_size": 100,
+                "volume_id": "fake-vol-id",
+                "display_name": "Volume Test Name",
+                "display_description": "Volume Test Desc",
+                "availability_zone": "zone1:host1",
+                "host": "fake-host",
+                "metadata": {}}
+        body = {"snapshot": snap}
+        req = fakes.HTTPRequest.blank('/v2/snapshots')
+        self.snapshot_controller.create(req, body)
+
+    def test_index(self):
+        req = fakes.HTTPRequest.blank(self.url)
+        res_dict = self.controller.index(req, self.id)
+
+        expected = {
+            'metadata': {
+                'key1': 'value1',
+                'key2': 'value2',
+                'key3': 'value3',
+            },
+        }
+        self.assertEqual(expected, res_dict)
+
+    def test_index_nonexistent_snapshot(self):
+        self.stubs.Set(cinder.db, 'snapshot_metadata_get',
+                       return_snapshot_nonexistent)
+        req = fakes.HTTPRequest.blank(self.url)
+        self.assertRaises(webob.exc.HTTPNotFound,
+                          self.controller.index, req, self.id)
+
+    def test_index_no_data(self):
+
self.stubs.Set(cinder.db, 'snapshot_metadata_get', + return_empty_snapshot_metadata) + req = fakes.HTTPRequest.blank(self.url) + res_dict = self.controller.index(req, self.id) + expected = {'metadata': {}} + self.assertEqual(expected, res_dict) + + def test_show(self): + req = fakes.HTTPRequest.blank(self.url + '/key2') + res_dict = self.controller.show(req, self.id, 'key2') + expected = {'meta': {'key2': 'value2'}} + self.assertEqual(expected, res_dict) + + def test_show_nonexistent_snapshot(self): + self.stubs.Set(cinder.db, 'snapshot_metadata_get', + return_snapshot_nonexistent) + req = fakes.HTTPRequest.blank(self.url + '/key2') + self.assertRaises(webob.exc.HTTPNotFound, + self.controller.show, req, self.id, 'key2') + + def test_show_meta_not_found(self): + self.stubs.Set(cinder.db, 'snapshot_metadata_get', + return_empty_snapshot_metadata) + req = fakes.HTTPRequest.blank(self.url + '/key6') + self.assertRaises(webob.exc.HTTPNotFound, + self.controller.show, req, self.id, 'key6') + + def test_delete(self): + self.stubs.Set(cinder.db, 'snapshot_metadata_get', + return_snapshot_metadata) + self.stubs.Set(cinder.db, 'snapshot_metadata_delete', + delete_snapshot_metadata) + req = fakes.HTTPRequest.blank(self.url + '/key2') + req.method = 'DELETE' + res = self.controller.delete(req, self.id, 'key2') + + self.assertEqual(200, res.status_int) + + def test_delete_nonexistent_snapshot(self): + self.stubs.Set(cinder.db, 'snapshot_get', + return_snapshot_nonexistent) + req = fakes.HTTPRequest.blank(self.url + '/key1') + req.method = 'DELETE' + self.assertRaises(webob.exc.HTTPNotFound, + self.controller.delete, req, self.id, 'key1') + + def test_delete_meta_not_found(self): + self.stubs.Set(cinder.db, 'snapshot_metadata_get', + return_empty_snapshot_metadata) + req = fakes.HTTPRequest.blank(self.url + '/key6') + req.method = 'DELETE' + self.assertRaises(webob.exc.HTTPNotFound, + self.controller.delete, req, self.id, 'key6') + + def test_create(self): + self.stubs.Set(cinder.db, 'snapshot_metadata_get', + return_empty_snapshot_metadata) + self.stubs.Set(cinder.db, 'snapshot_metadata_update', + return_create_snapshot_metadata) + + req = fakes.HTTPRequest.blank('/v2/snapshot_metadata') + req.method = 'POST' + req.content_type = "application/json" + body = {"metadata": {"key9": "value9"}} + req.body = jsonutils.dumps(body) + res_dict = self.controller.create(req, self.id, body) + self.assertEqual(body, res_dict) + + def test_create_empty_body(self): + self.stubs.Set(cinder.db, 'snapshot_metadata_update', + return_create_snapshot_metadata) + req = fakes.HTTPRequest.blank(self.url) + req.method = 'POST' + req.headers["content-type"] = "application/json" + + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.create, req, self.id, None) + + def test_create_item_empty_key(self): + self.stubs.Set(cinder.db, 'snapshot_metadata_update', + return_create_snapshot_metadata) + req = fakes.HTTPRequest.blank(self.url + '/key1') + req.method = 'PUT' + body = {"meta": {"": "value1"}} + req.body = jsonutils.dumps(body) + req.headers["content-type"] = "application/json" + + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.create, req, self.id, body) + + def test_create_item_key_too_long(self): + self.stubs.Set(cinder.db, 'snapshot_metadata_update', + return_create_snapshot_metadata) + req = fakes.HTTPRequest.blank(self.url + '/key1') + req.method = 'PUT' + body = {"meta": {("a" * 260): "value1"}} + req.body = jsonutils.dumps(body) + req.headers["content-type"] = "application/json" + + 
self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.create, + req, self.id, body) + + def test_create_nonexistent_snapshot(self): + self.stubs.Set(cinder.db, 'snapshot_get', + return_snapshot_nonexistent) + self.stubs.Set(cinder.db, 'snapshot_metadata_get', + return_snapshot_metadata) + self.stubs.Set(cinder.db, 'snapshot_metadata_update', + return_create_snapshot_metadata) + + req = fakes.HTTPRequest.blank('/v2/snapshot_metadata') + req.method = 'POST' + req.content_type = "application/json" + body = {"metadata": {"key9": "value9"}} + req.body = jsonutils.dumps(body) + self.assertRaises(webob.exc.HTTPNotFound, + self.controller.create, req, self.id, body) + + def test_update_all(self): + self.stubs.Set(cinder.db, 'snapshot_metadata_update', + return_create_snapshot_metadata) + req = fakes.HTTPRequest.blank(self.url) + req.method = 'PUT' + req.content_type = "application/json" + expected = { + 'metadata': { + 'key10': 'value10', + 'key99': 'value99', + }, + } + req.body = jsonutils.dumps(expected) + res_dict = self.controller.update_all(req, self.id, expected) + + self.assertEqual(expected, res_dict) + + def test_update_all_empty_container(self): + self.stubs.Set(cinder.db, 'snapshot_metadata_update', + return_create_snapshot_metadata) + req = fakes.HTTPRequest.blank(self.url) + req.method = 'PUT' + req.content_type = "application/json" + expected = {'metadata': {}} + req.body = jsonutils.dumps(expected) + res_dict = self.controller.update_all(req, self.id, expected) + + self.assertEqual(expected, res_dict) + + def test_update_all_malformed_container(self): + self.stubs.Set(cinder.db, 'snapshot_metadata_update', + return_create_snapshot_metadata) + req = fakes.HTTPRequest.blank(self.url) + req.method = 'PUT' + req.content_type = "application/json" + expected = {'meta': {}} + req.body = jsonutils.dumps(expected) + + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.update_all, req, self.id, expected) + + def test_update_all_malformed_data(self): + self.stubs.Set(cinder.db, 'snapshot_metadata_update', + return_create_snapshot_metadata) + req = fakes.HTTPRequest.blank(self.url) + req.method = 'PUT' + req.content_type = "application/json" + expected = {'metadata': ['asdf']} + req.body = jsonutils.dumps(expected) + + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.update_all, req, self.id, expected) + + def test_update_all_nonexistent_snapshot(self): + self.stubs.Set(cinder.db, 'snapshot_get', return_snapshot_nonexistent) + req = fakes.HTTPRequest.blank(self.url) + req.method = 'PUT' + req.content_type = "application/json" + body = {'metadata': {'key10': 'value10'}} + req.body = jsonutils.dumps(body) + + self.assertRaises(webob.exc.HTTPNotFound, + self.controller.update_all, req, '100', body) + + def test_update_item(self): + self.stubs.Set(cinder.db, 'snapshot_metadata_update', + return_create_snapshot_metadata) + req = fakes.HTTPRequest.blank(self.url + '/key1') + req.method = 'PUT' + body = {"meta": {"key1": "value1"}} + req.body = jsonutils.dumps(body) + req.headers["content-type"] = "application/json" + res_dict = self.controller.update(req, self.id, 'key1', body) + expected = {'meta': {'key1': 'value1'}} + self.assertEqual(expected, res_dict) + + def test_update_item_nonexistent_snapshot(self): + self.stubs.Set(cinder.db, 'snapshot_get', + return_snapshot_nonexistent) + req = fakes.HTTPRequest.blank( + '/v2/fake/snapshots/asdf/metadata/key1') + req.method = 'PUT' + body = {"meta": {"key1": "value1"}} + req.body = jsonutils.dumps(body) + 
req.headers["content-type"] = "application/json" + + self.assertRaises(webob.exc.HTTPNotFound, + self.controller.update, req, self.id, 'key1', body) + + def test_update_item_empty_body(self): + self.stubs.Set(cinder.db, 'snapshot_metadata_update', + return_create_snapshot_metadata) + req = fakes.HTTPRequest.blank(self.url + '/key1') + req.method = 'PUT' + req.headers["content-type"] = "application/json" + + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.update, req, self.id, 'key1', None) + + def test_update_item_empty_key(self): + self.stubs.Set(cinder.db, 'snapshot_metadata_update', + return_create_snapshot_metadata) + req = fakes.HTTPRequest.blank(self.url + '/key1') + req.method = 'PUT' + body = {"meta": {"": "value1"}} + req.body = jsonutils.dumps(body) + req.headers["content-type"] = "application/json" + + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.update, req, self.id, '', body) + + def test_update_item_key_too_long(self): + self.stubs.Set(cinder.db, 'snapshot_metadata_update', + return_create_snapshot_metadata) + req = fakes.HTTPRequest.blank(self.url + '/key1') + req.method = 'PUT' + body = {"meta": {("a" * 260): "value1"}} + req.body = jsonutils.dumps(body) + req.headers["content-type"] = "application/json" + + self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, + self.controller.update, + req, self.id, ("a" * 260), body) + + def test_update_item_value_too_long(self): + self.stubs.Set(cinder.db, 'snapshot_metadata_update', + return_create_snapshot_metadata) + req = fakes.HTTPRequest.blank(self.url + '/key1') + req.method = 'PUT' + body = {"meta": {"key1": ("a" * 260)}} + req.body = jsonutils.dumps(body) + req.headers["content-type"] = "application/json" + + self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, + self.controller.update, + req, self.id, "key1", body) + + def test_update_item_too_many_keys(self): + self.stubs.Set(cinder.db, 'snapshot_metadata_update', + return_create_snapshot_metadata) + req = fakes.HTTPRequest.blank(self.url + '/key1') + req.method = 'PUT' + body = {"meta": {"key1": "value1", "key2": "value2"}} + req.body = jsonutils.dumps(body) + req.headers["content-type"] = "application/json" + + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.update, req, self.id, 'key1', body) + + def test_update_item_body_uri_mismatch(self): + self.stubs.Set(cinder.db, 'snapshot_metadata_update', + return_create_snapshot_metadata) + req = fakes.HTTPRequest.blank(self.url + '/bad') + req.method = 'PUT' + body = {"meta": {"key1": "value1"}} + req.body = jsonutils.dumps(body) + req.headers["content-type"] = "application/json" + + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.update, req, self.id, 'bad', body) + + def test_invalid_metadata_items_on_create(self): + self.stubs.Set(cinder.db, 'snapshot_metadata_update', + return_create_snapshot_metadata) + req = fakes.HTTPRequest.blank(self.url) + req.method = 'POST' + req.headers["content-type"] = "application/json" + + #test for long key + data = {"metadata": {"a" * 260: "value1"}} + req.body = jsonutils.dumps(data) + self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, + self.controller.create, req, self.id, data) + + #test for long value + data = {"metadata": {"key": "v" * 260}} + req.body = jsonutils.dumps(data) + self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, + self.controller.create, req, self.id, data) + + #test for empty key. 
+ data = {"metadata": {"": "value1"}} + req.body = jsonutils.dumps(data) + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.create, req, self.id, data) diff --git a/cinder/tests/api/v2/test_snapshots.py b/cinder/tests/api/v2/test_snapshots.py new file mode 100644 index 0000000000..f003cb0b3d --- /dev/null +++ b/cinder/tests/api/v2/test_snapshots.py @@ -0,0 +1,425 @@ +# Copyright 2011 Denali Systems, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import datetime + +from lxml import etree +import webob + +from cinder.api.v2 import snapshots +from cinder import db +from cinder import exception +from cinder import flags +from cinder.openstack.common import log as logging +from cinder import test +from cinder.tests.api import fakes +from cinder.tests.api.v2 import stubs +from cinder import volume + + +FLAGS = flags.FLAGS +LOG = logging.getLogger(__name__) + +UUID = '00000000-0000-0000-0000-000000000001' +INVALID_UUID = '00000000-0000-0000-0000-000000000002' + + +def _get_default_snapshot_param(): + return { + 'id': UUID, + 'volume_id': 12, + 'status': 'available', + 'volume_size': 100, + 'created_at': None, + 'display_name': 'Default name', + 'display_description': 'Default description', + } + + +def stub_snapshot_create(self, context, + volume_id, name, + description, metadata): + snapshot = _get_default_snapshot_param() + snapshot['volume_id'] = volume_id + snapshot['display_name'] = name + snapshot['display_description'] = description + snapshot['metadata'] = metadata + return snapshot + + +def stub_snapshot_delete(self, context, snapshot): + if snapshot['id'] != UUID: + raise exception.NotFound + + +def stub_snapshot_get(self, context, snapshot_id): + if snapshot_id != UUID: + raise exception.NotFound + + param = _get_default_snapshot_param() + return param + + +def stub_snapshot_get_all(self, context, search_opts=None): + param = _get_default_snapshot_param() + return [param] + + +class SnapshotApiTest(test.TestCase): + def setUp(self): + super(SnapshotApiTest, self).setUp() + self.controller = snapshots.SnapshotsController() + + self.stubs.Set(db, 'snapshot_get_all_by_project', + stubs.stub_snapshot_get_all_by_project) + self.stubs.Set(db, 'snapshot_get_all', + stubs.stub_snapshot_get_all) + + def test_snapshot_create(self): + self.stubs.Set(volume.api.API, "create_snapshot", stub_snapshot_create) + self.stubs.Set(volume.api.API, 'get', stubs.stub_volume_get) + snapshot_name = 'Snapshot Test Name' + snapshot_description = 'Snapshot Test Desc' + snapshot = { + "volume_id": '12', + "force": False, + "name": snapshot_name, + "description": snapshot_description + } + + body = dict(snapshot=snapshot) + req = fakes.HTTPRequest.blank('/v2/snapshots') + resp_dict = self.controller.create(req, body) + + self.assertTrue('snapshot' in resp_dict) + self.assertEqual(resp_dict['snapshot']['name'], + snapshot_name) + self.assertEqual(resp_dict['snapshot']['description'], + snapshot_description) + + def test_snapshot_create_force(self): + 
self.stubs.Set(volume.api.API, "create_snapshot_force", + stub_snapshot_create) + self.stubs.Set(volume.api.API, 'get', stubs.stub_volume_get) + snapshot_name = 'Snapshot Test Name' + snapshot_description = 'Snapshot Test Desc' + snapshot = { + "volume_id": '12', + "force": True, + "name": snapshot_name, + "description": snapshot_description + } + body = dict(snapshot=snapshot) + req = fakes.HTTPRequest.blank('/v2/snapshots') + resp_dict = self.controller.create(req, body) + + self.assertTrue('snapshot' in resp_dict) + self.assertEqual(resp_dict['snapshot']['name'], + snapshot_name) + self.assertEqual(resp_dict['snapshot']['description'], + snapshot_description) + + snapshot = { + "volume_id": "12", + "force": "**&&^^%%$$##@@", + "name": "Snapshot Test Name", + "description": "Snapshot Test Desc" + } + body = dict(snapshot=snapshot) + req = fakes.HTTPRequest.blank('/v2/snapshots') + self.assertRaises(exception.InvalidParameterValue, + self.controller.create, + req, + body) + + def test_snapshot_update(self): + self.stubs.Set(volume.api.API, "get_snapshot", stub_snapshot_get) + self.stubs.Set(volume.api.API, "update_snapshot", + stubs.stub_snapshot_update) + updates = { + "name": "Updated Test Name", + } + body = {"snapshot": updates} + req = fakes.HTTPRequest.blank('/v2/snapshots/%s' % UUID) + res_dict = self.controller.update(req, UUID, body) + expected = { + 'snapshot': { + 'id': UUID, + 'volume_id': 12, + 'status': 'available', + 'size': 100, + 'created_at': None, + 'name': 'Updated Test Name', + 'description': 'Default description', + 'metadata': {}, + } + } + self.assertEquals(expected, res_dict) + + def test_snapshot_update_missing_body(self): + body = {} + req = fakes.HTTPRequest.blank('/v2/snapshots/%s' % UUID) + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.update, req, UUID, body) + + def test_snapshot_update_invalid_body(self): + body = {'name': 'missing top level snapshot key'} + req = fakes.HTTPRequest.blank('/v2/snapshots/%s' % UUID) + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.update, req, UUID, body) + + def test_snapshot_update_not_found(self): + self.stubs.Set(volume.api.API, "get_snapshot", stub_snapshot_get) + updates = { + "name": "Updated Test Name", + } + body = {"snapshot": updates} + req = fakes.HTTPRequest.blank('/v2/snapshots/not-the-uuid') + self.assertRaises(webob.exc.HTTPNotFound, self.controller.update, req, + 'not-the-uuid', body) + + def test_snapshot_delete(self): + self.stubs.Set(volume.api.API, "get_snapshot", stub_snapshot_get) + self.stubs.Set(volume.api.API, "delete_snapshot", stub_snapshot_delete) + + snapshot_id = UUID + req = fakes.HTTPRequest.blank('/v2/snapshots/%s' % snapshot_id) + resp = self.controller.delete(req, snapshot_id) + self.assertEqual(resp.status_int, 202) + + def test_snapshot_delete_invalid_id(self): + self.stubs.Set(volume.api.API, "delete_snapshot", stub_snapshot_delete) + snapshot_id = INVALID_UUID + req = fakes.HTTPRequest.blank('/v2/snapshots/%s' % snapshot_id) + self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete, + req, snapshot_id) + + def test_snapshot_show(self): + self.stubs.Set(volume.api.API, "get_snapshot", stub_snapshot_get) + req = fakes.HTTPRequest.blank('/v2/snapshots/%s' % UUID) + resp_dict = self.controller.show(req, UUID) + + self.assertTrue('snapshot' in resp_dict) + self.assertEqual(resp_dict['snapshot']['id'], UUID) + + def test_snapshot_show_invalid_id(self): + snapshot_id = INVALID_UUID + req = fakes.HTTPRequest.blank('/v2/snapshots/%s' % snapshot_id) + 
self.assertRaises(webob.exc.HTTPNotFound, + self.controller.show, req, snapshot_id) + + def test_snapshot_detail(self): + self.stubs.Set(volume.api.API, "get_all_snapshots", + stub_snapshot_get_all) + req = fakes.HTTPRequest.blank('/v2/snapshots/detail') + resp_dict = self.controller.detail(req) + + self.assertTrue('snapshots' in resp_dict) + resp_snapshots = resp_dict['snapshots'] + self.assertEqual(len(resp_snapshots), 1) + + resp_snapshot = resp_snapshots.pop() + self.assertEqual(resp_snapshot['id'], UUID) + + def test_snapshot_list_by_status(self): + def stub_snapshot_get_all_by_project(context, project_id): + return [ + stubs.stub_snapshot(1, display_name='backup1', + status='available'), + stubs.stub_snapshot(2, display_name='backup2', + status='available'), + stubs.stub_snapshot(3, display_name='backup3', + status='creating'), + ] + self.stubs.Set(db, 'snapshot_get_all_by_project', + stub_snapshot_get_all_by_project) + + # no status filter + req = fakes.HTTPRequest.blank('/v2/snapshots') + resp = self.controller.index(req) + self.assertEqual(len(resp['snapshots']), 3) + # single match + req = fakes.HTTPRequest.blank('/v2/snapshots?status=creating') + resp = self.controller.index(req) + self.assertEqual(len(resp['snapshots']), 1) + self.assertEqual(resp['snapshots'][0]['status'], 'creating') + # multiple match + req = fakes.HTTPRequest.blank('/v2/snapshots?status=available') + resp = self.controller.index(req) + self.assertEqual(len(resp['snapshots']), 2) + for snapshot in resp['snapshots']: + self.assertEquals(snapshot['status'], 'available') + # no match + req = fakes.HTTPRequest.blank('/v2/snapshots?status=error') + resp = self.controller.index(req) + self.assertEqual(len(resp['snapshots']), 0) + + def test_snapshot_list_by_volume(self): + def stub_snapshot_get_all_by_project(context, project_id): + return [ + stubs.stub_snapshot(1, volume_id='vol1', status='creating'), + stubs.stub_snapshot(2, volume_id='vol1', status='available'), + stubs.stub_snapshot(3, volume_id='vol2', status='available'), + ] + self.stubs.Set(db, 'snapshot_get_all_by_project', + stub_snapshot_get_all_by_project) + + # single match + req = fakes.HTTPRequest.blank('/v2/snapshots?volume_id=vol2') + resp = self.controller.index(req) + self.assertEqual(len(resp['snapshots']), 1) + self.assertEqual(resp['snapshots'][0]['volume_id'], 'vol2') + # multiple match + req = fakes.HTTPRequest.blank('/v2/snapshots?volume_id=vol1') + resp = self.controller.index(req) + self.assertEqual(len(resp['snapshots']), 2) + for snapshot in resp['snapshots']: + self.assertEqual(snapshot['volume_id'], 'vol1') + # multiple filters + req = fakes.HTTPRequest.blank('/v2/snapshots?volume_id=vol1' + '&status=available') + resp = self.controller.index(req) + self.assertEqual(len(resp['snapshots']), 1) + self.assertEqual(resp['snapshots'][0]['volume_id'], 'vol1') + self.assertEqual(resp['snapshots'][0]['status'], 'available') + + def test_snapshot_list_by_name(self): + def stub_snapshot_get_all_by_project(context, project_id): + return [ + stubs.stub_snapshot(1, display_name='backup1'), + stubs.stub_snapshot(2, display_name='backup2'), + stubs.stub_snapshot(3, display_name='backup3'), + ] + self.stubs.Set(db, 'snapshot_get_all_by_project', + stub_snapshot_get_all_by_project) + + # no name filter + req = fakes.HTTPRequest.blank('/v2/snapshots') + resp = self.controller.index(req) + self.assertEqual(len(resp['snapshots']), 3) + # filter by one name + req = fakes.HTTPRequest.blank('/v2/snapshots?name=backup2') + resp = self.controller.index(req) 
+ self.assertEqual(len(resp['snapshots']), 1) + self.assertEquals(resp['snapshots'][0]['name'], 'backup2') + # filter no match + req = fakes.HTTPRequest.blank('/v2/snapshots?name=backup4') + resp = self.controller.index(req) + self.assertEqual(len(resp['snapshots']), 0) + + def test_admin_list_snapshots_limited_to_project(self): + req = fakes.HTTPRequest.blank('/v2/fake/snapshots', + use_admin_context=True) + res = self.controller.index(req) + + self.assertTrue('snapshots' in res) + self.assertEqual(1, len(res['snapshots'])) + + def test_admin_list_snapshots_all_tenants(self): + req = fakes.HTTPRequest.blank('/v2/fake/snapshots?all_tenants=1', + use_admin_context=True) + res = self.controller.index(req) + self.assertTrue('snapshots' in res) + self.assertEqual(3, len(res['snapshots'])) + + def test_all_tenants_non_admin_gets_all_tenants(self): + req = fakes.HTTPRequest.blank('/v2/fake/snapshots?all_tenants=1') + res = self.controller.index(req) + self.assertTrue('snapshots' in res) + self.assertEqual(1, len(res['snapshots'])) + + def test_non_admin_get_by_project(self): + req = fakes.HTTPRequest.blank('/v2/fake/snapshots') + res = self.controller.index(req) + self.assertTrue('snapshots' in res) + self.assertEqual(1, len(res['snapshots'])) + + def _create_snapshot_bad_body(self, body): + req = fakes.HTTPRequest.blank('/v2/fake/snapshots') + req.method = 'POST' + + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.create, req, body) + + def test_create_no_body(self): + self._create_snapshot_bad_body(body=None) + + def test_create_missing_snapshot(self): + body = {'foo': {'a': 'b'}} + self._create_snapshot_bad_body(body=body) + + def test_create_malformed_entity(self): + body = {'snapshot': 'string'} + self._create_snapshot_bad_body(body=body) + + +class SnapshotSerializerTest(test.TestCase): + def _verify_snapshot(self, snap, tree): + self.assertEqual(tree.tag, 'snapshot') + + for attr in ('id', 'status', 'size', 'created_at', + 'name', 'description', 'volume_id'): + self.assertEqual(str(snap[attr]), tree.get(attr)) + + def test_snapshot_show_create_serializer(self): + serializer = snapshots.SnapshotTemplate() + raw_snapshot = dict( + id='snap_id', + status='snap_status', + size=1024, + created_at=datetime.datetime.now(), + name='snap_name', + description='snap_desc', + display_description='snap_desc', + volume_id='vol_id', + ) + text = serializer.serialize(dict(snapshot=raw_snapshot)) + + print text + tree = etree.fromstring(text) + + self._verify_snapshot(raw_snapshot, tree) + + def test_snapshot_index_detail_serializer(self): + serializer = snapshots.SnapshotsTemplate() + raw_snapshots = [ + dict( + id='snap1_id', + status='snap1_status', + size=1024, + created_at=datetime.datetime.now(), + name='snap1_name', + description='snap1_desc', + volume_id='vol1_id', + ), + dict( + id='snap2_id', + status='snap2_status', + size=1024, + created_at=datetime.datetime.now(), + name='snap2_name', + description='snap2_desc', + volume_id='vol2_id', + ) + ] + text = serializer.serialize(dict(snapshots=raw_snapshots)) + + print text + tree = etree.fromstring(text) + + self.assertEqual('snapshots', tree.tag) + self.assertEqual(len(raw_snapshots), len(tree)) + for idx, child in enumerate(tree): + self._verify_snapshot(raw_snapshots[idx], child) diff --git a/cinder/tests/api/v2/test_types.py b/cinder/tests/api/v2/test_types.py new file mode 100644 index 0000000000..1a3849f822 --- /dev/null +++ b/cinder/tests/api/v2/test_types.py @@ -0,0 +1,211 @@ +# Copyright 2011 OpenStack LLC. 
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from lxml import etree
+import webob
+
+from cinder.api.v2 import types
+from cinder.api.views import types as views_types
+from cinder import exception
+from cinder.openstack.common import timeutils
+from cinder import test
+from cinder.tests.api import fakes
+from cinder.volume import volume_types
+
+
+def stub_volume_type(id):
+    specs = {
+        "key1": "value1",
+        "key2": "value2",
+        "key3": "value3",
+        "key4": "value4",
+        "key5": "value5"
+    }
+    return dict(
+        id=id,
+        name='vol_type_%s' % str(id),
+        extra_specs=specs,
+    )
+
+
+def return_volume_types_get_all_types(context):
+    return dict(
+        vol_type_1=stub_volume_type(1),
+        vol_type_2=stub_volume_type(2),
+        vol_type_3=stub_volume_type(3)
+    )
+
+
+def return_empty_volume_types_get_all_types(context):
+    return {}
+
+
+def return_volume_types_get_volume_type(context, id):
+    if id == "777":
+        raise exception.VolumeTypeNotFound(volume_type_id=id)
+    return stub_volume_type(int(id))
+
+
+def return_volume_types_get_by_name(context, name):
+    if name == "777":
+        raise exception.VolumeTypeNotFoundByName(volume_type_name=name)
+    return stub_volume_type(int(name.split("_")[2]))
+
+
+class VolumeTypesApiTest(test.TestCase):
+    def setUp(self):
+        super(VolumeTypesApiTest, self).setUp()
+        self.controller = types.VolumeTypesController()
+
+    def test_volume_types_index(self):
+        self.stubs.Set(volume_types, 'get_all_types',
+                       return_volume_types_get_all_types)
+
+        req = fakes.HTTPRequest.blank('/v2/fake/types')
+        res_dict = self.controller.index(req)
+
+        self.assertEqual(3, len(res_dict['volume_types']))
+
+        expected_names = ['vol_type_1', 'vol_type_2', 'vol_type_3']
+        actual_names = map(lambda e: e['name'], res_dict['volume_types'])
+        self.assertEqual(set(actual_names), set(expected_names))
+        for entry in res_dict['volume_types']:
+            self.assertEqual('value1', entry['extra_specs']['key1'])
+
+    def test_volume_types_index_no_data(self):
+        self.stubs.Set(volume_types, 'get_all_types',
+                       return_empty_volume_types_get_all_types)
+
+        req = fakes.HTTPRequest.blank('/v2/fake/types')
+        res_dict = self.controller.index(req)
+
+        self.assertEqual(0, len(res_dict['volume_types']))
+
+    def test_volume_types_show(self):
+        self.stubs.Set(volume_types, 'get_volume_type',
+                       return_volume_types_get_volume_type)
+
+        req = fakes.HTTPRequest.blank('/v2/fake/types/1')
+        res_dict = self.controller.show(req, 1)
+
+        self.assertEqual(1, len(res_dict))
+        self.assertEqual('1', res_dict['volume_type']['id'])
+        self.assertEqual('vol_type_1', res_dict['volume_type']['name'])
+
+    def test_volume_types_show_not_found(self):
+        self.stubs.Set(volume_types, 'get_volume_type',
+                       return_volume_types_get_volume_type)
+
+        req = fakes.HTTPRequest.blank('/v2/fake/types/777')
+        self.assertRaises(webob.exc.HTTPNotFound, self.controller.show,
+                          req, '777')
+
+    def test_view_builder_show(self):
+        view_builder = views_types.ViewBuilder()
+
+        now = timeutils.isotime()
+        raw_volume_type = dict(
+            name='new_type',
+            deleted=False,
+
created_at=now, + updated_at=now, + extra_specs={}, + deleted_at=None, + id=42, + ) + + request = fakes.HTTPRequest.blank("/v2") + output = view_builder.show(request, raw_volume_type) + + self.assertTrue('volume_type' in output) + expected_volume_type = dict( + name='new_type', + extra_specs={}, + id=42, + ) + self.assertDictMatch(output['volume_type'], expected_volume_type) + + def test_view_builder_list(self): + view_builder = views_types.ViewBuilder() + + now = timeutils.isotime() + raw_volume_types = [] + for i in range(0, 10): + raw_volume_types.append( + dict( + name='new_type', + deleted=False, + created_at=now, + updated_at=now, + extra_specs={}, + deleted_at=None, + id=42 + i + ) + ) + + request = fakes.HTTPRequest.blank("/v2") + output = view_builder.index(request, raw_volume_types) + + self.assertTrue('volume_types' in output) + for i in range(0, 10): + expected_volume_type = dict( + name='new_type', + extra_specs={}, + id=42 + i + ) + self.assertDictMatch(output['volume_types'][i], + expected_volume_type) + + +class VolumeTypesSerializerTest(test.TestCase): + def _verify_volume_type(self, vtype, tree): + self.assertEqual('volume_type', tree.tag) + self.assertEqual(vtype['name'], tree.get('name')) + self.assertEqual(str(vtype['id']), tree.get('id')) + self.assertEqual(1, len(tree)) + extra_specs = tree[0] + self.assertEqual('extra_specs', extra_specs.tag) + seen = set(vtype['extra_specs'].keys()) + for child in extra_specs: + self.assertTrue(child.tag in seen) + self.assertEqual(vtype['extra_specs'][child.tag], child.text) + seen.remove(child.tag) + self.assertEqual(len(seen), 0) + + def test_index_serializer(self): + serializer = types.VolumeTypesTemplate() + + # Just getting some input data + vtypes = return_volume_types_get_all_types(None) + text = serializer.serialize({'volume_types': vtypes.values()}) + + tree = etree.fromstring(text) + + self.assertEqual('volume_types', tree.tag) + self.assertEqual(len(vtypes), len(tree)) + for child in tree: + name = child.get('name') + self.assertTrue(name in vtypes) + self._verify_volume_type(vtypes[name], child) + + def test_voltype_serializer(self): + serializer = types.VolumeTypeTemplate() + + vtype = stub_volume_type(1) + text = serializer.serialize(dict(volume_type=vtype)) + + tree = etree.fromstring(text) + + self._verify_volume_type(vtype, tree) diff --git a/cinder/tests/api/v2/test_volumes.py b/cinder/tests/api/v2/test_volumes.py new file mode 100644 index 0000000000..9b08ab376a --- /dev/null +++ b/cinder/tests/api/v2/test_volumes.py @@ -0,0 +1,955 @@ +# Copyright 2013 Josh Durgin +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
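+
+# The controller tests below share the same shape: stub out volume_api/db
+# calls, build a fake request, call the VolumeController method directly,
+# and compare the resulting dict.  A rough sketch of the create path,
+# mirroring test_volume_create further down (body values are illustrative):
+#
+#     self.stubs.Set(volume_api.API, 'create', stubs.stub_volume_create)
+#     body = {'volume': {'size': 100, 'name': 'Volume Test Name'}}
+#     req = fakes.HTTPRequest.blank('/v2/volumes')
+#     res = self.controller.create(req, body)
+#     # res['volume']['name'] == 'Volume Test Name'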
+ +import datetime + +from lxml import etree +import webob + +from cinder.api import extensions +from cinder.api.v2 import volumes +from cinder import context +from cinder import db +from cinder import exception +from cinder import flags +from cinder import test +from cinder.tests.api import fakes +from cinder.tests.api.v2 import stubs +from cinder.tests.image import fake as fake_image +from cinder.volume import api as volume_api + + +FLAGS = flags.FLAGS +NS = '{http://docs.openstack.org/api/openstack-volume/2.0/content}' + +TEST_SNAPSHOT_UUID = '00000000-0000-0000-0000-000000000001' + + +def stub_snapshot_get(self, context, snapshot_id): + if snapshot_id != TEST_SNAPSHOT_UUID: + raise exception.NotFound + + return { + 'id': snapshot_id, + 'volume_id': 12, + 'status': 'available', + 'volume_size': 100, + 'created_at': None, + 'name': 'Default name', + 'description': 'Default description', + } + + +class VolumeApiTest(test.TestCase): + def setUp(self): + super(VolumeApiTest, self).setUp() + self.ext_mgr = extensions.ExtensionManager() + self.ext_mgr.extensions = {} + fake_image.stub_out_image_service(self.stubs) + self.controller = volumes.VolumeController(self.ext_mgr) + + self.stubs.Set(db, 'volume_get_all', stubs.stub_volume_get_all) + self.stubs.Set(db, 'volume_get_all_by_project', + stubs.stub_volume_get_all_by_project) + self.stubs.Set(volume_api.API, 'get', stubs.stub_volume_get) + self.stubs.Set(volume_api.API, 'delete', stubs.stub_volume_delete) + self.maxDiff = None + + def test_volume_create(self): + self.stubs.Set(volume_api.API, "create", stubs.stub_volume_create) + + vol = { + "size": 100, + "name": "Volume Test Name", + "description": "Volume Test Desc", + "availability_zone": "zone1:host1" + } + body = {"volume": vol} + req = fakes.HTTPRequest.blank('/v2/volumes') + res_dict = self.controller.create(req, body) + expected = { + 'volume': { + 'name': 'Volume Test Name', + 'id': '1', + 'links': [ + { + 'href': 'http://localhost/v1/fake/volumes/1', + 'rel': 'self' + }, + { + 'href': 'http://localhost/fake/volumes/1', + 'rel': 'bookmark' + } + ], + } + } + self.assertEqual(res_dict, expected) + + def test_volume_create_with_type(self): + vol_type = db.volume_type_create(context.get_admin_context(), + dict(name=FLAGS.default_volume_type, + extra_specs={})) + + db_vol_type = db.volume_type_get(context.get_admin_context(), + vol_type.id) + + vol = { + "size": 100, + "name": "Volume Test Name", + "description": "Volume Test Desc", + "availability_zone": "zone1:host1", + "volume_type": db_vol_type['id'], + } + body = {"volume": vol} + req = fakes.HTTPRequest.blank('/v2/volumes') + res_dict = self.controller.create(req, body) + volume_id = res_dict['volume']['id'] + self.assertEquals(len(res_dict), 1) + + self.stubs.Set(volume_api.API, 'get_all', + lambda *args, **kwargs: + [stubs.stub_volume(volume_id, + volume_type={'name': vol_type})]) + req = fakes.HTTPRequest.blank('/v2/volumes/detail') + res_dict = self.controller.detail(req) + + def test_volume_creation_fails_with_bad_size(self): + vol = {"size": '', + "name": "Volume Test Name", + "description": "Volume Test Desc", + "availability_zone": "zone1:host1"} + body = {"volume": vol} + req = fakes.HTTPRequest.blank('/v2/volumes') + self.assertRaises(exception.InvalidInput, + self.controller.create, + req, + body) + + def test_volume_create_with_image_id(self): + self.stubs.Set(volume_api.API, "create", stubs.stub_volume_create) + self.ext_mgr.extensions = {'os-image-create': 'fake'} + vol = {"size": '1', + "name": "Volume Test Name", 
+ "description": "Volume Test Desc", + "availability_zone": "nova", + "imageRef": 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'} + expected = { + 'volume': { + 'name': 'Volume Test Name', + 'id': '1', + 'links': [ + { + 'href': 'http://localhost/v1/fake/volumes/1', + 'rel': 'self' + }, + { + 'href': 'http://localhost/fake/volumes/1', + 'rel': 'bookmark' + } + ], + } + } + body = {"volume": vol} + req = fakes.HTTPRequest.blank('/v2/volumes') + res_dict = self.controller.create(req, body) + self.assertEqual(res_dict, expected) + + def test_volume_create_with_image_id_is_integer(self): + self.stubs.Set(volume_api.API, "create", stubs.stub_volume_create) + self.ext_mgr.extensions = {'os-image-create': 'fake'} + vol = { + "size": '1', + "name": "Volume Test Name", + "description": "Volume Test Desc", + "availability_zone": "cinder", + "imageRef": 1234, + } + body = {"volume": vol} + req = fakes.HTTPRequest.blank('/v2/volumes') + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.create, + req, + body) + + def test_volume_create_with_image_id_not_uuid_format(self): + self.stubs.Set(volume_api.API, "create", stubs.stub_volume_create) + self.ext_mgr.extensions = {'os-image-create': 'fake'} + vol = { + "size": '1', + "name": "Volume Test Name", + "description": "Volume Test Desc", + "availability_zone": "cinder", + "imageRef": '12345' + } + body = {"volume": vol} + req = fakes.HTTPRequest.blank('/v2/volumes') + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.create, + req, + body) + + def test_volume_update(self): + self.stubs.Set(volume_api.API, "update", stubs.stub_volume_update) + updates = { + "name": "Updated Test Name", + } + body = {"volume": updates} + req = fakes.HTTPRequest.blank('/v2/volumes/1') + res_dict = self.controller.update(req, '1', body) + expected = { + 'volume': { + 'status': 'fakestatus', + 'description': 'displaydesc', + 'availability_zone': 'fakeaz', + 'name': 'Updated Test Name', + 'attachments': [ + { + 'id': '1', + 'volume_id': '1', + 'server_id': 'fakeuuid', + 'device': '/', + } + ], + 'volume_type': 'vol_type_name', + 'snapshot_id': None, + 'source_volid': None, + 'metadata': {}, + 'id': '1', + 'created_at': datetime.datetime(1, 1, 1, 1, 1, 1), + 'size': 1, + 'links': [ + { + 'href': 'http://localhost/v1/fake/volumes/1', + 'rel': 'self' + }, + { + 'href': 'http://localhost/fake/volumes/1', + 'rel': 'bookmark' + } + ], + } + } + self.assertEquals(res_dict, expected) + + def test_volume_update_metadata(self): + self.stubs.Set(volume_api.API, "update", stubs.stub_volume_update) + updates = { + "metadata": {"qos_max_iops": 2000} + } + body = {"volume": updates} + req = fakes.HTTPRequest.blank('/v2/volumes/1') + res_dict = self.controller.update(req, '1', body) + expected = {'volume': { + 'status': 'fakestatus', + 'description': 'displaydesc', + 'availability_zone': 'fakeaz', + 'name': 'displayname', + 'attachments': [{ + 'id': '1', + 'volume_id': '1', + 'server_id': 'fakeuuid', + 'device': '/', + }], + 'volume_type': 'vol_type_name', + 'snapshot_id': None, + 'source_volid': None, + 'metadata': {"qos_max_iops": 2000}, + 'id': '1', + 'created_at': datetime.datetime(1, 1, 1, 1, 1, 1), + 'size': 1, + 'links': [ + { + 'href': 'http://localhost/v1/fake/volumes/1', + 'rel': 'self' + }, + { + 'href': 'http://localhost/fake/volumes/1', + 'rel': 'bookmark' + } + ], + }} + self.assertEquals(res_dict, expected) + + def test_update_empty_body(self): + body = {} + req = fakes.HTTPRequest.blank('/v2/volumes/1') + self.assertRaises(webob.exc.HTTPBadRequest, + 
self.controller.update, + req, '1', body) + + def test_update_invalid_body(self): + body = { + 'name': 'missing top level volume key' + } + req = fakes.HTTPRequest.blank('/v2/volumes/1') + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.update, + req, '1', body) + + def test_update_not_found(self): + self.stubs.Set(volume_api.API, "get", stubs.stub_volume_get_notfound) + updates = { + "name": "Updated Test Name", + } + body = {"volume": updates} + req = fakes.HTTPRequest.blank('/v2/volumes/1') + self.assertRaises(webob.exc.HTTPNotFound, + self.controller.update, + req, '1', body) + + def test_volume_list_summary(self): + self.stubs.Set(volume_api.API, 'get_all', + stubs.stub_volume_get_all_by_project) + req = fakes.HTTPRequest.blank('/v2/volumes') + res_dict = self.controller.index(req) + expected = { + 'volumes': [ + { + 'name': 'displayname', + 'id': '1', + 'links': [ + { + 'href': 'http://localhost/v1/fake/volumes/1', + 'rel': 'self' + }, + { + 'href': 'http://localhost/fake/volumes/1', + 'rel': 'bookmark' + } + ], + } + ] + } + self.assertEqual(res_dict, expected) + + def test_volume_list_detail(self): + self.stubs.Set(volume_api.API, 'get_all', + stubs.stub_volume_get_all_by_project) + req = fakes.HTTPRequest.blank('/v2/volumes/detail') + res_dict = self.controller.detail(req) + expected = { + 'volumes': [ + { + 'status': 'fakestatus', + 'description': 'displaydesc', + 'availability_zone': 'fakeaz', + 'name': 'displayname', + 'attachments': [ + { + 'device': '/', + 'server_id': 'fakeuuid', + 'id': '1', + 'volume_id': '1' + } + ], + 'volume_type': 'vol_type_name', + 'snapshot_id': None, + 'source_volid': None, + 'metadata': {}, + 'id': '1', + 'created_at': datetime.datetime(1, 1, 1, 1, 1, 1), + 'size': 1, + 'links': [ + { + 'href': 'http://localhost/v1/fake/volumes/1', + 'rel': 'self' + }, + { + 'href': 'http://localhost/fake/volumes/1', + 'rel': 'bookmark' + } + ], + } + ] + } + self.assertEqual(res_dict, expected) + + def test_volume_index_with_marker(self): + def stub_volume_get_all_by_project(context, project_id, marker, limit, + sort_key, sort_dir): + return [ + stubs.stub_volume(1, display_name='vol1'), + stubs.stub_volume(2, display_name='vol2'), + ] + self.stubs.Set(db, 'volume_get_all_by_project', + stub_volume_get_all_by_project) + req = fakes.HTTPRequest.blank('/v2/volumes?marker=1') + res_dict = self.controller.index(req) + volumes = res_dict['volumes'] + self.assertEquals(len(volumes), 2) + self.assertEquals(volumes[0]['id'], 1) + self.assertEquals(volumes[1]['id'], 2) + + def test_volume_index_limit(self): + req = fakes.HTTPRequest.blank('/v2/volumes?limit=1') + res_dict = self.controller.index(req) + volumes = res_dict['volumes'] + self.assertEquals(len(volumes), 1) + + def test_volume_index_limit_negative(self): + req = fakes.HTTPRequest.blank('/v2/volumes?limit=-1') + self.assertRaises(exception.Invalid, + self.controller.index, + req) + + def test_volume_index_limit_non_int(self): + req = fakes.HTTPRequest.blank('/v2/volumes?limit=a') + self.assertRaises(exception.Invalid, + self.controller.index, + req) + + def test_volume_index_limit_marker(self): + req = fakes.HTTPRequest.blank('/v2/volumes?marker=1&limit=1') + res_dict = self.controller.index(req) + volumes = res_dict['volumes'] + self.assertEquals(len(volumes), 1) + self.assertEquals(volumes[0]['id'], '1') + + def test_volume_index_limit_offset(self): + def stub_volume_get_all_by_project(context, project_id, marker, limit, + sort_key, sort_dir): + return [ + stubs.stub_volume(1, 
display_name='vol1'), + stubs.stub_volume(2, display_name='vol2'), + ] + self.stubs.Set(db, 'volume_get_all_by_project', + stub_volume_get_all_by_project) + req = fakes.HTTPRequest.blank('/v2/volumes?limit=2&offset=1') + res_dict = self.controller.index(req) + volumes = res_dict['volumes'] + self.assertEquals(len(volumes), 1) + self.assertEquals(volumes[0]['id'], 2) + + req = fakes.HTTPRequest.blank('/v2/volumes?limit=-1&offset=1') + self.assertRaises(exception.InvalidInput, + self.controller.index, + req) + + req = fakes.HTTPRequest.blank('/v2/volumes?limit=a&offset=1') + self.assertRaises(exception.InvalidInput, + self.controller.index, + req) + + def test_volume_detail_with_marker(self): + def stub_volume_get_all_by_project(context, project_id, marker, limit, + sort_key, sort_dir): + return [ + stubs.stub_volume(1, display_name='vol1'), + stubs.stub_volume(2, display_name='vol2'), + ] + self.stubs.Set(db, 'volume_get_all_by_project', + stub_volume_get_all_by_project) + req = fakes.HTTPRequest.blank('/v2/volumes/detail?marker=1') + res_dict = self.controller.index(req) + volumes = res_dict['volumes'] + self.assertEquals(len(volumes), 2) + self.assertEquals(volumes[0]['id'], 1) + self.assertEquals(volumes[1]['id'], 2) + + def test_volume_detail_limit(self): + req = fakes.HTTPRequest.blank('/v2/volumes/detail?limit=1') + res_dict = self.controller.index(req) + volumes = res_dict['volumes'] + self.assertEquals(len(volumes), 1) + + def test_volume_detail_limit_negative(self): + req = fakes.HTTPRequest.blank('/v2/volumes/detail?limit=-1') + self.assertRaises(exception.Invalid, + self.controller.index, + req) + + def test_volume_detail_limit_non_int(self): + req = fakes.HTTPRequest.blank('/v2/volumes/detail?limit=a') + self.assertRaises(exception.Invalid, + self.controller.index, + req) + + def test_volume_detail_limit_marker(self): + req = fakes.HTTPRequest.blank('/v2/volumes/detail?marker=1&limit=1') + res_dict = self.controller.index(req) + volumes = res_dict['volumes'] + self.assertEquals(len(volumes), 1) + self.assertEquals(volumes[0]['id'], '1') + + def test_volume_detail_limit_offset(self): + def stub_volume_get_all_by_project(context, project_id, marker, limit, + sort_key, sort_dir): + return [ + stubs.stub_volume(1, display_name='vol1'), + stubs.stub_volume(2, display_name='vol2'), + ] + self.stubs.Set(db, 'volume_get_all_by_project', + stub_volume_get_all_by_project) + req = fakes.HTTPRequest.blank('/v2/volumes/detail?limit=2&offset=1') + res_dict = self.controller.index(req) + volumes = res_dict['volumes'] + self.assertEquals(len(volumes), 1) + self.assertEquals(volumes[0]['id'], 2) + + req = fakes.HTTPRequest.blank('/v2/volumes/detail?limit=-1&offset=1') + self.assertRaises(exception.InvalidInput, + self.controller.index, + req) + + req = fakes.HTTPRequest.blank('/v2/volumes/detail?limit=a&offset=1') + self.assertRaises(exception.InvalidInput, + self.controller.index, + req) + + def test_volume_list_by_name(self): + def stub_volume_get_all_by_project(context, project_id, marker, limit, + sort_key, sort_dir): + return [ + stubs.stub_volume(1, display_name='vol1'), + stubs.stub_volume(2, display_name='vol2'), + stubs.stub_volume(3, display_name='vol3'), + ] + self.stubs.Set(db, 'volume_get_all_by_project', + stub_volume_get_all_by_project) + + # no name filter + req = fakes.HTTPRequest.blank('/v2/volumes') + resp = self.controller.index(req) + self.assertEqual(len(resp['volumes']), 3) + # filter on name + req = fakes.HTTPRequest.blank('/v2/volumes?name=vol2') + resp = 
self.controller.index(req) + self.assertEqual(len(resp['volumes']), 1) + self.assertEqual(resp['volumes'][0]['name'], 'vol2') + # filter no match + req = fakes.HTTPRequest.blank('/v2/volumes?name=vol4') + resp = self.controller.index(req) + self.assertEqual(len(resp['volumes']), 0) + + def test_volume_list_by_status(self): + def stub_volume_get_all_by_project(context, project_id, marker, limit, + sort_key, sort_dir): + return [ + stubs.stub_volume(1, display_name='vol1', status='available'), + stubs.stub_volume(2, display_name='vol2', status='available'), + stubs.stub_volume(3, display_name='vol3', status='in-use'), + ] + self.stubs.Set(db, 'volume_get_all_by_project', + stub_volume_get_all_by_project) + # no status filter + req = fakes.HTTPRequest.blank('/v2/volumes/details') + resp = self.controller.detail(req) + self.assertEqual(len(resp['volumes']), 3) + # single match + req = fakes.HTTPRequest.blank('/v2/volumes/details?status=in-use') + resp = self.controller.detail(req) + self.assertEqual(len(resp['volumes']), 1) + self.assertEqual(resp['volumes'][0]['status'], 'in-use') + # multiple match + req = fakes.HTTPRequest.blank('/v2/volumes/details/?status=available') + resp = self.controller.detail(req) + self.assertEqual(len(resp['volumes']), 2) + for volume in resp['volumes']: + self.assertEqual(volume['status'], 'available') + # multiple filters + req = fakes.HTTPRequest.blank('/v2/volumes/details/?status=available&' + 'name=vol1') + resp = self.controller.detail(req) + self.assertEqual(len(resp['volumes']), 1) + self.assertEqual(resp['volumes'][0]['name'], 'vol1') + self.assertEqual(resp['volumes'][0]['status'], 'available') + # no match + req = fakes.HTTPRequest.blank('/v2/volumes/details?status=in-use&' + 'name=vol1') + resp = self.controller.detail(req) + self.assertEqual(len(resp['volumes']), 0) + + def test_volume_show(self): + req = fakes.HTTPRequest.blank('/v2/volumes/1') + res_dict = self.controller.show(req, '1') + expected = { + 'volume': { + 'status': 'fakestatus', + 'description': 'displaydesc', + 'availability_zone': 'fakeaz', + 'name': 'displayname', + 'attachments': [ + { + 'device': '/', + 'server_id': 'fakeuuid', + 'id': '1', + 'volume_id': '1' + } + ], + 'volume_type': 'vol_type_name', + 'snapshot_id': None, + 'source_volid': None, + 'metadata': {}, + 'id': '1', + 'created_at': datetime.datetime(1, 1, 1, 1, 1, 1), + 'size': 1, + 'links': [ + { + 'href': 'http://localhost/v1/fake/volumes/1', + 'rel': 'self' + }, + { + 'href': 'http://localhost/fake/volumes/1', + 'rel': 'bookmark' + } + ], + } + } + self.assertEqual(res_dict, expected) + + def test_volume_show_no_attachments(self): + def stub_volume_get(self, context, volume_id): + return stubs.stub_volume(volume_id, attach_status='detached') + + self.stubs.Set(volume_api.API, 'get', stub_volume_get) + + req = fakes.HTTPRequest.blank('/v2/volumes/1') + res_dict = self.controller.show(req, '1') + expected = { + 'volume': { + 'status': 'fakestatus', + 'description': 'displaydesc', + 'availability_zone': 'fakeaz', + 'name': 'displayname', + 'attachments': [], + 'volume_type': 'vol_type_name', + 'snapshot_id': None, + 'source_volid': None, + 'metadata': {}, + 'id': '1', + 'created_at': datetime.datetime(1, 1, 1, 1, 1, 1), + 'size': 1, + 'links': [ + { + 'href': 'http://localhost/v1/fake/volumes/1', + 'rel': 'self' + }, + { + 'href': 'http://localhost/fake/volumes/1', + 'rel': 'bookmark' + } + ], + } + } + + self.assertEqual(res_dict, expected) + + def test_volume_show_no_volume(self): + self.stubs.Set(volume_api.API, 
"get", stubs.stub_volume_get_notfound) + + req = fakes.HTTPRequest.blank('/v2/volumes/1') + self.assertRaises(webob.exc.HTTPNotFound, self.controller.show, + req, 1) + + def test_volume_delete(self): + req = fakes.HTTPRequest.blank('/v2/volumes/1') + resp = self.controller.delete(req, 1) + self.assertEqual(resp.status_int, 202) + + def test_volume_delete_no_volume(self): + self.stubs.Set(volume_api.API, "get", stubs.stub_volume_get_notfound) + + req = fakes.HTTPRequest.blank('/v2/volumes/1') + self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete, + req, 1) + + def test_admin_list_volumes_limited_to_project(self): + req = fakes.HTTPRequest.blank('/v2/fake/volumes', + use_admin_context=True) + res = self.controller.index(req) + + self.assertTrue('volumes' in res) + self.assertEqual(1, len(res['volumes'])) + + def test_admin_list_volumes_all_tenants(self): + req = fakes.HTTPRequest.blank('/v2/fake/volumes?all_tenants=1', + use_admin_context=True) + res = self.controller.index(req) + self.assertTrue('volumes' in res) + self.assertEqual(3, len(res['volumes'])) + + def test_all_tenants_non_admin_gets_all_tenants(self): + req = fakes.HTTPRequest.blank('/v2/fake/volumes?all_tenants=1') + res = self.controller.index(req) + self.assertTrue('volumes' in res) + self.assertEqual(1, len(res['volumes'])) + + def test_non_admin_get_by_project(self): + req = fakes.HTTPRequest.blank('/v2/fake/volumes') + res = self.controller.index(req) + self.assertTrue('volumes' in res) + self.assertEqual(1, len(res['volumes'])) + + def _create_volume_bad_request(self, body): + req = fakes.HTTPRequest.blank('/v2/fake/volumes') + req.method = 'POST' + + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.create, req, body) + + def test_create_no_body(self): + self._create_volume_bad_request(body=None) + + def test_create_missing_volume(self): + body = {'foo': {'a': 'b'}} + self._create_volume_bad_request(body=body) + + def test_create_malformed_entity(self): + body = {'volume': 'string'} + self._create_volume_bad_request(body=body) + + +class VolumeSerializerTest(test.TestCase): + def _verify_volume_attachment(self, attach, tree): + for attr in ('id', 'volume_id', 'server_id', 'device'): + self.assertEqual(str(attach[attr]), tree.get(attr)) + + def _verify_volume(self, vol, tree): + self.assertEqual(tree.tag, NS + 'volume') + + for attr in ('id', 'status', 'size', 'availability_zone', 'created_at', + 'name', 'description', 'volume_type', + 'snapshot_id', 'source_volid'): + self.assertEqual(str(vol[attr]), tree.get(attr)) + + for child in tree: + print child.tag + self.assertTrue(child.tag in (NS + 'attachments', NS + 'metadata')) + if child.tag == 'attachments': + self.assertEqual(1, len(child)) + self.assertEqual('attachment', child[0].tag) + self._verify_volume_attachment(vol['attachments'][0], child[0]) + elif child.tag == 'metadata': + not_seen = set(vol['metadata'].keys()) + for gr_child in child: + self.assertTrue(gr_child.get("key") in not_seen) + self.assertEqual(str(vol['metadata'][gr_child.get("key")]), + gr_child.text) + not_seen.remove(gr_child.get('key')) + self.assertEqual(0, len(not_seen)) + + def test_volume_show_create_serializer(self): + serializer = volumes.VolumeTemplate() + raw_volume = dict( + id='vol_id', + status='vol_status', + size=1024, + availability_zone='vol_availability', + created_at=datetime.datetime.now(), + attachments=[ + dict( + id='vol_id', + volume_id='vol_id', + server_id='instance_uuid', + device='/foo' + ) + ], + name='vol_name', + description='vol_desc', + 
volume_type='vol_type', + snapshot_id='snap_id', + source_volid='source_volid', + metadata=dict( + foo='bar', + baz='quux', + ), + ) + text = serializer.serialize(dict(volume=raw_volume)) + + print text + tree = etree.fromstring(text) + + self._verify_volume(raw_volume, tree) + + def test_volume_index_detail_serializer(self): + serializer = volumes.VolumesTemplate() + raw_volumes = [ + dict( + id='vol1_id', + status='vol1_status', + size=1024, + availability_zone='vol1_availability', + created_at=datetime.datetime.now(), + attachments=[ + dict( + id='vol1_id', + volume_id='vol1_id', + server_id='instance_uuid', + device='/foo1' + ) + ], + name='vol1_name', + description='vol1_desc', + volume_type='vol1_type', + snapshot_id='snap1_id', + source_volid=None, + metadata=dict(foo='vol1_foo', + bar='vol1_bar', ), ), + dict( + id='vol2_id', + status='vol2_status', + size=1024, + availability_zone='vol2_availability', + created_at=datetime.datetime.now(), + attachments=[dict(id='vol2_id', + volume_id='vol2_id', + server_id='instance_uuid', + device='/foo2')], + name='vol2_name', + description='vol2_desc', + volume_type='vol2_type', + snapshot_id='snap2_id', + source_volid=None, + metadata=dict(foo='vol2_foo', + bar='vol2_bar', ), )] + text = serializer.serialize(dict(volumes=raw_volumes)) + + print text + tree = etree.fromstring(text) + + self.assertEqual(NS + 'volumes', tree.tag) + self.assertEqual(len(raw_volumes), len(tree)) + for idx, child in enumerate(tree): + self._verify_volume(raw_volumes[idx], child) + + +class TestVolumeCreateRequestXMLDeserializer(test.TestCase): + + def setUp(self): + super(TestVolumeCreateRequestXMLDeserializer, self).setUp() + self.deserializer = volumes.CreateDeserializer() + + def test_minimal_volume(self): + self_request = """ +<volume xmlns="http://docs.openstack.org/compute/api/v1.1" + size="1"></volume>""" + request = self.deserializer.deserialize(self_request) + expected = { + "volume": { + "size": "1", + }, + } + self.assertEquals(request['body'], expected) + + def test_name(self): + self_request = """ +<volume xmlns="http://docs.openstack.org/compute/api/v1.1" + size="1" + name="Volume-xml"></volume>""" + request = self.deserializer.deserialize(self_request) + expected = { + "volume": { + "size": "1", + "name": "Volume-xml", + }, + } + self.assertEquals(request['body'], expected) + + def test_description(self): + self_request = """ +<volume xmlns="http://docs.openstack.org/compute/api/v1.1" + size="1" + name="Volume-xml" + description="description"></volume>""" + request = self.deserializer.deserialize(self_request) + expected = { + "volume": { + "size": "1", + "name": "Volume-xml", + "description": "description", + }, + } + self.assertEquals(request['body'], expected) + + def test_volume_type(self): + self_request = """ +<volume xmlns="http://docs.openstack.org/compute/api/v1.1" + size="1" + name="Volume-xml" + description="description" + volume_type="289da7f8-6440-407c-9fb4-7db01ec49164"></volume>""" + request = self.deserializer.deserialize(self_request) + expected = { + "volume": { + "size": "1", + "name": "Volume-xml", + "description": "description", + "volume_type": "289da7f8-6440-407c-9fb4-7db01ec49164", + }, + } + self.assertEquals(request['body'], expected) + + def test_availability_zone(self): + self_request = """ +<volume xmlns="http://docs.openstack.org/compute/api/v1.1" + size="1" + name="Volume-xml" + description="description" + volume_type="289da7f8-6440-407c-9fb4-7db01ec49164" + availability_zone="us-east1"></volume>""" + request = self.deserializer.deserialize(self_request) + expected = { + "volume": { + "size": "1", + "name": "Volume-xml", + "description": "description", + "volume_type": "289da7f8-6440-407c-9fb4-7db01ec49164", + "availability_zone": "us-east1", + }, + } + self.assertEquals(request['body'], expected) + + def test_metadata(self): + self_request = """ +<volume xmlns="http://docs.openstack.org/compute/api/v1.1" + name="Volume-xml" + size="1"> + <metadata><meta key="Type">work</meta></metadata></volume>""" + request = self.deserializer.deserialize(self_request) + expected = { + "volume": { + "name": "Volume-xml", + "size": "1", + "metadata": { + "Type": "work", + }, + }, + } + self.assertEquals(request['body'], expected) + + def test_full_volume(self): + self_request = """ +<volume xmlns="http://docs.openstack.org/compute/api/v1.1" + size="1" + name="Volume-xml" + description="description" + volume_type="289da7f8-6440-407c-9fb4-7db01ec49164" + availability_zone="us-east1"> + <metadata><meta key="Type">work</meta></metadata></volume>""" + request = 
self.deserializer.deserialize(self_request) + expected = { + "volume": { + "size": "1", + "name": "Volume-xml", + "description": "description", + "volume_type": "289da7f8-6440-407c-9fb4-7db01ec49164", + "availability_zone": "us-east1", + "metadata": { + "Type": "work", + }, + }, + } + self.assertEquals(request['body'], expected) diff --git a/cinder/tests/backup/__init__.py b/cinder/tests/backup/__init__.py new file mode 100644 index 0000000000..cdf1b48e4b --- /dev/null +++ b/cinder/tests/backup/__init__.py @@ -0,0 +1,14 @@ +# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/cinder/tests/backup/fake_service.py b/cinder/tests/backup/fake_service.py new file mode 100644 index 0000000000..c8a182eb26 --- /dev/null +++ b/cinder/tests/backup/fake_service.py @@ -0,0 +1,41 @@ +# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from cinder.db import base +from cinder.openstack.common import log as logging + +LOG = logging.getLogger(__name__) + + +class FakeBackupService(base.Base): + def __init__(self, context, db_driver=None): + super(FakeBackupService, self).__init__(db_driver) + + def backup(self, backup, volume_file): + pass + + def restore(self, backup, volume_id, volume_file): + pass + + def delete(self, backup): + # if backup has magic name of 'fail_on_delete' + # we raise an error - useful for some tests - + # otherwise we return without error + if backup['display_name'] == 'fail_on_delete': + raise IOError('fake') + + +def get_backup_service(context): + return FakeBackupService(context) diff --git a/cinder/tests/backup/fake_swift_client.py b/cinder/tests/backup/fake_swift_client.py new file mode 100644 index 0000000000..ff74246797 --- /dev/null +++ b/cinder/tests/backup/fake_swift_client.py @@ -0,0 +1,111 @@ +# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +import httplib +import json +import os +import socket +import zlib + +from cinder.openstack.common import log as logging +from swiftclient import client as swift + +LOG = logging.getLogger(__name__) + + +class FakeSwiftClient(object): + """Logs calls instead of executing.""" + def __init__(self, *args, **kwargs): + pass + + @classmethod + def Connection(cls, *args, **kwargs): + LOG.debug("fake FakeSwiftClient Connection") + return FakeSwiftConnection() + + +class FakeSwiftConnection(object): + """Logs calls instead of executing.""" + def __init__(self, *args, **kwargs): + pass + + def head_container(self, container): + LOG.debug("fake head_container(%s)" % container) + if container == 'missing_container': + raise swift.ClientException('fake exception', + http_status=httplib.NOT_FOUND) + elif container == 'unauthorized_container': + raise swift.ClientException('fake exception', + http_status=httplib.UNAUTHORIZED) + elif container == 'socket_error_on_head': + raise socket.error(111, 'ECONNREFUSED') + pass + + def put_container(self, container): + LOG.debug("fake put_container(%s)" % container) + pass + + def get_container(self, container, **kwargs): + LOG.debug("fake get_container(%s)" % container) + fake_header = None + fake_body = [{'name': 'backup_001'}, + {'name': 'backup_002'}, + {'name': 'backup_003'}] + return fake_header, fake_body + + def head_object(self, container, name): + LOG.debug("fake head_object(%s, %s)" % (container, name)) + return {'etag': 'fake-md5-sum'} + + def get_object(self, container, name): + LOG.debug("fake get_object(%s, %s)" % (container, name)) + if container == 'socket_error_on_get': + raise socket.error(111, 'ECONNREFUSED') + if 'metadata' in name: + fake_object_header = None + metadata = {} + if container == 'unsupported_version': + metadata['version'] = '9.9.9' + else: + metadata['version'] = '1.0.0' + metadata['backup_id'] = 123 + metadata['volume_id'] = 123 + metadata['backup_name'] = 'fake backup' + metadata['backup_description'] = 'fake backup description' + metadata['created_at'] = '2013-02-19 11:20:54,805' + metadata['objects'] = [{ + 'backup_001': {'compression': 'zlib', 'length': 10}, + 'backup_002': {'compression': 'zlib', 'length': 10}, + 'backup_003': {'compression': 'zlib', 'length': 10} + }] + metadata_json = json.dumps(metadata, sort_keys=True, indent=2) + fake_object_body = metadata_json + return (fake_object_header, fake_object_body) + + fake_header = None + fake_object_body = os.urandom(1024 * 1024) + return (fake_header, zlib.compress(fake_object_body)) + + def put_object(self, container, name, reader): + LOG.debug("fake put_object(%s, %s)" % (container, name)) + if container == 'socket_error_on_put': + raise socket.error(111, 'ECONNREFUSED') + return 'fake-md5-sum' + + def delete_object(self, container, name): + LOG.debug("fake delete_object(%s, %s)" % (container, name)) + if container == 'socket_error_on_delete': + raise socket.error(111, 'ECONNREFUSED') + pass diff --git a/cinder/tests/brick/__init__.py b/cinder/tests/brick/__init__.py new file mode 100644 index 0000000000..5e8da711fb --- /dev/null +++ b/cinder/tests/brick/__init__.py @@ -0,0 +1,16 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/cinder/tests/brick/test_brick_lvm.py b/cinder/tests/brick/test_brick_lvm.py new file mode 100644 index 0000000000..9d3ac6b23e --- /dev/null +++ b/cinder/tests/brick/test_brick_lvm.py @@ -0,0 +1,144 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import mox + + +from cinder.brick.local_dev import lvm as brick +from cinder.openstack.common import log as logging +from cinder.openstack.common import processutils +from cinder import test +from cinder.volume import configuration as conf + +LOG = logging.getLogger(__name__) + + +def create_configuration(): + configuration = mox.MockObject(conf.Configuration) + configuration.append_config_values(mox.IgnoreArg()) + return configuration + + +class BrickLvmTestCase(test.TestCase): + def setUp(self): + self._mox = mox.Mox() + self.configuration = mox.MockObject(conf.Configuration) + self.configuration.volume_group_name = 'fake-volumes' + super(BrickLvmTestCase, self).setUp() + self.stubs.Set(processutils, 'execute', + self.fake_execute) + self.vg = brick.LVM(self.configuration.volume_group_name) + + def failed_fake_execute(obj, *cmd, **kwargs): + return ("\n", "fake-error") + + def fake_pretend_lvm_version(obj, *cmd, **kwargs): + return (" LVM version: 2.03.00 (2012-03-06)\n", "") + + def fake_old_lvm_version(obj, *cmd, **kwargs): + return (" LVM version: 2.02.65(2) (2012-03-06)\n", "") + + def fake_execute(obj, *cmd, **kwargs): + cmd_string = ', '.join(cmd) + data = "\n" + + if 'vgs, --noheadings, -o, name' == cmd_string: + data = " fake-volumes\n" + if 'vgs, --version' in cmd_string: + data = " LVM version: 2.02.95(2) (2012-03-06)\n" + elif 'vgs, --noheadings, -o uuid, fake-volumes' in cmd_string: + data = " kVxztV-dKpG-Rz7E-xtKY-jeju-QsYU-SLG6Z1\n" + elif 'vgs, --noheadings, -o, name,size,free,lv_count,uuid' in\ cmd_string: + data = " fake-volumes:10.00g:10.00g:0:"\ "kVxztV-dKpG-Rz7E-xtKY-jeju-QsYU-SLG6Z1\n" + if 'fake-volumes' in cmd_string: + return (data, "") + data += " fake-volumes-2:10.00g:10.00g:0:"\ "lWyauW-dKpG-Rz7E-xtKY-jeju-QsYU-SLG7Z2\n" + data += " fake-volumes-3:10.00g:10.00g:0:"\ "mXzbuX-dKpG-Rz7E-xtKY-jeju-QsYU-SLG8Z3\n" + elif 'lvs, --noheadings, -o, vg_name,name,size' in cmd_string: + data = " fake-volumes fake-1 1.00g\n" + data += " fake-volumes fake-2 1.00g\n" + elif 'pvs, --noheadings' in cmd_string and \ 'fake-volumes' in cmd_string: + data = " fake-volumes:/dev/sda:10.00g:8.99g\n" + elif 'pvs, --noheadings' in cmd_string: + data = " fake-volumes:/dev/sda:10.00g:8.99g\n" + data += " fake-volumes-2:/dev/sdb:10.00g:8.99g\n" + data += " fake-volumes-3:/dev/sdc:10.00g:8.99g\n" + else: + pass + + return (data, "") + + def test_vg_exists(self): + self.stubs.Set(processutils, 'execute', self.fake_execute) + self.assertEqual(self.vg._vg_exists(), True) + + self.stubs.Set(processutils, 'execute', self.failed_fake_execute) + self.assertEqual(self.vg._vg_exists(), False) + + def test_get_vg_uuid(self): + self.stubs.Set(processutils, 'execute', self.fake_execute) + self.assertEqual(self.vg._get_vg_uuid()[0], + 'kVxztV-dKpG-Rz7E-xtKY-jeju-QsYU-SLG6Z1') + + def test_get_all_volumes(self): + self.stubs.Set(processutils, 'execute', self.fake_execute) + out = self.vg.get_volumes() + + self.assertEqual(out[0]['name'], 'fake-1') + self.assertEqual(out[0]['size'], '1.00g') + self.assertEqual(out[0]['vg'], 'fake-volumes') + + def test_get_volume(self): + self.stubs.Set(processutils, 'execute', self.fake_execute) + self.assertEqual(self.vg.get_volume('fake-1')['name'], 'fake-1') + + def test_get_all_physical_volumes(self): + self.stubs.Set(processutils, 'execute', self.fake_execute) + pvs = self.vg.get_all_physical_volumes() + self.assertEqual(len(pvs), 3) + + def test_get_physical_volumes(self): + self.stubs.Set(processutils, 'execute', self.fake_execute) + pvs = self.vg.get_physical_volumes() + self.assertEqual(len(pvs), 1) + + def test_get_volume_groups(self): + self.stubs.Set(processutils, 'execute', self.fake_execute) + self.assertEqual(len(self.vg.get_all_volume_groups()), 3) + self.assertEqual(len(self.vg.get_all_volume_groups('fake-volumes')), 1) + + def test_update_vg_info(self): + self.stubs.Set(processutils, 'execute', self.fake_execute) + self.assertEqual(self.vg.update_volume_group_info()['name'], + 'fake-volumes') + + def test_thin_support(self): + self.stubs.Set(processutils, 'execute', self.fake_execute) + self.assertTrue(self.vg.supports_thin_provisioning()) + + self.stubs.Set(processutils, 'execute', self.fake_pretend_lvm_version) + self.assertTrue(self.vg.supports_thin_provisioning()) + + self.stubs.Set(processutils, 'execute', self.fake_old_lvm_version) + self.assertFalse(self.vg.supports_thin_provisioning()) diff --git a/cinder/tests/db/__init__.py b/cinder/tests/db/__init__.py new file mode 100644 index 0000000000..2d43aac42f --- /dev/null +++ b/cinder/tests/db/__init__.py @@ -0,0 +1,20 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2010 Citrix Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +:mod:`db` -- Stubs for DB API +============================= +""" diff --git a/cinder/tests/db/fakes.py b/cinder/tests/db/fakes.py new file mode 100644 index 0000000000..c831fdba2c --- /dev/null +++ b/cinder/tests/db/fakes.py @@ -0,0 +1,46 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 X.commerce, a business unit of eBay Inc. 
+# Copyright 2010 OpenStack, LLC +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Stubouts, mocks and fixtures for the test suite.""" + +from cinder import db + + +class FakeModel(object): + """Stubs out for model.""" + def __init__(self, values): + self.values = values + + def __getattr__(self, name): + return self.values[name] + + def __getitem__(self, key): + if key in self.values: + return self.values[key] + else: + raise NotImplementedError() + + def __repr__(self): + return '<FakeModel: %s>' % self.values + + +def stub_out(stubs, funcs): + """Set the stubs in mapping in the db api.""" + for func in funcs: + func_name = '_'.join(func.__name__.split('_')[1:]) + stubs.Set(db, func_name, func) diff --git a/cinder/tests/declare_flags.py b/cinder/tests/declare_flags.py new file mode 100644 index 0000000000..4f569c0266 --- /dev/null +++ b/cinder/tests/declare_flags.py @@ -0,0 +1,24 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo.config import cfg + +from cinder import flags + +FLAGS = flags.FLAGS +FLAGS.register_opt(cfg.IntOpt('answer', default=42, help='test flag')) diff --git a/cinder/tests/fake_driver.py b/cinder/tests/fake_driver.py new file mode 100644 index 0000000000..10f791e77b --- /dev/null +++ b/cinder/tests/fake_driver.py @@ -0,0 +1,118 @@ +# Copyright 2012 OpenStack LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
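+ +"""Fake volume drivers for the unit tests: FakeISCSIDriver logs iSCSI +calls instead of executing them, and LoggingVolumeDriver records each +driver call so tests can assert on it afterwards."""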
+ +from cinder.openstack.common import log as logging +from cinder.volume import driver +from cinder.volume.drivers import lvm + + +LOG = logging.getLogger(__name__) + + +class FakeISCSIDriver(lvm.LVMISCSIDriver): + """Logs calls instead of executing.""" + def __init__(self, *args, **kwargs): + super(FakeISCSIDriver, self).__init__(execute=self.fake_execute, + *args, **kwargs) + + def check_for_setup_error(self): + """No setup necessary in fake mode.""" + pass + + def initialize_connection(self, volume, connector): + return { + 'driver_volume_type': 'iscsi', + 'data': {} + } + + def terminate_connection(self, volume, connector, **kwargs): + pass + + @staticmethod + def fake_execute(cmd, *_args, **_kwargs): + """Execute that simply logs the command.""" + LOG.debug(_("FAKE ISCSI: %s"), cmd) + return (None, None) + + +class LoggingVolumeDriver(driver.VolumeDriver): + """Logs and records calls, for unit tests.""" + + def check_for_setup_error(self): + pass + + def create_volume(self, volume): + self.log_action('create_volume', volume) + + def delete_volume(self, volume): + self.clear_volume(volume) + self.log_action('delete_volume', volume) + + def clear_volume(self, volume): + self.log_action('clear_volume', volume) + + def local_path(self, volume): + print "local_path not implemented" + raise NotImplementedError() + + def ensure_export(self, context, volume): + self.log_action('ensure_export', volume) + + def create_export(self, context, volume): + self.log_action('create_export', volume) + + def remove_export(self, context, volume): + self.log_action('remove_export', volume) + + def initialize_connection(self, volume, connector): + self.log_action('initialize_connection', volume) + + def terminate_connection(self, volume, connector): + self.log_action('terminate_connection', volume) + + _LOGS = [] + + @staticmethod + def clear_logs(): + LoggingVolumeDriver._LOGS = [] + + @staticmethod + def log_action(action, parameters): + """Logs the command.""" + LOG.debug(_("LoggingVolumeDriver: %s") % (action)) + log_dictionary = {} + if parameters: + log_dictionary = dict(parameters) + log_dictionary['action'] = action + LOG.debug(_("LoggingVolumeDriver: %s") % (log_dictionary)) + LoggingVolumeDriver._LOGS.append(log_dictionary) + + @staticmethod + def all_logs(): + return LoggingVolumeDriver._LOGS + + @staticmethod + def logs_like(action, **kwargs): + matches = [] + for entry in LoggingVolumeDriver._LOGS: + if entry['action'] != action: + continue + match = True + for k, v in kwargs.iteritems(): + if entry.get(k) != v: + match = False + break + if match: + matches.append(entry) + return matches diff --git a/cinder/tests/fake_flags.py b/cinder/tests/fake_flags.py new file mode 100644 index 0000000000..900ea4b076 --- /dev/null +++ b/cinder/tests/fake_flags.py @@ -0,0 +1,46 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +from cinder import flags + +FLAGS = flags.FLAGS + +flags.DECLARE('iscsi_num_targets', 'cinder.volume.drivers.lvm') +flags.DECLARE('policy_file', 'cinder.policy') +flags.DECLARE('volume_driver', 'cinder.volume.manager') +flags.DECLARE('xiv_proxy', 'cinder.volume.drivers.xiv') +flags.DECLARE('backup_service', 'cinder.backup.manager') + +def_vol_type = 'fake_vol_type' + + +def set_defaults(conf): + conf.set_default('default_volume_type', def_vol_type) + conf.set_default('volume_driver', + 'cinder.tests.fake_driver.FakeISCSIDriver') + conf.set_default('iscsi_helper', 'fake') + conf.set_default('connection_type', 'fake') + conf.set_default('fake_rabbit', True) + conf.set_default('rpc_backend', 'cinder.openstack.common.rpc.impl_fake') + conf.set_default('iscsi_num_targets', 8) + conf.set_default('verbose', True) + conf.set_default('sql_connection', "sqlite://") + conf.set_default('sqlite_synchronous', False) + conf.set_default('policy_file', 'cinder/tests/policy.json') + conf.set_default('xiv_proxy', 'cinder.tests.test_xiv.XIVFakeProxyDriver') + conf.set_default('backup_service', 'cinder.tests.backup.fake_service') diff --git a/cinder/tests/fake_utils.py b/cinder/tests/fake_utils.py new file mode 100644 index 0000000000..68aaf79646 --- /dev/null +++ b/cinder/tests/fake_utils.py @@ -0,0 +1,112 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 Citrix Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""This module stubs out functions in cinder.utils.""" + +import re + +from eventlet import greenthread + +from cinder import exception +from cinder.openstack.common import log as logging +from cinder import utils + +LOG = logging.getLogger(__name__) + +_fake_execute_repliers = [] +_fake_execute_log = [] + + +def fake_execute_get_log(): + return _fake_execute_log + + +def fake_execute_clear_log(): + global _fake_execute_log + _fake_execute_log = [] + + +def fake_execute_set_repliers(repliers): + """Allows the client to configure replies to commands.""" + global _fake_execute_repliers + _fake_execute_repliers = repliers + + +def fake_execute_default_reply_handler(*ignore_args, **ignore_kwargs): + """A reply handler for commands that haven't been added to the reply list. + + Returns empty strings for stdout and stderr. + + """ + return '', '' + + +def fake_execute(*cmd_parts, **kwargs): + """This function stubs out execute. + + It optionally executes a preconfigured function to return expected data. 
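+ + Example (illustrative): a replier may be a plain string, which is + returned as stdout with an empty stderr for any command whose string + form matches the replier's regex: + + fake_execute_set_repliers([(r'^vgs', 'fake vgs output')]) + stdout, stderr = fake_execute('vgs', '--noheadings') + # stdout == 'fake vgs output', stderr == ''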
+ + """ + global _fake_execute_repliers + + process_input = kwargs.get('process_input', None) + check_exit_code = kwargs.get('check_exit_code', 0) + delay_on_retry = kwargs.get('delay_on_retry', True) + attempts = kwargs.get('attempts', 1) + run_as_root = kwargs.get('run_as_root', False) + cmd_str = ' '.join(str(part) for part in cmd_parts) + + LOG.debug(_("Faking execution of cmd (subprocess): %s"), cmd_str) + _fake_execute_log.append(cmd_str) + + reply_handler = fake_execute_default_reply_handler + + for fake_replier in _fake_execute_repliers: + if re.match(fake_replier[0], cmd_str): + reply_handler = fake_replier[1] + LOG.debug(_('Faked command matched %s') % fake_replier[0]) + break + + if isinstance(reply_handler, basestring): + # If the reply handler is a string, return it as stdout + reply = reply_handler, '' + else: + try: + # Alternative is a function, so call it + reply = reply_handler(cmd_parts, + process_input=process_input, + delay_on_retry=delay_on_retry, + attempts=attempts, + run_as_root=run_as_root, + check_exit_code=check_exit_code) + except exception.ProcessExecutionError as e: + LOG.debug(_('Faked command raised an exception %s'), e) + raise + + stdout = reply[0] + stderr = reply[1] + LOG.debug(_("Reply to faked command is stdout='%(stdout)s' " + "stderr='%(stderr)s'") % locals()) + + # Replicate the sleep call in the real function + greenthread.sleep(0) + return reply + + +def stub_out_utils_execute(stubs): + fake_execute_set_repliers([]) + fake_execute_clear_log() + stubs.Set(utils, 'execute', fake_execute) diff --git a/cinder/tests/glance/__init__.py b/cinder/tests/glance/__init__.py new file mode 100644 index 0000000000..ef9fa05a7c --- /dev/null +++ b/cinder/tests/glance/__init__.py @@ -0,0 +1,20 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 Citrix Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +:mod:`glance` -- Stubs for Glance +================================= +""" diff --git a/cinder/tests/glance/stubs.py b/cinder/tests/glance/stubs.py new file mode 100644 index 0000000000..076afeffc9 --- /dev/null +++ b/cinder/tests/glance/stubs.py @@ -0,0 +1,112 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 Citrix Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import glanceclient.exc + + +NOW_GLANCE_FORMAT = "2010-10-11T10:30:22" + + +class StubGlanceClient(object): + + def __init__(self, images=None): + self._images = [] + _images = images or [] + map(lambda image: self.create(**image), _images) + + #NOTE(bcwaldon): HACK to get client.images.* to work + self.images = lambda: None + for fn in ('list', 'get', 'data', 'create', 'update', 'delete'): + setattr(self.images, fn, getattr(self, fn)) + + #TODO(bcwaldon): implement filters + def list(self, filters=None, marker=None, limit=30): + if marker is None: + index = 0 + else: + for index, image in enumerate(self._images): + if image.id == str(marker): + index += 1 + break + else: + raise glanceclient.exc.BadRequest('Marker not found') + + return self._images[index:index + limit] + + def get(self, image_id): + for image in self._images: + if image.id == str(image_id): + return image + raise glanceclient.exc.NotFound(image_id) + + def data(self, image_id): + self.get(image_id) + return [] + + def create(self, **metadata): + metadata['created_at'] = NOW_GLANCE_FORMAT + metadata['updated_at'] = NOW_GLANCE_FORMAT + + self._images.append(FakeImage(metadata)) + + try: + image_id = str(metadata['id']) + except KeyError: + # auto-generate an id if one wasn't provided + image_id = str(len(self._images)) + + self._images[-1].id = image_id + + return self._images[-1] + + def update(self, image_id, **metadata): + for i, image in enumerate(self._images): + if image.id == str(image_id): + for k, v in metadata.items(): + setattr(self._images[i], k, v) + return self._images[i] + raise glanceclient.exc.NotFound(image_id) + + def delete(self, image_id): + for i, image in enumerate(self._images): + if image.id == image_id: + del self._images[i] + return + raise glanceclient.exc.NotFound(image_id) + + +class FakeImage(object): + def __init__(self, metadata): + IMAGE_ATTRIBUTES = ['size', 'disk_format', 'owner', + 'container_format', 'checksum', 'id', + 'name', 'created_at', 'updated_at', + 'deleted', 'status', + 'min_disk', 'min_ram', 'is_public'] + raw = dict.fromkeys(IMAGE_ATTRIBUTES) + raw.update(metadata) + self.__dict__['raw'] = raw + + def __getattr__(self, key): + try: + return self.__dict__['raw'][key] + except KeyError: + raise AttributeError(key) + + def __setattr__(self, key, value): + try: + self.__dict__['raw'][key] = value + except KeyError: + raise AttributeError(key) diff --git a/cinder/tests/image/__init__.py b/cinder/tests/image/__init__.py new file mode 100644 index 0000000000..387872b21f --- /dev/null +++ b/cinder/tests/image/__init__.py @@ -0,0 +1,20 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +# NOTE(vish): this forces the fixtures from tests/__init.py:setup() to work + +from cinder.tests import * diff --git a/cinder/tests/image/fake.py b/cinder/tests/image/fake.py new file mode 100644 index 0000000000..e8feb2433d --- /dev/null +++ b/cinder/tests/image/fake.py @@ -0,0 +1,243 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 Justin Santa Barbara +# Copyright 2012 OpenStack LLC +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Implementation of a fake image service.""" + +import copy +import datetime +import uuid + +from cinder import exception +from cinder import flags +import cinder.image.glance +from cinder.openstack.common import log as logging + + +LOG = logging.getLogger(__name__) + + +FLAGS = flags.FLAGS + + +class _FakeImageService(object): + """Mock (fake) image service for unit testing.""" + + def __init__(self): + self.images = {} + # NOTE(justinsb): The OpenStack API can't upload an image? + # So, make sure we've got one.. + timestamp = datetime.datetime(2011, 01, 01, 01, 02, 03) + + image1 = {'id': '155d900f-4e14-4e4c-a73d-069cbf4541e6', + 'name': 'fakeimage123456', + 'created_at': timestamp, + 'updated_at': timestamp, + 'deleted_at': None, + 'deleted': False, + 'status': 'active', + 'is_public': False, + 'container_format': 'raw', + 'disk_format': 'raw', + 'properties': {'kernel_id': 'nokernel', + 'ramdisk_id': 'nokernel', + 'architecture': 'x86_64'}} + + image2 = {'id': 'a2459075-d96c-40d5-893e-577ff92e721c', + 'name': 'fakeimage123456', + 'created_at': timestamp, + 'updated_at': timestamp, + 'deleted_at': None, + 'deleted': False, + 'status': 'active', + 'is_public': True, + 'container_format': 'ami', + 'disk_format': 'ami', + 'properties': {'kernel_id': 'nokernel', + 'ramdisk_id': 'nokernel'}} + + image3 = {'id': '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6', + 'name': 'fakeimage123456', + 'created_at': timestamp, + 'updated_at': timestamp, + 'deleted_at': None, + 'deleted': False, + 'status': 'active', + 'is_public': True, + 'container_format': None, + 'disk_format': None, + 'properties': {'kernel_id': 'nokernel', + 'ramdisk_id': 'nokernel'}} + + image4 = {'id': 'cedef40a-ed67-4d10-800e-17455edce175', + 'name': 'fakeimage123456', + 'created_at': timestamp, + 'updated_at': timestamp, + 'deleted_at': None, + 'deleted': False, + 'status': 'active', + 'is_public': True, + 'container_format': 'ami', + 'disk_format': 'ami', + 'properties': {'kernel_id': 'nokernel', + 'ramdisk_id': 'nokernel'}} + + image5 = {'id': 'c905cedb-7281-47e4-8a62-f26bc5fc4c77', + 'name': 'fakeimage123456', + 'created_at': timestamp, + 'updated_at': timestamp, + 'deleted_at': None, + 'deleted': False, + 'status': 'active', + 'is_public': True, + 'container_format': 'ami', + 'disk_format': 'ami', + 'properties': { + 'kernel_id': '155d900f-4e14-4e4c-a73d-069cbf4541e6', + 'ramdisk_id': None}} + + image6 = {'id': 'a440c04b-79fa-479c-bed1-0b816eaec379', + 'name': 'fakeimage6', + 'created_at': timestamp, + 'updated_at': timestamp, + 'deleted_at': None, + 
'deleted': False, + 'status': 'active', + 'is_public': False, + 'container_format': 'ova', + 'disk_format': 'vhd', + 'properties': {'kernel_id': 'nokernel', + 'ramdisk_id': 'nokernel', + 'architecture': 'x86_64', + 'auto_disk_config': 'False'}} + + image7 = {'id': '70a599e0-31e7-49b7-b260-868f441e862b', + 'name': 'fakeimage7', + 'created_at': timestamp, + 'updated_at': timestamp, + 'deleted_at': None, + 'deleted': False, + 'status': 'active', + 'is_public': False, + 'container_format': 'ova', + 'disk_format': 'vhd', + 'properties': {'kernel_id': 'nokernel', + 'ramdisk_id': 'nokernel', + 'architecture': 'x86_64', + 'auto_disk_config': 'True'}} + + self.create(None, image1) + self.create(None, image2) + self.create(None, image3) + self.create(None, image4) + self.create(None, image5) + self.create(None, image6) + self.create(None, image7) + self._imagedata = {} + super(_FakeImageService, self).__init__() + + #TODO(bcwaldon): implement optional kwargs such as limit, sort_dir + def detail(self, context, **kwargs): + """Return list of detailed image information.""" + return copy.deepcopy(self.images.values()) + + def download(self, context, image_id, data): + self.show(context, image_id) + data.write(self._imagedata.get(image_id, '')) + + def show(self, context, image_id): + """Get data about specified image. + + Returns a dict containing image data for the given opaque image id. + + """ + image = self.images.get(str(image_id)) + if image: + return copy.deepcopy(image) + LOG.warn('Unable to find image id %s. Have images: %s', + image_id, self.images) + raise exception.ImageNotFound(image_id=image_id) + + def create(self, context, metadata, data=None): + """Store the image data and return the new image id. + + :raises: Duplicate if the image already exists. + + """ + image_id = str(metadata.get('id', uuid.uuid4())) + metadata['id'] = image_id + if image_id in self.images: + raise exception.Duplicate() + self.images[image_id] = copy.deepcopy(metadata) + if data: + self._imagedata[image_id] = data.read() + return self.images[image_id] + + def update(self, context, image_id, metadata, data=None, + purge_props=False): + """Replace the contents of the given image with the new data. + + :raises: ImageNotFound if the image does not exist. + + """ + if not self.images.get(image_id): + raise exception.ImageNotFound(image_id=image_id) + if purge_props: + self.images[image_id] = copy.deepcopy(metadata) + else: + image = self.images[image_id] + try: + image['properties'].update(metadata.pop('properties')) + except Exception: + pass + image.update(metadata) + return self.images[image_id] + + def delete(self, context, image_id): + """Delete the given image. + + :raises: ImageNotFound if the image does not exist. 
+ + """ + removed = self.images.pop(image_id, None) + if not removed: + raise exception.ImageNotFound(image_id=image_id) + + def get_location(self, context, image_id): + if image_id in self.images: + return 'fake_location' + return None + +_fakeImageService = _FakeImageService() + + +def FakeImageService(): + return _fakeImageService + + +def FakeImageService_reset(): + global _fakeImageService + _fakeImageService = _FakeImageService() + + +def stub_out_image_service(stubs): + def fake_get_remote_image_service(context, image_href): + return (FakeImageService(), image_href) + stubs.Set(cinder.image.glance, 'get_remote_image_service', + lambda x, y: (FakeImageService(), y)) + stubs.Set(cinder.image.glance, 'get_default_image_service', + lambda: FakeImageService()) diff --git a/cinder/tests/image/test_glance.py b/cinder/tests/image/test_glance.py new file mode 100644 index 0000000000..46bbafa9c5 --- /dev/null +++ b/cinder/tests/image/test_glance.py @@ -0,0 +1,590 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +import datetime +import random +import time + +import glanceclient.exc + +from cinder import context +from cinder import exception +from cinder import flags +from cinder.image import glance +from cinder import test +from cinder.tests.glance import stubs as glance_stubs +from glanceclient.v2.client import Client as glanceclient_v2 + + +FLAGS = flags.FLAGS + + +class NullWriter(object): + """Used to test ImageService.get which takes a writer object.""" + + def write(self, *arg, **kwargs): + pass + + +class TestGlanceSerializer(test.TestCase): + def test_serialize(self): + metadata = {'name': 'image1', + 'is_public': True, + 'foo': 'bar', + 'properties': { + 'prop1': 'propvalue1', + 'mappings': [ + {'virtual': 'aaa', + 'device': 'bbb'}, + {'virtual': 'xxx', + 'device': 'yyy'}], + 'block_device_mapping': [ + {'virtual_device': 'fake', + 'device_name': '/dev/fake'}, + {'virtual_device': 'ephemeral0', + 'device_name': '/dev/fake0'}]}} + + converted_expected = { + 'name': 'image1', + 'is_public': True, + 'foo': 'bar', + 'properties': { + 'prop1': 'propvalue1', + 'mappings': + '[{"device": "bbb", "virtual": "aaa"}, ' + '{"device": "yyy", "virtual": "xxx"}]', + 'block_device_mapping': + '[{"virtual_device": "fake", "device_name": "/dev/fake"}, ' + '{"virtual_device": "ephemeral0", ' + '"device_name": "/dev/fake0"}]'}} + converted = glance._convert_to_string(metadata) + self.assertEqual(converted, converted_expected) + self.assertEqual(glance._convert_from_string(converted), metadata) + + +class TestGlanceImageService(test.TestCase): + """ + Tests the Glance image service. + + At a high level, the translations involved are: + + 1. Glance -> ImageService - This is needed so we can support + multple ImageServices (Glance, Local, etc) + + 2. 
ImageService -> API - This is needed so we can support multiple + APIs (OpenStack, EC2) + + """ + NOW_GLANCE_OLD_FORMAT = "2010-10-11T10:30:22" + NOW_GLANCE_FORMAT = "2010-10-11T10:30:22.000000" + + class tzinfo(datetime.tzinfo): + @staticmethod + def utcoffset(*args, **kwargs): + return datetime.timedelta() + + NOW_DATETIME = datetime.datetime(2010, 10, 11, 10, 30, 22, tzinfo=tzinfo()) + + def setUp(self): + super(TestGlanceImageService, self).setUp() + #fakes.stub_out_compute_api_snapshot(self.stubs) + + client = glance_stubs.StubGlanceClient() + self.service = self._create_image_service(client) + self.context = context.RequestContext('fake', 'fake', auth_token=True) + self.stubs.Set(glance.time, 'sleep', lambda s: None) + + def _create_image_service(self, client): + def _fake_create_glance_client(context, host, port, use_ssl, version): + return client + + self.stubs.Set(glance, + '_create_glance_client', + _fake_create_glance_client) + + client_wrapper = glance.GlanceClientWrapper('fake', 'fake_host', 9292) + return glance.GlanceImageService(client=client_wrapper) + + @staticmethod + def _make_fixture(**kwargs): + fixture = {'name': None, + 'properties': {}, + 'status': None, + 'is_public': None} + fixture.update(kwargs) + return fixture + + def _make_datetime_fixture(self): + return self._make_fixture(created_at=self.NOW_GLANCE_FORMAT, + updated_at=self.NOW_GLANCE_FORMAT, + deleted_at=self.NOW_GLANCE_FORMAT) + + def test_create_with_instance_id(self): + """Ensure instance_id is persisted as an image-property.""" + fixture = {'name': 'test image', + 'is_public': False, + 'properties': {'instance_id': '42', 'user_id': 'fake'}} + + image_id = self.service.create(self.context, fixture)['id'] + image_meta = self.service.show(self.context, image_id) + expected = { + 'id': image_id, + 'name': 'test image', + 'is_public': False, + 'size': None, + 'min_disk': None, + 'min_ram': None, + 'disk_format': None, + 'container_format': None, + 'checksum': None, + 'created_at': self.NOW_DATETIME, + 'updated_at': self.NOW_DATETIME, + 'deleted_at': None, + 'deleted': None, + 'status': None, + 'properties': {'instance_id': '42', 'user_id': 'fake'}, + 'owner': None, + } + self.assertDictMatch(image_meta, expected) + + image_metas = self.service.detail(self.context) + self.assertDictMatch(image_metas[0], expected) + + def test_create_without_instance_id(self): + """ + Ensure we can create an image without having to specify an + instance_id. Public images are an example of an image not tied to an + instance. 
+ """ + fixture = {'name': 'test image', 'is_public': False} + image_id = self.service.create(self.context, fixture)['id'] + + expected = { + 'id': image_id, + 'name': 'test image', + 'is_public': False, + 'size': None, + 'min_disk': None, + 'min_ram': None, + 'disk_format': None, + 'container_format': None, + 'checksum': None, + 'created_at': self.NOW_DATETIME, + 'updated_at': self.NOW_DATETIME, + 'deleted_at': None, + 'deleted': None, + 'status': None, + 'properties': {}, + 'owner': None, + } + actual = self.service.show(self.context, image_id) + self.assertDictMatch(actual, expected) + + def test_create(self): + fixture = self._make_fixture(name='test image') + num_images = len(self.service.detail(self.context)) + image_id = self.service.create(self.context, fixture)['id'] + + self.assertNotEquals(None, image_id) + self.assertEquals(num_images + 1, + len(self.service.detail(self.context))) + + def test_create_and_show_non_existing_image(self): + fixture = self._make_fixture(name='test image') + image_id = self.service.create(self.context, fixture)['id'] + + self.assertNotEquals(None, image_id) + self.assertRaises(exception.ImageNotFound, + self.service.show, + self.context, + 'bad image id') + + def test_detail_private_image(self): + fixture = self._make_fixture(name='test image') + fixture['is_public'] = False + properties = {'owner_id': 'proj1'} + fixture['properties'] = properties + + self.service.create(self.context, fixture)['id'] + + proj = self.context.project_id + self.context.project_id = 'proj1' + + image_metas = self.service.detail(self.context) + + self.context.project_id = proj + + self.assertEqual(1, len(image_metas)) + self.assertEqual(image_metas[0]['name'], 'test image') + self.assertEqual(image_metas[0]['is_public'], False) + + def test_detail_marker(self): + fixtures = [] + ids = [] + for i in range(10): + fixture = self._make_fixture(name='TestImage %d' % (i)) + fixtures.append(fixture) + ids.append(self.service.create(self.context, fixture)['id']) + + image_metas = self.service.detail(self.context, marker=ids[1]) + self.assertEquals(len(image_metas), 8) + i = 2 + for meta in image_metas: + expected = { + 'id': ids[i], + 'status': None, + 'is_public': None, + 'name': 'TestImage %d' % (i), + 'properties': {}, + 'size': None, + 'min_disk': None, + 'min_ram': None, + 'disk_format': None, + 'container_format': None, + 'checksum': None, + 'created_at': self.NOW_DATETIME, + 'updated_at': self.NOW_DATETIME, + 'deleted_at': None, + 'deleted': None, + 'owner': None, + } + + self.assertDictMatch(meta, expected) + i = i + 1 + + def test_detail_limit(self): + fixtures = [] + ids = [] + for i in range(10): + fixture = self._make_fixture(name='TestImage %d' % (i)) + fixtures.append(fixture) + ids.append(self.service.create(self.context, fixture)['id']) + + image_metas = self.service.detail(self.context, limit=5) + self.assertEquals(len(image_metas), 5) + + def test_detail_default_limit(self): + fixtures = [] + ids = [] + for i in range(10): + fixture = self._make_fixture(name='TestImage %d' % (i)) + fixtures.append(fixture) + ids.append(self.service.create(self.context, fixture)['id']) + + image_metas = self.service.detail(self.context) + for i, meta in enumerate(image_metas): + self.assertEqual(meta['name'], 'TestImage %d' % (i)) + + def test_detail_marker_and_limit(self): + fixtures = [] + ids = [] + for i in range(10): + fixture = self._make_fixture(name='TestImage %d' % (i)) + fixtures.append(fixture) + ids.append(self.service.create(self.context, fixture)['id']) + + 
image_metas = self.service.detail(self.context, marker=ids[3], limit=5) + self.assertEquals(len(image_metas), 5) + i = 4 + for meta in image_metas: + expected = { + 'id': ids[i], + 'status': None, + 'is_public': None, + 'name': 'TestImage %d' % (i), + 'properties': {}, + 'size': None, + 'min_disk': None, + 'min_ram': None, + 'disk_format': None, + 'container_format': None, + 'checksum': None, + 'created_at': self.NOW_DATETIME, + 'updated_at': self.NOW_DATETIME, + 'deleted_at': None, + 'deleted': None, + 'owner': None, + } + self.assertDictMatch(meta, expected) + i = i + 1 + + def test_detail_invalid_marker(self): + fixtures = [] + ids = [] + for i in range(10): + fixture = self._make_fixture(name='TestImage %d' % (i)) + fixtures.append(fixture) + ids.append(self.service.create(self.context, fixture)['id']) + + self.assertRaises(exception.Invalid, self.service.detail, + self.context, marker='invalidmarker') + + def test_update(self): + fixture = self._make_fixture(name='test image') + image = self.service.create(self.context, fixture) + print image + image_id = image['id'] + fixture['name'] = 'new image name' + self.service.update(self.context, image_id, fixture) + + new_image_data = self.service.show(self.context, image_id) + self.assertEquals('new image name', new_image_data['name']) + + def test_delete(self): + fixture1 = self._make_fixture(name='test image 1') + fixture2 = self._make_fixture(name='test image 2') + fixtures = [fixture1, fixture2] + + num_images = len(self.service.detail(self.context)) + self.assertEquals(0, num_images) + + ids = [] + for fixture in fixtures: + new_id = self.service.create(self.context, fixture)['id'] + ids.append(new_id) + + num_images = len(self.service.detail(self.context)) + self.assertEquals(2, num_images) + + self.service.delete(self.context, ids[0]) + + num_images = len(self.service.detail(self.context)) + self.assertEquals(1, num_images) + + def test_show_passes_through_to_client(self): + fixture = self._make_fixture(name='image1', is_public=True) + image_id = self.service.create(self.context, fixture)['id'] + + image_meta = self.service.show(self.context, image_id) + expected = { + 'id': image_id, + 'name': 'image1', + 'is_public': True, + 'size': None, + 'min_disk': None, + 'min_ram': None, + 'disk_format': None, + 'container_format': None, + 'checksum': None, + 'created_at': self.NOW_DATETIME, + 'updated_at': self.NOW_DATETIME, + 'deleted_at': None, + 'deleted': None, + 'status': None, + 'properties': {}, + 'owner': None, + } + self.assertEqual(image_meta, expected) + + def test_show_raises_when_no_authtoken_in_the_context(self): + fixture = self._make_fixture(name='image1', + is_public=False, + properties={'one': 'two'}) + image_id = self.service.create(self.context, fixture)['id'] + self.context.auth_token = False + self.assertRaises(exception.ImageNotFound, + self.service.show, + self.context, + image_id) + + def test_detail_passes_through_to_client(self): + fixture = self._make_fixture(name='image10', is_public=True) + image_id = self.service.create(self.context, fixture)['id'] + image_metas = self.service.detail(self.context) + expected = [ + { + 'id': image_id, + 'name': 'image10', + 'is_public': True, + 'size': None, + 'min_disk': None, + 'min_ram': None, + 'disk_format': None, + 'container_format': None, + 'checksum': None, + 'created_at': self.NOW_DATETIME, + 'updated_at': self.NOW_DATETIME, + 'deleted_at': None, + 'deleted': None, + 'status': None, + 'properties': {}, + 'owner': None, + }, + ] + self.assertEqual(image_metas, expected) 
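+ # The service converts the string timestamps returned by Glance into + # timezone-aware datetimes; the two tests below check this for show() + # and detail().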
+ + def test_show_makes_datetimes(self): + fixture = self._make_datetime_fixture() + image_id = self.service.create(self.context, fixture)['id'] + image_meta = self.service.show(self.context, image_id) + self.assertEqual(image_meta['created_at'], self.NOW_DATETIME) + self.assertEqual(image_meta['updated_at'], self.NOW_DATETIME) + + def test_detail_makes_datetimes(self): + fixture = self._make_datetime_fixture() + self.service.create(self.context, fixture) + image_meta = self.service.detail(self.context)[0] + self.assertEqual(image_meta['created_at'], self.NOW_DATETIME) + self.assertEqual(image_meta['updated_at'], self.NOW_DATETIME) + + def test_download_with_retries(self): + tries = [0] + + class MyGlanceStubClient(glance_stubs.StubGlanceClient): + """A client that fails the first time, then succeeds.""" + def get(self, image_id): + if tries[0] == 0: + tries[0] = 1 + raise glanceclient.exc.ServiceUnavailable('') + else: + return {} + + client = MyGlanceStubClient() + service = self._create_image_service(client) + image_id = 1 # doesn't matter + writer = NullWriter() + + # When retries are disabled, we should get an exception + self.flags(glance_num_retries=0) + self.assertRaises(exception.GlanceConnectionFailed, + service.download, + self.context, + image_id, + writer) + + # Now lets enable retries. No exception should happen now. + tries = [0] + self.flags(glance_num_retries=1) + service.download(self.context, image_id, writer) + + def test_client_forbidden_converts_to_imagenotauthed(self): + class MyGlanceStubClient(glance_stubs.StubGlanceClient): + """A client that raises a Forbidden exception.""" + def get(self, image_id): + raise glanceclient.exc.Forbidden(image_id) + + client = MyGlanceStubClient() + service = self._create_image_service(client) + image_id = 1 # doesn't matter + writer = NullWriter() + self.assertRaises(exception.ImageNotAuthorized, service.download, + self.context, image_id, writer) + + def test_client_httpforbidden_converts_to_imagenotauthed(self): + class MyGlanceStubClient(glance_stubs.StubGlanceClient): + """A client that raises a HTTPForbidden exception.""" + def get(self, image_id): + raise glanceclient.exc.HTTPForbidden(image_id) + + client = MyGlanceStubClient() + service = self._create_image_service(client) + image_id = 1 # doesn't matter + writer = NullWriter() + self.assertRaises(exception.ImageNotAuthorized, service.download, + self.context, image_id, writer) + + def test_client_notfound_converts_to_imagenotfound(self): + class MyGlanceStubClient(glance_stubs.StubGlanceClient): + """A client that raises a NotFound exception.""" + def get(self, image_id): + raise glanceclient.exc.NotFound(image_id) + + client = MyGlanceStubClient() + service = self._create_image_service(client) + image_id = 1 # doesn't matter + writer = NullWriter() + self.assertRaises(exception.ImageNotFound, service.download, + self.context, image_id, writer) + + def test_client_httpnotfound_converts_to_imagenotfound(self): + class MyGlanceStubClient(glance_stubs.StubGlanceClient): + """A client that raises a HTTPNotFound exception.""" + def get(self, image_id): + raise glanceclient.exc.HTTPNotFound(image_id) + + client = MyGlanceStubClient() + service = self._create_image_service(client) + image_id = 1 # doesn't matter + writer = NullWriter() + self.assertRaises(exception.ImageNotFound, service.download, + self.context, image_id, writer) + + def test_glance_client_image_id(self): + fixture = self._make_fixture(name='test image') + image_id = self.service.create(self.context, 
fixture)['id'] + (service, same_id) = glance.get_remote_image_service(self.context, + image_id) + self.assertEquals(same_id, image_id) + + def test_glance_client_image_ref(self): + fixture = self._make_fixture(name='test image') + image_id = self.service.create(self.context, fixture)['id'] + image_url = 'http://something-less-likely/%s' % image_id + (service, same_id) = glance.get_remote_image_service(self.context, + image_url) + self.assertEquals(same_id, image_id) + self.assertEquals(service._client.host, + 'something-less-likely') + + +class TestGlanceClientVersion(test.TestCase): + """Tests the version of the glance client generated""" + def setUp(self): + super(TestGlanceClientVersion, self).setUp() + + def fake_get_image_model(self): + return + + self.stubs.Set(glanceclient_v2, '_get_image_model', + fake_get_image_model) + + def test_glance_version_by_flag(self): + """Test glance version set by flag is honoured""" + client_wrapper_v1 = glance.GlanceClientWrapper('fake', 'fake_host', + 9292) + self.assertEquals(client_wrapper_v1.client.__module__, + 'glanceclient.v1.client') + self.flags(glance_api_version=2) + client_wrapper_v2 = glance.GlanceClientWrapper('fake', 'fake_host', + 9292) + self.assertEquals(client_wrapper_v2.client.__module__, + 'glanceclient.v2.client') + FLAGS.reset() + + def test_glance_version_by_arg(self): + """Test glance version set by arg to GlanceClientWrapper""" + client_wrapper_v1 = glance.GlanceClientWrapper('fake', 'fake_host', + 9292, version=1) + self.assertEquals(client_wrapper_v1.client.__module__, + 'glanceclient.v1.client') + client_wrapper_v2 = glance.GlanceClientWrapper('fake', 'fake_host', + 9292, version=2) + self.assertEquals(client_wrapper_v2.client.__module__, + 'glanceclient.v2.client') + + +def _create_failing_glance_client(info): + class MyGlanceStubClient(glance_stubs.StubGlanceClient): + """A client that fails the first time, then succeeds.""" + def get(self, image_id): + info['num_calls'] += 1 + if info['num_calls'] == 1: + raise glanceclient.exc.ServiceUnavailable('') + return {} + + return MyGlanceStubClient() diff --git a/cinder/tests/integrated/__init__.py b/cinder/tests/integrated/__init__.py new file mode 100644 index 0000000000..7c17b5ad74 --- /dev/null +++ b/cinder/tests/integrated/__init__.py @@ -0,0 +1,22 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 Justin Santa Barbara +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
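test_download_with_retries above pins down the retry contract: with glance_num_retries=0 a ServiceUnavailable from the client surfaces immediately (translated to GlanceConnectionFailed), while with one retry the second attempt succeeds. A standalone sketch of that loop, using illustrative names (get_with_retries, num_retries) rather than quoting cinder's actual implementation, and omitting the exception-translation step:

    import glanceclient.exc

    def get_with_retries(client, image_id, num_retries=0):
        # One initial attempt plus num_retries retries; re-raise once
        # the retry budget is exhausted. Illustrative sketch only.
        for attempt in range(num_retries + 1):
            try:
                return client.get(image_id)
            except glanceclient.exc.ServiceUnavailable:
                if attempt == num_retries:
                    raise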
+ +""" +:mod:`integrated` -- Tests whole systems, using mock services where needed +================================= +""" +# NOTE(vish): this forces the fixtures from tests/__init.py:setup() to work +from cinder.tests import * diff --git a/cinder/tests/integrated/api/__init__.py b/cinder/tests/integrated/api/__init__.py new file mode 100644 index 0000000000..5798ab3d1e --- /dev/null +++ b/cinder/tests/integrated/api/__init__.py @@ -0,0 +1,20 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 Justin Santa Barbara +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +:mod:`api` -- OpenStack API client, for testing rather than production +================================= +""" diff --git a/cinder/tests/integrated/api/client.py b/cinder/tests/integrated/api/client.py new file mode 100644 index 0000000000..516a2a0e70 --- /dev/null +++ b/cinder/tests/integrated/api/client.py @@ -0,0 +1,219 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 Justin Santa Barbara +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import httplib +import urlparse + +from cinder.openstack.common import jsonutils +from cinder.openstack.common import log as logging + + +LOG = logging.getLogger(__name__) + + +class OpenStackApiException(Exception): + def __init__(self, message=None, response=None): + self.response = response + if not message: + message = 'Unspecified error' + + if response: + _status = response.status + _body = response.read() + + message = _('%(message)s\nStatus Code: %(_status)s\n' + 'Body: %(_body)s') % locals() + + super(OpenStackApiException, self).__init__(message) + + +class OpenStackApiAuthenticationException(OpenStackApiException): + def __init__(self, response=None, message=None): + if not message: + message = _("Authentication error") + super(OpenStackApiAuthenticationException, self).__init__(message, + response) + + +class OpenStackApiAuthorizationException(OpenStackApiException): + def __init__(self, response=None, message=None): + if not message: + message = _("Authorization error") + super(OpenStackApiAuthorizationException, self).__init__(message, + response) + + +class OpenStackApiNotFoundException(OpenStackApiException): + def __init__(self, response=None, message=None): + if not message: + message = _("Item not found") + super(OpenStackApiNotFoundException, self).__init__(message, response) + + +class TestOpenStackClient(object): + """Simple OpenStack API Client. 
+ + This is a really basic OpenStack API client that is under our control, + so we can make changes / insert hooks for testing + + """ + + def __init__(self, auth_user, auth_key, auth_uri): + super(TestOpenStackClient, self).__init__() + self.auth_result = None + self.auth_user = auth_user + self.auth_key = auth_key + self.auth_uri = auth_uri + # default project_id + self.project_id = 'openstack' + + def request(self, url, method='GET', body=None, headers=None): + _headers = {'Content-Type': 'application/json'} + _headers.update(headers or {}) + + parsed_url = urlparse.urlparse(url) + port = parsed_url.port + hostname = parsed_url.hostname + scheme = parsed_url.scheme + + if scheme == 'http': + conn = httplib.HTTPConnection(hostname, + port=port) + elif scheme == 'https': + conn = httplib.HTTPSConnection(hostname, + port=port) + else: + raise OpenStackApiException("Unknown scheme: %s" % url) + + relative_url = parsed_url.path + if parsed_url.query: + relative_url = relative_url + "?" + parsed_url.query + LOG.info(_("Doing %(method)s on %(relative_url)s") % locals()) + if body: + LOG.info(_("Body: %s") % body) + + conn.request(method, relative_url, body, _headers) + response = conn.getresponse() + return response + + def _authenticate(self): + if self.auth_result: + return self.auth_result + + auth_uri = self.auth_uri + headers = {'X-Auth-User': self.auth_user, + 'X-Auth-Key': self.auth_key, + 'X-Auth-Project-Id': self.project_id} + response = self.request(auth_uri, + headers=headers) + + http_status = response.status + LOG.debug(_("%(auth_uri)s => code %(http_status)s") % locals()) + + if http_status == 401: + raise OpenStackApiAuthenticationException(response=response) + + auth_headers = {} + for k, v in response.getheaders(): + auth_headers[k] = v + + self.auth_result = auth_headers + return self.auth_result + + def api_request(self, relative_uri, check_response_status=None, **kwargs): + auth_result = self._authenticate() + + # NOTE(justinsb): httplib 'helpfully' converts headers to lower case + base_uri = auth_result['x-server-management-url'] + + full_uri = '%s/%s' % (base_uri, relative_uri) + + headers = kwargs.setdefault('headers', {}) + headers['X-Auth-Token'] = auth_result['x-auth-token'] + + response = self.request(full_uri, **kwargs) + + http_status = response.status + LOG.debug(_("%(relative_uri)s => code %(http_status)s") % locals()) + + if check_response_status: + if http_status not in check_response_status: + if http_status == 404: + raise OpenStackApiNotFoundException(response=response) + elif http_status == 401: + raise OpenStackApiAuthorizationException(response=response) + else: + raise OpenStackApiException( + message=_("Unexpected status code"), + response=response) + + return response + + def _decode_json(self, response): + body = response.read() + LOG.debug(_("Decoding JSON: %s") % (body)) + if body: + return jsonutils.loads(body) + else: + return "" + + def api_get(self, relative_uri, **kwargs): + kwargs.setdefault('check_response_status', [200]) + response = self.api_request(relative_uri, **kwargs) + return self._decode_json(response) + + def api_post(self, relative_uri, body, **kwargs): + kwargs['method'] = 'POST' + if body: + headers = kwargs.setdefault('headers', {}) + headers['Content-Type'] = 'application/json' + kwargs['body'] = jsonutils.dumps(body) + + kwargs.setdefault('check_response_status', [200, 202]) + response = self.api_request(relative_uri, **kwargs) + return self._decode_json(response) + + def api_put(self, relative_uri, body, **kwargs): + 
kwargs['method'] = 'PUT' + if body: + headers = kwargs.setdefault('headers', {}) + headers['Content-Type'] = 'application/json' + kwargs['body'] = jsonutils.dumps(body) + + kwargs.setdefault('check_response_status', [200, 202, 204]) + response = self.api_request(relative_uri, **kwargs) + return self._decode_json(response) + + def api_delete(self, relative_uri, **kwargs): + kwargs['method'] = 'DELETE' + kwargs.setdefault('check_response_status', [200, 202, 204]) + return self.api_request(relative_uri, **kwargs) + + def get_volume(self, volume_id): + return self.api_get('/volumes/%s' % volume_id)['volume'] + + def get_volumes(self, detail=True): + rel_url = '/volumes/detail' if detail else '/volumes' + return self.api_get(rel_url)['volumes'] + + def post_volume(self, volume): + return self.api_post('/volumes', volume)['volume'] + + def delete_volume(self, volume_id): + return self.api_delete('/volumes/%s' % volume_id) + + def put_volume(self, volume_id, volume): + return self.api_put('/volumes/%s' % volume_id, volume)['volume'] diff --git a/cinder/tests/integrated/integrated_helpers.py b/cinder/tests/integrated/integrated_helpers.py new file mode 100644 index 0000000000..ead8bc1dd6 --- /dev/null +++ b/cinder/tests/integrated/integrated_helpers.py @@ -0,0 +1,130 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 Justin Santa Barbara +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
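client.py above gives the integrated tests a tiny REST client under their own control: request() speaks plain httplib, _authenticate() trades X-Auth-User/X-Auth-Key headers for an X-Auth-Token, and the volume helpers wrap the usual verbs. A minimal usage sketch (endpoint URL and credentials are placeholders; in the integrated tests the base class constructs this client against the WSGI service it just started):

    client = TestOpenStackClient('fake', 'fake',
                                 'http://127.0.0.1:8776/v1')
    new = client.post_volume({'volume': {'size': 1}})  # POST /volumes
    same = client.get_volume(new['id'])                # GET /volumes/<id>
    assert same['id'] == new['id']
    client.delete_volume(new['id'])                    # DELETE /volumes/<id>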
+ +""" +Provides common functionality for integrated unit tests +""" + +import random +import string +import uuid + +from cinder.openstack.common import log as logging +from cinder import service +from cinder import test # For the flags +from cinder.tests.integrated.api import client + + +LOG = logging.getLogger(__name__) + + +def generate_random_alphanumeric(length): + """Creates a random alphanumeric string of specified length.""" + return ''.join(random.choice(string.ascii_uppercase + string.digits) + for _x in range(length)) + + +def generate_random_numeric(length): + """Creates a random numeric string of specified length.""" + return ''.join(random.choice(string.digits) + for _x in range(length)) + + +def generate_new_element(items, prefix, numeric=False): + """Creates a random string with prefix, that is not in 'items' list.""" + while True: + if numeric: + candidate = prefix + generate_random_numeric(8) + else: + candidate = prefix + generate_random_alphanumeric(8) + if candidate not in items: + return candidate + LOG.debug("Random collision on %s" % candidate) + + +class _IntegratedTestBase(test.TestCase): + def setUp(self): + super(_IntegratedTestBase, self).setUp() + + f = self._get_flags() + self.flags(**f) + self.flags(verbose=True) + + # set up services + self.volume = self.start_service('volume') + self.scheduler = self.start_service('scheduler') + + self._start_api_service() + + self.api = client.TestOpenStackClient('fake', 'fake', self.auth_url) + + def tearDown(self): + self.osapi.stop() + super(_IntegratedTestBase, self).tearDown() + + def _start_api_service(self): + self.osapi = service.WSGIService("osapi_volume") + self.osapi.start() + # FIXME(ja): this is not the auth url - this is the service url + # FIXME(ja): this needs fixed in nova as well + self.auth_url = 'http://%s:%s/v1' % (self.osapi.host, self.osapi.port) + LOG.warn(self.auth_url) + + def _get_flags(self): + """An opportunity to setup flags, before the services are started.""" + f = {} + + # Ensure tests only listen on localhost + f['osapi_volume_listen'] = '127.0.0.1' + + # Auto-assign ports to allow concurrent tests + f['osapi_volume_listen_port'] = 0 + + return f + + def get_unused_server_name(self): + servers = self.api.get_servers() + server_names = [server['name'] for server in servers] + return generate_new_element(server_names, 'server') + + def get_invalid_image(self): + return str(uuid.uuid4()) + + def _build_minimal_create_server_request(self): + server = {} + + image = self.api.get_images()[0] + LOG.debug("Image: %s" % image) + + if 'imageRef' in image: + image_href = image['imageRef'] + else: + image_href = image['id'] + image_href = 'http://fake.server/%s' % image_href + + # We now have a valid imageId + server['imageRef'] = image_href + + # Set a valid flavorId + flavor = self.api.get_flavors()[0] + LOG.debug("Using flavor: %s" % flavor) + server['flavorRef'] = 'http://fake.server/%s' % flavor['id'] + + # Set a valid server name + server_name = self.get_unused_server_name() + server['name'] = server_name + return server diff --git a/cinder/tests/integrated/test_extensions.py b/cinder/tests/integrated/test_extensions.py new file mode 100644 index 0000000000..363baed66b --- /dev/null +++ b/cinder/tests/integrated/test_extensions.py @@ -0,0 +1,40 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 Justin Santa Barbara +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from cinder import flags +from cinder.openstack.common import log as logging +from cinder.tests.integrated import integrated_helpers + + +FLAGS = flags.FLAGS +LOG = logging.getLogger(__name__) + + +class ExtensionsTest(integrated_helpers._IntegratedTestBase): + def _get_flags(self): + f = super(ExtensionsTest, self)._get_flags() + f['osapi_volume_extension'] = FLAGS.osapi_volume_extension[:] + f['osapi_volume_extension'].append( + 'cinder.tests.api.extensions.foxinsocks.Foxinsocks') + return f + + def test_get_foxnsocks(self): + """Simple check that fox-n-socks works.""" + response = self.api.api_request('/foxnsocks') + foxnsocks = response.read() + LOG.debug("foxnsocks: %s" % foxnsocks) + self.assertEqual('Try to say this Mr. Knox, sir...', foxnsocks) diff --git a/cinder/tests/integrated/test_login.py b/cinder/tests/integrated/test_login.py new file mode 100644 index 0000000000..9c1515de82 --- /dev/null +++ b/cinder/tests/integrated/test_login.py @@ -0,0 +1,31 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 Justin Santa Barbara +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +from cinder.openstack.common import log as logging +from cinder.tests.integrated import integrated_helpers + + +LOG = logging.getLogger(__name__) + + +class LoginTest(integrated_helpers._IntegratedTestBase): + def test_login(self): + """Simple check - we list volumes - so we know we're logged in.""" + volumes = self.api.get_volumes() + for volume in volumes: + LOG.debug(_("volume: %s") % volume) diff --git a/cinder/tests/integrated/test_volumes.py b/cinder/tests/integrated/test_volumes.py new file mode 100755 index 0000000000..1ba3d6ea0e --- /dev/null +++ b/cinder/tests/integrated/test_volumes.py @@ -0,0 +1,198 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 Justin Santa Barbara +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
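ExtensionsTest above also illustrates the configuration hook the rest of these integrated tests rely on: a subclass overrides _get_flags, extends the dict returned by the parent, and _IntegratedTestBase.setUp applies the result before any service starts. A sketch of another such subclass (the class name and flag override here are invented for illustration, not part of this patch):

    class QuietVolumeApiTest(integrated_helpers._IntegratedTestBase):
        # Hypothetical subclass showing the _get_flags override pattern.
        def _get_flags(self):
            f = super(QuietVolumeApiTest, self)._get_flags()
            f['verbose'] = False  # any per-class flag override goes here
            return f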
+ +import time +import unittest + +from cinder.openstack.common import log as logging +from cinder import service +from cinder.tests import fake_driver +from cinder.tests.integrated.api import client +from cinder.tests.integrated import integrated_helpers +from cinder.volume import driver + + +LOG = logging.getLogger(__name__) + + +class VolumesTest(integrated_helpers._IntegratedTestBase): + def setUp(self): + super(VolumesTest, self).setUp() + fake_driver.LoggingVolumeDriver.clear_logs() + + def _start_api_service(self): + self.osapi = service.WSGIService("osapi_volume") + self.osapi.start() + self.auth_url = 'http://%s:%s/v1' % (self.osapi.host, self.osapi.port) + LOG.warn(self.auth_url) + + def _get_flags(self): + f = super(VolumesTest, self)._get_flags() + f['volume_driver'] = 'cinder.tests.fake_driver.LoggingVolumeDriver' + return f + + def test_get_volumes_summary(self): + """Simple check that listing volumes works.""" + volumes = self.api.get_volumes(False) + for volume in volumes: + LOG.debug("volume: %s" % volume) + + def test_get_volumes(self): + """Simple check that listing volumes works.""" + volumes = self.api.get_volumes() + for volume in volumes: + LOG.debug("volume: %s" % volume) + + def _poll_while(self, volume_id, continue_states, max_retries=5): + """Poll (briefly) while the state is in continue_states.""" + retries = 0 + while True: + try: + found_volume = self.api.get_volume(volume_id) + except client.OpenStackApiNotFoundException: + found_volume = None + LOG.debug("Got 404, proceeding") + break + + LOG.debug("Found %s" % found_volume) + + self.assertEqual(volume_id, found_volume['id']) + + if found_volume['status'] not in continue_states: + break + + time.sleep(1) + retries = retries + 1 + if retries > max_retries: + break + return found_volume + + def test_create_and_delete_volume(self): + """Creates and deletes a volume.""" + + # Create volume + created_volume = self.api.post_volume({'volume': {'size': 1}}) + LOG.debug("created_volume: %s" % created_volume) + self.assertTrue(created_volume['id']) + created_volume_id = created_volume['id'] + + # Check it's there + found_volume = self.api.get_volume(created_volume_id) + self.assertEqual(created_volume_id, found_volume['id']) + + # It should also be in the all-volume list + volumes = self.api.get_volumes() + volume_names = [volume['id'] for volume in volumes] + self.assertTrue(created_volume_id in volume_names) + + # Wait (briefly) for creation. Delay is due to the 'message queue' + found_volume = self._poll_while(created_volume_id, ['creating']) + + # It should be available... + self.assertEqual('available', found_volume['status']) + + # Delete the volume + self.api.delete_volume(created_volume_id) + + # Wait (briefly) for deletion. 
Delay is due to the 'message queue' + found_volume = self._poll_while(created_volume_id, ['deleting']) + + # Should be gone + self.assertFalse(found_volume) + + LOG.debug("Logs: %s" % fake_driver.LoggingVolumeDriver.all_logs()) + + create_actions = fake_driver.LoggingVolumeDriver.logs_like( + 'create_volume', + id=created_volume_id) + LOG.debug("Create_Actions: %s" % create_actions) + + self.assertEquals(1, len(create_actions)) + create_action = create_actions[0] + self.assertEquals(create_action['id'], created_volume_id) + self.assertEquals(create_action['availability_zone'], 'nova') + self.assertEquals(create_action['size'], 1) + + export_actions = fake_driver.LoggingVolumeDriver.logs_like( + 'create_export', + id=created_volume_id) + self.assertEquals(1, len(export_actions)) + export_action = export_actions[0] + self.assertEquals(export_action['id'], created_volume_id) + self.assertEquals(export_action['availability_zone'], 'nova') + + delete_actions = fake_driver.LoggingVolumeDriver.logs_like( + 'delete_volume', + id=created_volume_id) + self.assertEquals(1, len(delete_actions)) + delete_action = delete_actions[0] + self.assertEquals(delete_action['id'], created_volume_id) + + def test_create_volume_with_metadata(self): + """Creates a volume with metadata.""" + + # Create volume + metadata = {'key1': 'value1', + 'key2': 'value2'} + created_volume = self.api.post_volume( + {'volume': {'size': 1, + 'metadata': metadata}}) + LOG.debug("created_volume: %s" % created_volume) + self.assertTrue(created_volume['id']) + created_volume_id = created_volume['id'] + + # Check it's there and metadata present + found_volume = self.api.get_volume(created_volume_id) + self.assertEqual(created_volume_id, found_volume['id']) + self.assertEqual(metadata, found_volume['metadata']) + + def test_create_volume_in_availability_zone(self): + """Creates a volume in availability_zone.""" + + # Create volume + availability_zone = 'zone1:host1' + created_volume = self.api.post_volume( + {'volume': {'size': 1, + 'availability_zone': availability_zone}}) + LOG.debug("created_volume: %s" % created_volume) + self.assertTrue(created_volume['id']) + created_volume_id = created_volume['id'] + + # Check it's there and availability zone present + found_volume = self.api.get_volume(created_volume_id) + self.assertEqual(created_volume_id, found_volume['id']) + self.assertEqual(availability_zone, found_volume['availability_zone']) + + def test_create_and_update_volume(self): + # Create vol1 + created_volume = self.api.post_volume({'volume': { + 'size': 1, 'display_name': 'vol1'}}) + self.assertEqual(created_volume['display_name'], 'vol1') + created_volume_id = created_volume['id'] + + # update volume + body = {'volume': {'display_name': 'vol-one'}} + updated_volume = self.api.put_volume(created_volume_id, body) + self.assertEqual(updated_volume['display_name'], 'vol-one') + + # check for update + found_volume = self.api.get_volume(created_volume_id) + self.assertEqual(created_volume_id, found_volume['id']) + self.assertEqual(found_volume['display_name'], 'vol-one') + +if __name__ == "__main__": + unittest.main() diff --git a/cinder/tests/integrated/test_xml.py b/cinder/tests/integrated/test_xml.py new file mode 100644 index 0000000000..2017a89e3f --- /dev/null +++ b/cinder/tests/integrated/test_xml.py @@ -0,0 +1,51 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 Justin Santa Barbara +# All Rights Reserved.
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from lxml import etree + +from cinder.api import common +from cinder.openstack.common import log as logging +from cinder.tests.integrated import integrated_helpers + + +LOG = logging.getLogger(__name__) + + +class XmlTests(integrated_helpers._IntegratedTestBase): + """Some basic XML sanity checks.""" + + # FIXME(ja): does cinder need limits? + # def test_namespace_limits(self): + # headers = {} + # headers['Accept'] = 'application/xml' + + # response = self.api.api_request('/limits', headers=headers) + # data = response.read() + # LOG.debug("data: %s" % data) + # root = etree.XML(data) + # self.assertEqual(root.nsmap.get(None), xmlutil.XMLNS_COMMON_V10) + + def test_namespace_volumes(self): + """/volumes should have v1.1 namespace (has changed in 1.1).""" + headers = {} + headers['Accept'] = 'application/xml' + + response = self.api.api_request('/volumes', headers=headers) + data = response.read() + LOG.warn("data: %s" % data) + root = etree.XML(data) + self.assertEqual(root.nsmap.get(None), common.XML_NS_V1) diff --git a/cinder/tests/monkey_patch_example/__init__.py b/cinder/tests/monkey_patch_example/__init__.py new file mode 100644 index 0000000000..95f58e7511 --- /dev/null +++ b/cinder/tests/monkey_patch_example/__init__.py @@ -0,0 +1,33 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +"""Example Module for testing utils.monkey_patch().""" + + +CALLED_FUNCTION = [] + + +def example_decorator(name, function): + """Decorator for notify which is used from utils.monkey_patch(). + + :param name: name of the function + :param function: object of the function + :returns: function -- decorated function + """ + def wrapped_func(*args, **kwarg): + CALLED_FUNCTION.append(name) + return function(*args, **kwarg) + return wrapped_func diff --git a/cinder/tests/monkey_patch_example/example_a.py b/cinder/tests/monkey_patch_example/example_a.py new file mode 100644 index 0000000000..21e79bcb0f --- /dev/null +++ b/cinder/tests/monkey_patch_example/example_a.py @@ -0,0 +1,29 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License.
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +"""Example Module A for testing utils.monkey_patch().""" + + +def example_function_a(): + return 'Example function' + + +class ExampleClassA(): + def example_method(self): + return 'Example method' + + def example_method_add(self, arg1, arg2): + return arg1 + arg2 diff --git a/cinder/tests/monkey_patch_example/example_b.py b/cinder/tests/monkey_patch_example/example_b.py new file mode 100644 index 0000000000..9d8f6d339e --- /dev/null +++ b/cinder/tests/monkey_patch_example/example_b.py @@ -0,0 +1,30 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Example Module B for testing utils.monkey_patch().""" + + +def example_function_b(): + return 'Example function' + + +class ExampleClassB(): + def example_method(self): + return 'Example method' + + def example_method_add(self, arg1, arg2): + return arg1 + arg2 diff --git a/cinder/tests/policy.json b/cinder/tests/policy.json new file mode 100644 index 0000000000..a2c226f4bf --- /dev/null +++ b/cinder/tests/policy.json @@ -0,0 +1,42 @@ +{ + "context_is_admin": [["role:admin"]], + "admin_api": [["is_admin:True"]], + + "volume:create": [], + "volume:get": [], + "volume:get_all": [], + "volume:get_volume_metadata": [], + "volume:delete": [], + "volume:update": [], + "volume:delete_volume_metadata": [], + "volume:update_volume_metadata": [], + "volume:attach": [], + "volume:detach": [], + "volume:reserve_volume": [], + "volume:unreserve_volume": [], + "volume:begin_detaching": [], + "volume:roll_detaching": [], + "volume:check_attach": [], + "volume:check_detach": [], + "volume:initialize_connection": [], + "volume:terminate_connection": [], + "volume:create_snapshot": [], + "volume:delete_snapshot": [], + "volume:get_snapshot": [], + "volume:get_all_snapshots": [], + "volume:update_snapshot": [], + + "volume_extension:volume_admin_actions:reset_status": [["rule:admin_api"]], + "volume_extension:snapshot_admin_actions:reset_status": [["rule:admin_api"]], + "volume_extension:volume_admin_actions:force_delete": [["rule:admin_api"]], + "volume_extension:snapshot_admin_actions:force_delete": [["rule:admin_api"]], + "volume_extension:volume_admin_actions:force_detach": [["rule:admin_api"]], + "volume_extension:volume_actions:upload_image": [], + "volume_extension:types_manage": [], + "volume_extension:types_extra_specs": [], + "volume_extension:extended_snapshot_attributes": [], + "volume_extension:volume_image_metadata": [], + "volume_extension:volume_host_attribute": [["rule:admin_api"]], + "volume_extension:volume_tenant_attribute": 
[["rule:admin_api"]], + "volume_extension:hosts": [["rule:admin_api"]] +} diff --git a/cinder/tests/runtime_flags.py b/cinder/tests/runtime_flags.py new file mode 100644 index 0000000000..ad2994cde0 --- /dev/null +++ b/cinder/tests/runtime_flags.py @@ -0,0 +1,24 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo.config import cfg + +from cinder import flags + +FLAGS = flags.FLAGS +FLAGS.register_opt(cfg.IntOpt('runtime_answer', default=54, help='test flag')) diff --git a/cinder/tests/scheduler/__init__.py b/cinder/tests/scheduler/__init__.py new file mode 100644 index 0000000000..3be5ce944c --- /dev/null +++ b/cinder/tests/scheduler/__init__.py @@ -0,0 +1,19 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +# NOTE(vish): this forces the fixtures from tests/__init.py:setup() to work +from cinder.tests import * diff --git a/cinder/tests/scheduler/fakes.py b/cinder/tests/scheduler/fakes.py new file mode 100644 index 0000000000..08a65fa5da --- /dev/null +++ b/cinder/tests/scheduler/fakes.py @@ -0,0 +1,104 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Fakes For Scheduler tests. 
+""" + +import mox + +from cinder import db +from cinder.openstack.common import timeutils +from cinder.scheduler import filter_scheduler +from cinder.scheduler import host_manager + + +VOLUME_SERVICES = [ + dict(id=1, host='host1', topic='volume', disabled=False, + availability_zone='zone1', updated_at=timeutils.utcnow()), + dict(id=2, host='host2', topic='volume', disabled=False, + availability_zone='zone1', updated_at=timeutils.utcnow()), + dict(id=3, host='host3', topic='volume', disabled=False, + availability_zone='zone2', updated_at=timeutils.utcnow()), + dict(id=4, host='host4', topic='volume', disabled=False, + availability_zone='zone3', updated_at=timeutils.utcnow()), + # service on host5 is disabled + dict(id=5, host='host5', topic='volume', disabled=True, + availability_zone='zone4', updated_at=timeutils.utcnow()), +] + +SHARE_SERVICES = [ + dict(id=1, host='host1', topic='share', disabled=False, + availability_zone='zone1', updated_at=timeutils.utcnow()), + dict(id=2, host='host2', topic='share', disabled=False, + availability_zone='zone1', updated_at=timeutils.utcnow()), + dict(id=3, host='host3', topic='share', disabled=False, + availability_zone='zone2', updated_at=timeutils.utcnow()), + dict(id=4, host='host4', topic='share', disabled=False, + availability_zone='zone3', updated_at=timeutils.utcnow()), + # service on host5 is disabled + dict(id=5, host='host5', topic='share', disabled=True, + availability_zone='zone4', updated_at=timeutils.utcnow()), +] + + +class FakeFilterScheduler(filter_scheduler.FilterScheduler): + def __init__(self, *args, **kwargs): + super(FakeFilterScheduler, self).__init__(*args, **kwargs) + self.host_manager = host_manager.HostManager() + + +class FakeHostManager(host_manager.HostManager): + def __init__(self): + super(FakeHostManager, self).__init__() + + self.service_states = { + 'host1': {'total_capacity_gb': 1024, + 'free_capacity_gb': 1024, + 'reserved_percentage': 10, + 'timestamp': None}, + 'host2': {'total_capacity_gb': 2048, + 'free_capacity_gb': 300, + 'reserved_percentage': 10, + 'timestamp': None}, + 'host3': {'total_capacity_gb': 512, + 'free_capacity_gb': 512, + 'reserved_percentage': 0, + 'timestamp': None}, + 'host4': {'total_capacity_gb': 2048, + 'free_capacity_gb': 200, + 'reserved_percentage': 5, + 'timestamp': None}, + } + + +class FakeHostState(host_manager.HostState): + def __init__(self, host, attribute_dict): + super(FakeHostState, self).__init__(host) + for (key, val) in attribute_dict.iteritems(): + setattr(self, key, val) + + +def mox_host_manager_db_calls(mock, context): + mock.StubOutWithMock(db, 'service_get_all_by_topic') + + db.service_get_all_by_topic(mox.IgnoreArg(), + mox.IgnoreArg()).AndReturn(VOLUME_SERVICES) + + +def mox_host_manager_db_calls_share(mock, context): + mock.StubOutWithMock(db, 'service_get_all_by_topic') + + db.service_get_all_by_topic(mox.IgnoreArg(), + mox.IgnoreArg()).AndReturn(SHARE_SERVICES) diff --git a/cinder/tests/scheduler/test_capacity_weigher.py b/cinder/tests/scheduler/test_capacity_weigher.py new file mode 100644 index 0000000000..a569ff2552 --- /dev/null +++ b/cinder/tests/scheduler/test_capacity_weigher.py @@ -0,0 +1,94 @@ +# Copyright 2011-2012 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Tests For Capacity Weigher. +""" + +from cinder import context +from cinder.openstack.common.scheduler.weights import HostWeightHandler +from cinder import test +from cinder.tests.scheduler import fakes +from cinder.tests import utils as test_utils + + +class CapacityWeigherTestCase(test.TestCase): + def setUp(self): + super(CapacityWeigherTestCase, self).setUp() + self.host_manager = fakes.FakeHostManager() + self.weight_handler = HostWeightHandler('cinder.scheduler.weights') + self.weight_classes = self.weight_handler.get_all_classes() + + def _get_weighed_host(self, hosts, weight_properties=None): + if weight_properties is None: + weight_properties = {} + return self.weight_handler.get_weighed_objects(self.weight_classes, + hosts, + weight_properties)[0] + + def _get_all_hosts(self): + ctxt = context.get_admin_context() + fakes.mox_host_manager_db_calls(self.mox, ctxt) + self.mox.ReplayAll() + host_states = self.host_manager.get_all_host_states(ctxt) + self.mox.VerifyAll() + self.mox.ResetAll() + return host_states + + @test.skip_if(not test_utils.is_cinder_installed(), + 'Test requires Cinder installed') + def test_default_of_spreading_first(self): + hostinfo_list = self._get_all_hosts() + + # host1: free_capacity_gb=1024, free=1024*(1-0.1) + # host2: free_capacity_gb=300, free=300*(1-0.1) + # host3: free_capacity_gb=512, free=512 + # host4: free_capacity_gb=200, free=200*(1-0.05) + + # so, host1 should win: + weighed_host = self._get_weighed_host(hostinfo_list) + self.assertEqual(weighed_host.weight, 921.0) + self.assertEqual(weighed_host.obj.host, 'host1') + + @test.skip_if(not test_utils.is_cinder_installed(), + 'Test requires Cinder installed') + def test_capacity_weight_multiplier1(self): + self.flags(capacity_weight_multiplier=-1.0) + hostinfo_list = self._get_all_hosts() + + # host1: free_capacity_gb=1024, free=-1024*(1-0.1) + # host2: free_capacity_gb=300, free=-300*(1-0.1) + # host3: free_capacity_gb=512, free=-512 + # host4: free_capacity_gb=200, free=-200*(1-0.05) + + # so, host4 should win: + weighed_host = self._get_weighed_host(hostinfo_list) + self.assertEqual(weighed_host.weight, -190.0) + self.assertEqual(weighed_host.obj.host, 'host4') + + @test.skip_if(not test_utils.is_cinder_installed(), + 'Test requires Cinder installed') + def test_capacity_weight_multiplier2(self): + self.flags(capacity_weight_multiplier=2.0) + hostinfo_list = self._get_all_hosts() + + # host1: free_capacity_gb=1024, free=1024*(1-0.1)*2 + # host2: free_capacity_gb=300, free=300*(1-0.1)*2 + # host3: free_capacity_gb=512, free=512*2 + # host4: free_capacity_gb=200, free=200*(1-0.05)*2 + + # so, host1 should win: + weighed_host = self._get_weighed_host(hostinfo_list) + self.assertEqual(weighed_host.weight, 921.0 * 2) + self.assertEqual(weighed_host.obj.host, 'host1') diff --git a/cinder/tests/scheduler/test_filter_scheduler.py b/cinder/tests/scheduler/test_filter_scheduler.py new file mode 100644 index 0000000000..7269aa6aba --- /dev/null +++ b/cinder/tests/scheduler/test_filter_scheduler.py @@ -0,0 +1,302 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Tests For Filter Scheduler. +""" + +from cinder import context +from cinder import exception +from cinder import test + +from cinder.openstack.common.scheduler import weights +from cinder.scheduler import filter_scheduler +from cinder.scheduler import host_manager +from cinder.tests.scheduler import fakes +from cinder.tests.scheduler import test_scheduler +from cinder.tests import utils as test_utils + + +def fake_get_filtered_hosts(hosts, filter_properties): + return list(hosts) + + +class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase): + """Test case for Filter Scheduler.""" + + driver_cls = filter_scheduler.FilterScheduler + + @test.skip_if(not test_utils.is_cinder_installed(), + 'Test requires Cinder installed (try setup.py develop)') + def test_create_volume_no_hosts(self): + """ + Ensure empty hosts & child_zones result in NoValidHosts exception. + """ + def _fake_empty_call_zone_method(*args, **kwargs): + return [] + + sched = fakes.FakeFilterScheduler() + + fake_context = context.RequestContext('user', 'project') + request_spec = {'volume_properties': {'project_id': 1, + 'size': 1}, + 'volume_type': {'name': 'LVM_iSCSI'}, + 'volume_id': ['fake-id1']} + self.assertRaises(exception.NoValidHost, sched.schedule_create_volume, + fake_context, request_spec, {}) + + @test.skip_if(not test_utils.is_cinder_installed(), + 'Test requires Cinder installed (try setup.py develop)') + def test_create_volume_non_admin(self): + """Test creating a volume locally using schedule_create_volume, + passing a non-admin context.
DB actions should work.""" + self.was_admin = False + + def fake_get(context, *args, **kwargs): + # make sure this is called with admin context, even though + # we're using user context below + self.was_admin = context.is_admin + return {} + + sched = fakes.FakeFilterScheduler() + self.stubs.Set(sched.host_manager, 'get_all_host_states', fake_get) + + fake_context = context.RequestContext('user', 'project') + + request_spec = {'volume_properties': {'project_id': 1, + 'size': 1}, + 'volume_type': {'name': 'LVM_iSCSI'}, + 'volume_id': ['fake-id1']} + self.assertRaises(exception.NoValidHost, sched.schedule_create_volume, + fake_context, request_spec, {}) + self.assertTrue(self.was_admin) + + @test.skip_if(not test_utils.is_cinder_installed(), + 'Test requires Cinder installed (try setup.py develop)') + def test_schedule_happy_day(self): + """Make sure there's nothing glaringly wrong with _schedule() + by doing a happy day pass through.""" + + self.next_weight = 1.0 + + def _fake_weigh_objects(_self, functions, hosts, options): + self.next_weight += 2.0 + host_state = hosts[0] + return [weights.WeighedHost(host_state, self.next_weight)] + + sched = fakes.FakeFilterScheduler() + fake_context = context.RequestContext('user', 'project', + is_admin=True) + + self.stubs.Set(sched.host_manager, 'get_filtered_hosts', + fake_get_filtered_hosts) + self.stubs.Set(weights.HostWeightHandler, + 'get_weighed_objects', _fake_weigh_objects) + fakes.mox_host_manager_db_calls(self.mox, fake_context) + + request_spec = {'volume_type': {'name': 'LVM_iSCSI'}, + 'volume_properties': {'project_id': 1, + 'size': 1}} + self.mox.ReplayAll() + weighed_host = sched._schedule(fake_context, request_spec, {}) + self.assertTrue(weighed_host.obj is not None) + + @test.skip_if(not test_utils.is_cinder_installed(), + 'Test requires Cinder installed (try setup.py develop)') + def test_create_share_no_hosts(self): + """ + Ensure empty hosts & child_zones result in NoValidHosts exception. + """ + def _fake_empty_call_zone_method(*args, **kwargs): + return [] + + sched = fakes.FakeFilterScheduler() + + fake_context = context.RequestContext('user', 'project') + request_spec = {'share_properties': {'project_id': 1, + 'size': 1}, + 'share_type': {'name': 'LVM_NFS'}, + 'share_id': ['fake-id1']} + self.assertRaises(exception.NoValidHost, sched.schedule_create_share, + fake_context, request_spec, {}) + + @test.skip_if(not test_utils.is_cinder_installed(), + 'Test requires Cinder installed (try setup.py develop)') + def test_create_share_non_admin(self): + """Test creating a share, passing a non-admin context.
+ + DB actions should work.""" + self.was_admin = False + + def fake_get(context, *args, **kwargs): + # make sure this is called with admin context, even though + # we're using user context below + self.was_admin = context.is_admin + return {} + + sched = fakes.FakeFilterScheduler() + self.stubs.Set(sched.host_manager, + 'get_all_host_states_share', + fake_get) + + fake_context = context.RequestContext('user', 'project') + + request_spec = {'share_properties': {'project_id': 1, + 'size': 1}, + 'share_type': {'name': 'LVM_NFS'}, + 'share_id': ['fake-id1']} + self.assertRaises(exception.NoValidHost, sched.schedule_create_share, + fake_context, request_spec, {}) + self.assertTrue(self.was_admin) + + @test.skip_if(not test_utils.is_cinder_installed(), + 'Test requires Cinder installed (try setup.py develop)') + def test_schedule_happy_day_share(self): + """Make sure there's nothing glaringly wrong with _schedule_share() + by doing a happy day pass through.""" + + self.next_weight = 1.0 + + def _fake_weigh_objects(_self, functions, hosts, options): + self.next_weight += 2.0 + host_state = hosts[0] + return [weights.WeighedHost(host_state, self.next_weight)] + + sched = fakes.FakeFilterScheduler() + fake_context = context.RequestContext('user', 'project', + is_admin=True) + + self.stubs.Set(sched.host_manager, 'get_filtered_hosts', + fake_get_filtered_hosts) + self.stubs.Set(weights.HostWeightHandler, + 'get_weighed_objects', _fake_weigh_objects) + fakes.mox_host_manager_db_calls_share(self.mox, fake_context) + + request_spec = {'share_type': {'name': 'LVM_NFS'}, + 'share_properties': {'project_id': 1, + 'size': 1}} + self.mox.ReplayAll() + weighed_host = sched._schedule_share(fake_context, request_spec, {}) + self.assertTrue(weighed_host.obj is not None) + + def test_max_attempts(self): + self.flags(scheduler_max_attempts=4) + + sched = fakes.FakeFilterScheduler() + self.assertEqual(4, sched._max_attempts()) + + def test_invalid_max_attempts(self): + self.flags(scheduler_max_attempts=0) + + self.assertRaises(exception.InvalidParameterValue, + fakes.FakeFilterScheduler) + + @test.skip_if(not test_utils.is_cinder_installed(), + 'Test requires Cinder installed (try setup.py develop)') + def test_retry_disabled(self): + # Retry info should not get populated when re-scheduling is off. + self.flags(scheduler_max_attempts=1) + sched = fakes.FakeFilterScheduler() + + request_spec = {'volume_type': {'name': 'LVM_iSCSI'}, + 'volume_properties': {'project_id': 1, + 'size': 1}} + filter_properties = {} + + sched._schedule(self.context, request_spec, + filter_properties=filter_properties) + + # should not have retry info in the populated filter properties: + self.assertFalse("retry" in filter_properties) + + @test.skip_if(not test_utils.is_cinder_installed(), + 'Test requires Cinder installed (try setup.py develop)') + def test_retry_attempt_one(self): + # Test retry logic on initial scheduling attempt. + self.flags(scheduler_max_attempts=2) + sched = fakes.FakeFilterScheduler() + + request_spec = {'volume_type': {'name': 'LVM_iSCSI'}, + 'volume_properties': {'project_id': 1, + 'size': 1}} + filter_properties = {} + + sched._schedule(self.context, request_spec, + filter_properties=filter_properties) + + num_attempts = filter_properties['retry']['num_attempts'] + self.assertEqual(1, num_attempts) + + @test.skip_if(not test_utils.is_cinder_installed(), + 'Test requires Cinder installed (try setup.py develop)') + def test_retry_attempt_two(self): + # Test retry logic when re-scheduling.
+ self.flags(scheduler_max_attempts=2) + sched = fakes.FakeFilterScheduler() + + request_spec = {'volume_type': {'name': 'LVM_iSCSI'}, + 'volume_properties': {'project_id': 1, + 'size': 1}} + + retry = dict(num_attempts=1) + filter_properties = dict(retry=retry) + + sched._schedule(self.context, request_spec, + filter_properties=filter_properties) + + num_attempts = filter_properties['retry']['num_attempts'] + self.assertEqual(2, num_attempts) + + def test_retry_exceeded_max_attempts(self): + # Test for necessary explosion when max retries is exceeded. + self.flags(scheduler_max_attempts=2) + sched = fakes.FakeFilterScheduler() + + request_spec = {'volume_type': {'name': 'LVM_iSCSI'}, + 'volume_properties': {'project_id': 1, + 'size': 1}} + + retry = dict(num_attempts=2) + filter_properties = dict(retry=retry) + + self.assertRaises(exception.NoValidHost, sched._schedule, self.context, + request_spec, filter_properties=filter_properties) + + def test_add_retry_host(self): + retry = dict(num_attempts=1, hosts=[]) + filter_properties = dict(retry=retry) + host = "fakehost" + + sched = fakes.FakeFilterScheduler() + sched._add_retry_host(filter_properties, host) + + hosts = filter_properties['retry']['hosts'] + self.assertEqual(1, len(hosts)) + self.assertEqual(host, hosts[0]) + + def test_post_select_populate(self): + # Test addition of certain filter props after a node is selected. + retry = {'hosts': [], 'num_attempts': 1} + filter_properties = {'retry': retry} + sched = fakes.FakeFilterScheduler() + + host_state = host_manager.HostState('host') + host_state.total_capacity_gb = 1024 + sched._post_select_populate_filter_properties(filter_properties, + host_state) + + self.assertEqual('host', + filter_properties['retry']['hosts'][0]) + + self.assertEqual(1024, host_state.total_capacity_gb) diff --git a/cinder/tests/scheduler/test_host_filters.py b/cinder/tests/scheduler/test_host_filters.py new file mode 100644 index 0000000000..810056fd1a --- /dev/null +++ b/cinder/tests/scheduler/test_host_filters.py @@ -0,0 +1,159 @@ +# Copyright 2011 OpenStack LLC. # All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Tests For Scheduler Host Filters. 
+""" + +import httplib +import stubout + +from cinder import context +from cinder import db +from cinder import exception +from cinder.openstack.common import jsonutils +from cinder.openstack.common.scheduler import filters +from cinder import test +from cinder.tests.scheduler import fakes +from cinder.tests import utils as test_utils +from cinder import utils + + +DATA = '' + + +def stub_out_https_backend(stubs): + """ + Stubs out the httplib.HTTPRequest.getresponse to return + faked-out data instead of grabbing actual contents of a resource + + The stubbed getresponse() returns an iterator over + the data "I am a teapot, short and stout\n" + + :param stubs: Set of stubout stubs + """ + + class FakeHTTPResponse(object): + + def read(self): + return DATA + + def fake_do_request(self, *args, **kwargs): + return httplib.OK, FakeHTTPResponse() + + +class HostFiltersTestCase(test.TestCase): + """Test case for host filters.""" + + def setUp(self): + super(HostFiltersTestCase, self).setUp() + self.stubs = stubout.StubOutForTesting() + stub_out_https_backend(self.stubs) + self.context = context.RequestContext('fake', 'fake') + self.json_query = jsonutils.dumps( + ['and', ['>=', '$free_capacity_gb', 1024], + ['>=', '$total_capacity_gb', 10 * 1024]]) + # This has a side effect of testing 'get_filter_classes' + # when specifying a method (in this case, our standard filters) + filter_handler = filters.HostFilterHandler('cinder.scheduler.filters') + classes = filter_handler.get_all_classes() + self.class_map = {} + for cls in classes: + self.class_map[cls.__name__] = cls + + def _stub_service_is_up(self, ret_value): + def fake_service_is_up(service): + return ret_value + self.stubs.Set(utils, 'service_is_up', fake_service_is_up) + + @test.skip_if(not test_utils.is_cinder_installed(), + 'Test requires Cinder installed') + def test_capacity_filter_passes(self): + self._stub_service_is_up(True) + filt_cls = self.class_map['CapacityFilter']() + filter_properties = {'size': 100} + service = {'disabled': False} + host = fakes.FakeHostState('host1', + {'free_capacity_gb': 200, + 'updated_at': None, + 'service': service}) + self.assertTrue(filt_cls.host_passes(host, filter_properties)) + + @test.skip_if(not test_utils.is_cinder_installed(), + 'Test requires Cinder installed') + def test_capacity_filter_fails(self): + self._stub_service_is_up(True) + filt_cls = self.class_map['CapacityFilter']() + filter_properties = {'size': 100} + service = {'disabled': False} + host = fakes.FakeHostState('host1', + {'free_capacity_gb': 120, + 'reserved_percentage': 20, + 'updated_at': None, + 'service': service}) + self.assertFalse(filt_cls.host_passes(host, filter_properties)) + + @test.skip_if(not test_utils.is_cinder_installed(), + 'Test requires Cinder installed') + def test_capacity_filter_passes_infinite(self): + self._stub_service_is_up(True) + filt_cls = self.class_map['CapacityFilter']() + filter_properties = {'size': 100} + service = {'disabled': False} + host = fakes.FakeHostState('host1', + {'free_capacity_gb': 'infinite', + 'updated_at': None, + 'service': service}) + self.assertTrue(filt_cls.host_passes(host, filter_properties)) + + @test.skip_if(not test_utils.is_cinder_installed(), + 'Test requires Cinder installed') + def test_capacity_filter_passes_unknown(self): + self._stub_service_is_up(True) + filt_cls = self.class_map['CapacityFilter']() + filter_properties = {'size': 100} + service = {'disabled': False} + host = fakes.FakeHostState('host1', + {'free_capacity_gb': 'unknown', + 'updated_at': None, + 
'service': service}) + self.assertTrue(filt_cls.host_passes(host, filter_properties)) + + @test.skip_if(not test_utils.is_cinder_installed(), + 'Test requires Cinder installed') + def test_retry_filter_disabled(self): + # Test case where retry/re-scheduling is disabled. + filt_cls = self.class_map['RetryFilter']() + host = fakes.FakeHostState('host1', {}) + filter_properties = {} + self.assertTrue(filt_cls.host_passes(host, filter_properties)) + + @test.skip_if(not test_utils.is_cinder_installed(), + 'Test requires Cinder installed') + def test_retry_filter_pass(self): + # Node not previously tried. + filt_cls = self.class_map['RetryFilter']() + host = fakes.FakeHostState('host1', {}) + retry = dict(num_attempts=2, hosts=['host2']) + filter_properties = dict(retry=retry) + self.assertTrue(filt_cls.host_passes(host, filter_properties)) + + @test.skip_if(not test_utils.is_cinder_installed(), + 'Test requires Cinder installed') + def test_retry_filter_fail(self): + # Node was already tried. + filt_cls = self.class_map['RetryFilter']() + host = fakes.FakeHostState('host1', {}) + retry = dict(num_attempts=1, hosts=['host1']) + filter_properties = dict(retry=retry) + self.assertFalse(filt_cls.host_passes(host, filter_properties)) diff --git a/cinder/tests/scheduler/test_host_manager.py b/cinder/tests/scheduler/test_host_manager.py new file mode 100644 index 0000000000..95a8e0d7d6 --- /dev/null +++ b/cinder/tests/scheduler/test_host_manager.py @@ -0,0 +1,299 @@ +# Copyright (c) 2011 OpenStack, LLC +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
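The RetryFilter cases above reduce to a single membership test: with no retry info every host passes, and with retry info a host passes only if it has not been tried before. A condensed restatement of that predicate (a simplified sketch of the behavior the three retry tests assert, not the actual filter class):

    def retry_filter_passes(host, filter_properties):
        # Pass when retry/re-scheduling is disabled, pass for untried
        # hosts, fail for hosts already in the retry list.
        retry = filter_properties.get('retry')
        if not retry:
            return True
        return host not in retry.get('hosts', [])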
+""" +Tests For HostManager +""" + + +from cinder import db +from cinder import exception +from cinder import flags +from cinder.openstack.common.scheduler import filters +from cinder.openstack.common import timeutils +from cinder.scheduler import host_manager +from cinder import test +from cinder.tests.scheduler import fakes + + +FLAGS = flags.FLAGS + + +class FakeFilterClass1(filters.BaseHostFilter): + def host_passes(self, host_state, filter_properties): + pass + + +class FakeFilterClass2(filters.BaseHostFilter): + def host_passes(self, host_state, filter_properties): + pass + + +class HostManagerTestCase(test.TestCase): + """Test case for HostManager class""" + + def setUp(self): + super(HostManagerTestCase, self).setUp() + self.host_manager = host_manager.HostManager() + self.fake_hosts = [host_manager.HostState('fake_host%s' % x) + for x in xrange(1, 5)] + + def test_choose_host_filters_not_found(self): + self.flags(scheduler_default_filters='FakeFilterClass3') + self.host_manager.filter_classes = [FakeFilterClass1, + FakeFilterClass2] + self.assertRaises(exception.SchedulerHostFilterNotFound, + self.host_manager._choose_host_filters, None) + + def test_choose_host_filters(self): + self.flags(scheduler_default_filters=['FakeFilterClass2']) + self.host_manager.filter_classes = [FakeFilterClass1, + FakeFilterClass2] + + # Test 'volume' returns 1 correct function + filter_classes = self.host_manager._choose_host_filters(None) + self.assertEqual(len(filter_classes), 1) + self.assertEqual(filter_classes[0].__name__, 'FakeFilterClass2') + + def _mock_get_filtered_hosts(self, info, specified_filters=None): + self.mox.StubOutWithMock(self.host_manager, '_choose_host_filters') + + info['got_objs'] = [] + info['got_fprops'] = [] + + def fake_filter_one(_self, obj, filter_props): + info['got_objs'].append(obj) + info['got_fprops'].append(filter_props) + return True + + self.stubs.Set(FakeFilterClass1, '_filter_one', fake_filter_one) + self.host_manager._choose_host_filters(specified_filters).AndReturn( + [FakeFilterClass1]) + + def _verify_result(self, info, result): + for x in info['got_fprops']: + self.assertEqual(x, info['expected_fprops']) + self.assertEqual(set(info['expected_objs']), set(info['got_objs'])) + self.assertEqual(set(result), set(info['got_objs'])) + + def test_get_filtered_hosts(self): + fake_properties = {'moo': 1, 'cow': 2} + + info = {'expected_objs': self.fake_hosts, + 'expected_fprops': fake_properties} + + self._mock_get_filtered_hosts(info) + + self.mox.ReplayAll() + result = self.host_manager.get_filtered_hosts(self.fake_hosts, + fake_properties) + self._verify_result(info, result) + + def test_update_service_capabilities(self): + service_states = self.host_manager.service_states + self.assertDictMatch(service_states, {}) + self.mox.StubOutWithMock(timeutils, 'utcnow') + timeutils.utcnow().AndReturn(31337) + timeutils.utcnow().AndReturn(31338) + timeutils.utcnow().AndReturn(31339) + + host1_volume_capabs = dict(free_capacity_gb=4321, timestamp=1) + host2_volume_capabs = dict(free_capacity_gb=5432, timestamp=1) + host3_volume_capabs = dict(free_capacity_gb=6543, timestamp=1) + + self.mox.ReplayAll() + service_name = 'volume' + self.host_manager.update_service_capabilities(service_name, 'host1', + host1_volume_capabs) + self.host_manager.update_service_capabilities(service_name, 'host2', + host2_volume_capabs) + self.host_manager.update_service_capabilities(service_name, 'host3', + host3_volume_capabs) + + # Make sure dictionary isn't re-assigned + 
self.assertEqual(self.host_manager.service_states, service_states) + # Make sure original dictionary wasn't copied + self.assertEqual(host1_volume_capabs['timestamp'], 1) + + host1_volume_capabs['timestamp'] = 31337 + host2_volume_capabs['timestamp'] = 31338 + host3_volume_capabs['timestamp'] = 31339 + + expected = {'host1': host1_volume_capabs, + 'host2': host2_volume_capabs, + 'host3': host3_volume_capabs} + self.assertDictMatch(service_states, expected) + + def test_get_all_host_states(self): + context = 'fake_context' + topic = FLAGS.volume_topic + + self.mox.StubOutWithMock(db, 'service_get_all_by_topic') + self.mox.StubOutWithMock(host_manager.LOG, 'warn') + + ret_services = fakes.VOLUME_SERVICES + db.service_get_all_by_topic(context, topic).AndReturn(ret_services) + # Disabled service + host_manager.LOG.warn("service is down or disabled.") + + self.mox.ReplayAll() + self.host_manager.get_all_host_states(context) + host_state_map = self.host_manager.host_state_map + + self.assertEqual(len(host_state_map), 4) + # Check that service is up + for i in xrange(4): + volume_node = fakes.VOLUME_SERVICES[i] + host = volume_node['host'] + self.assertEqual(host_state_map[host].service, + volume_node) + + def test_update_service_capabilities_for_shares(self): + service_states = self.host_manager.service_states + self.assertDictMatch(service_states, {}) + self.mox.StubOutWithMock(timeutils, 'utcnow') + timeutils.utcnow().AndReturn(31337) + timeutils.utcnow().AndReturn(31338) + timeutils.utcnow().AndReturn(31339) + + host1_share_capabs = dict(free_capacity_gb=4321, timestamp=1) + host2_share_capabs = dict(free_capacity_gb=5432, timestamp=1) + host3_share_capabs = dict(free_capacity_gb=6543, timestamp=1) + + self.mox.ReplayAll() + service_name = 'share' + self.host_manager.update_service_capabilities(service_name, 'host1', + host1_share_capabs) + self.host_manager.update_service_capabilities(service_name, 'host2', + host2_share_capabs) + self.host_manager.update_service_capabilities(service_name, 'host3', + host3_share_capabs) + + # Make sure dictionary isn't re-assigned + self.assertEqual(self.host_manager.service_states, service_states) + # Make sure original dictionary wasn't copied + self.assertEqual(host1_share_capabs['timestamp'], 1) + + host1_share_capabs['timestamp'] = 31337 + host2_share_capabs['timestamp'] = 31338 + host3_share_capabs['timestamp'] = 31339 + + expected = {'host1': host1_share_capabs, + 'host2': host2_share_capabs, + 'host3': host3_share_capabs} + self.assertDictMatch(service_states, expected) + + def test_get_all_host_states_share(self): + context = 'fake_context' + topic = FLAGS.share_topic + + self.mox.StubOutWithMock(db, 'service_get_all_by_topic') + self.mox.StubOutWithMock(host_manager.LOG, 'warn') + + ret_services = fakes.SHARE_SERVICES + db.service_get_all_by_topic(context, topic).AndReturn(ret_services) + # Disabled service + host_manager.LOG.warn("service is down or disabled.") + + self.mox.ReplayAll() + self.host_manager.get_all_host_states_share(context) + host_state_map = self.host_manager.host_state_map + + self.assertEqual(len(host_state_map), 4) + # Check that service is up + for i in xrange(4): + share_node = fakes.SHARE_SERVICES[i] + host = share_node['host'] + self.assertEqual(host_state_map[host].service, + share_node) + + +class HostStateTestCase(test.TestCase): + """Test case for HostState class.""" + + def test_update_from_volume_capability(self): + fake_host = host_manager.HostState('host1') + self.assertEqual(fake_host.free_capacity_gb, None) + + 
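        # Minimal capability report, shaped like what a backend
+        # publishes via update_service_capabilities.
+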
volume_capability = {'total_capacity_gb': 1024, + 'free_capacity_gb': 512, + 'reserved_percentage': 0, + 'timestamp': None} + + fake_host.update_from_volume_capability(volume_capability) + self.assertEqual(fake_host.free_capacity_gb, 512) + + def test_update_from_volume_infinite_capability(self): + fake_host = host_manager.HostState('host1') + self.assertEqual(fake_host.free_capacity_gb, None) + + volume_capability = {'total_capacity_gb': 'infinite', + 'free_capacity_gb': 'infinite', + 'reserved_percentage': 0, + 'timestamp': None} + + fake_host.update_from_volume_capability(volume_capability) + self.assertEqual(fake_host.total_capacity_gb, 'infinite') + self.assertEqual(fake_host.free_capacity_gb, 'infinite') + + def test_update_from_volume_unknown_capability(self): + fake_host = host_manager.HostState('host1') + self.assertEqual(fake_host.free_capacity_gb, None) + + volume_capability = {'total_capacity_gb': 'infinite', + 'free_capacity_gb': 'unknown', + 'reserved_percentage': 0, + 'timestamp': None} + + fake_host.update_from_volume_capability(volume_capability) + self.assertEqual(fake_host.total_capacity_gb, 'infinite') + self.assertEqual(fake_host.free_capacity_gb, 'unknown') + + def test_update_from_share_capability(self): + fake_host = host_manager.HostState('host1') + self.assertEqual(fake_host.free_capacity_gb, None) + + share_capability = {'total_capacity_gb': 1024, + 'free_capacity_gb': 512, + 'reserved_percentage': 0, + 'timestamp': None} + + fake_host.update_from_share_capability(share_capability) + self.assertEqual(fake_host.free_capacity_gb, 512) + + def test_update_from_share_infinite_capability(self): + fake_host = host_manager.HostState('host1') + self.assertEqual(fake_host.free_capacity_gb, None) + + share_capability = {'total_capacity_gb': 'infinite', + 'free_capacity_gb': 'infinite', + 'reserved_percentage': 0, + 'timestamp': None} + + fake_host.update_from_share_capability(share_capability) + self.assertEqual(fake_host.total_capacity_gb, 'infinite') + self.assertEqual(fake_host.free_capacity_gb, 'infinite') + + def test_update_from_share_unknown_capability(self): + fake_host = host_manager.HostState('host1') + self.assertEqual(fake_host.free_capacity_gb, None) + + share_capability = {'total_capacity_gb': 'infinite', + 'free_capacity_gb': 'unknown', + 'reserved_percentage': 0, + 'timestamp': None} + + fake_host.update_from_share_capability(share_capability) + self.assertEqual(fake_host.total_capacity_gb, 'infinite') + self.assertEqual(fake_host.free_capacity_gb, 'unknown') diff --git a/cinder/tests/scheduler/test_rpcapi.py b/cinder/tests/scheduler/test_rpcapi.py new file mode 100644 index 0000000000..1032f429f4 --- /dev/null +++ b/cinder/tests/scheduler/test_rpcapi.py @@ -0,0 +1,91 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012, Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +""" +Unit Tests for cinder.scheduler.rpcapi +""" + +from cinder import context +from cinder import flags +from cinder.openstack.common import rpc +from cinder.scheduler import rpcapi as scheduler_rpcapi +from cinder import test + + +FLAGS = flags.FLAGS + + +class SchedulerRpcAPITestCase(test.TestCase): + + def setUp(self): + super(SchedulerRpcAPITestCase, self).setUp() + + def tearDown(self): + super(SchedulerRpcAPITestCase, self).tearDown() + + def _test_scheduler_api(self, method, rpc_method, **kwargs): + ctxt = context.RequestContext('fake_user', 'fake_project') + rpcapi = scheduler_rpcapi.SchedulerAPI() + expected_retval = 'foo' if method == 'call' else None + expected_version = kwargs.pop('version', rpcapi.RPC_API_VERSION) + expected_msg = rpcapi.make_msg(method, **kwargs) + expected_msg['version'] = expected_version + + self.fake_args = None + self.fake_kwargs = None + + def _fake_rpc_method(*args, **kwargs): + self.fake_args = args + self.fake_kwargs = kwargs + if expected_retval: + return expected_retval + + self.stubs.Set(rpc, rpc_method, _fake_rpc_method) + + retval = getattr(rpcapi, method)(ctxt, **kwargs) + + self.assertEqual(retval, expected_retval) + expected_args = [ctxt, FLAGS.scheduler_topic, expected_msg] + for arg, expected_arg in zip(self.fake_args, expected_args): + self.assertEqual(arg, expected_arg) + + def test_update_service_capabilities(self): + self._test_scheduler_api('update_service_capabilities', + rpc_method='fanout_cast', + service_name='fake_name', + host='fake_host', + capabilities='fake_capabilities') + + def test_create_volume(self): + self._test_scheduler_api('create_volume', + rpc_method='cast', + topic='topic', + volume_id='volume_id', + snapshot_id='snapshot_id', + image_id='image_id', + request_spec='fake_request_spec', + filter_properties='filter_properties', + version='1.2') + + def test_create_share(self): + self._test_scheduler_api('create_share', + rpc_method='cast', + topic='topic', + share_id='share_id', + snapshot_id='snapshot_id', + request_spec='fake_request_spec', + filter_properties='filter_properties', + version='1.3') diff --git a/cinder/tests/scheduler/test_scheduler.py b/cinder/tests/scheduler/test_scheduler.py new file mode 100644 index 0000000000..2101368f32 --- /dev/null +++ b/cinder/tests/scheduler/test_scheduler.py @@ -0,0 +1,394 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+""" +Tests For Scheduler +""" + +from mox import IsA + +from cinder import context +from cinder import db +from cinder import exception +from cinder import flags +from cinder.openstack.common import timeutils +from cinder.scheduler import driver +from cinder.scheduler import manager +from cinder.scheduler import simple +from cinder import test +from cinder import utils + + +FLAGS = flags.FLAGS + + +class SchedulerManagerTestCase(test.TestCase): + """Test case for scheduler manager.""" + + manager_cls = manager.SchedulerManager + driver_cls = driver.Scheduler + driver_cls_name = 'cinder.scheduler.driver.Scheduler' + + class AnException(Exception): + pass + + def setUp(self): + super(SchedulerManagerTestCase, self).setUp() + self.flags(scheduler_driver=self.driver_cls_name) + self.manager = self.manager_cls() + self.context = context.RequestContext('fake_user', 'fake_project') + self.topic = 'fake_topic' + self.fake_args = (1, 2, 3) + self.fake_kwargs = {'cat': 'meow', 'dog': 'woof'} + + def test_1_correct_init(self): + # Correct scheduler driver + manager = self.manager + self.assertTrue(isinstance(manager.driver, self.driver_cls)) + + def test_update_service_capabilities(self): + service_name = 'fake_service' + host = 'fake_host' + + self.mox.StubOutWithMock(self.manager.driver, + 'update_service_capabilities') + + # Test no capabilities passes empty dictionary + self.manager.driver.update_service_capabilities(service_name, + host, {}) + self.mox.ReplayAll() + result = self.manager.update_service_capabilities( + self.context, + service_name=service_name, + host=host) + self.mox.VerifyAll() + + self.mox.ResetAll() + # Test capabilities passes correctly + capabilities = {'fake_capability': 'fake_value'} + self.manager.driver.update_service_capabilities(service_name, + host, + capabilities) + self.mox.ReplayAll() + result = self.manager.update_service_capabilities( + self.context, + service_name=service_name, host=host, + capabilities=capabilities) + + def test_create_volume_exception_puts_volume_in_error_state(self): + """Test that a NoValideHost exception for create_volume. + + Puts the volume in 'error' state and eats the exception. + """ + fake_volume_id = 1 + self._mox_schedule_method_helper('schedule_create_volume') + self.mox.StubOutWithMock(db, 'volume_update') + + topic = 'fake_topic' + volume_id = fake_volume_id + request_spec = {'volume_id': fake_volume_id} + + self.manager.driver.schedule_create_volume( + self.context, + request_spec, {}).AndRaise(exception.NoValidHost(reason="")) + db.volume_update(self.context, fake_volume_id, {'status': 'error'}) + + self.mox.ReplayAll() + self.manager.create_volume(self.context, topic, volume_id, + request_spec=request_spec, + filter_properties={}) + + def test_create_share_exception_puts_share_in_error_state(self): + """Test that a NoValideHost exception for create_share. + + Puts the share in 'error' state and eats the exception. 
+ """ + fake_share_id = 1 + self._mox_schedule_method_helper('schedule_create_share') + self.mox.StubOutWithMock(db, 'share_update') + + topic = 'fake_topic' + share_id = fake_share_id + request_spec = {'share_id': fake_share_id} + + self.manager.driver.schedule_create_share( + self.context, + request_spec, {}).AndRaise(exception.NoValidHost(reason="")) + db.share_update(self.context, fake_share_id, {'status': 'error'}) + + self.mox.ReplayAll() + self.manager.create_share(self.context, topic, share_id, + request_spec=request_spec, + filter_properties={}) + + def _mox_schedule_method_helper(self, method_name): + # Make sure the method exists that we're going to test call + def stub_method(*args, **kwargs): + pass + + setattr(self.manager.driver, method_name, stub_method) + + self.mox.StubOutWithMock(self.manager.driver, + method_name) + + +class SchedulerTestCase(test.TestCase): + """Test case for base scheduler driver class.""" + + # So we can subclass this test and re-use tests if we need. + driver_cls = driver.Scheduler + + def setUp(self): + super(SchedulerTestCase, self).setUp() + self.driver = self.driver_cls() + self.context = context.RequestContext('fake_user', 'fake_project') + self.topic = 'fake_topic' + + def test_update_service_capabilities(self): + service_name = 'fake_service' + host = 'fake_host' + + self.mox.StubOutWithMock(self.driver.host_manager, + 'update_service_capabilities') + + capabilities = {'fake_capability': 'fake_value'} + self.driver.host_manager.update_service_capabilities(service_name, + host, + capabilities) + self.mox.ReplayAll() + result = self.driver.update_service_capabilities(service_name, + host, + capabilities) + + def test_hosts_up(self): + service1 = {'host': 'host1'} + service2 = {'host': 'host2'} + services = [service1, service2] + + self.mox.StubOutWithMock(db, 'service_get_all_by_topic') + self.mox.StubOutWithMock(utils, 'service_is_up') + + db.service_get_all_by_topic(self.context, + self.topic).AndReturn(services) + utils.service_is_up(service1).AndReturn(False) + utils.service_is_up(service2).AndReturn(True) + + self.mox.ReplayAll() + result = self.driver.hosts_up(self.context, self.topic) + self.assertEqual(result, ['host2']) + + +class SchedulerDriverBaseTestCase(SchedulerTestCase): + """Test cases for base scheduler driver class methods + that can't will fail if the driver is changed""" + + def test_unimplemented_schedule(self): + fake_args = (1, 2, 3) + fake_kwargs = {'cat': 'meow'} + + self.assertRaises(NotImplementedError, self.driver.schedule, + self.context, self.topic, 'schedule_something', + *fake_args, **fake_kwargs) + + +class SchedulerDriverModuleTestCase(test.TestCase): + """Test case for scheduler driver module methods.""" + + def setUp(self): + super(SchedulerDriverModuleTestCase, self).setUp() + self.context = context.RequestContext('fake_user', 'fake_project') + + def test_volume_host_update_db(self): + self.mox.StubOutWithMock(timeutils, 'utcnow') + self.mox.StubOutWithMock(db, 'volume_update') + + timeutils.utcnow().AndReturn('fake-now') + db.volume_update(self.context, 31337, + {'host': 'fake_host', + 'scheduled_at': 'fake-now'}) + + self.mox.ReplayAll() + driver.volume_update_db(self.context, 31337, 'fake_host') + + def test_share_host_update_db(self): + self.mox.StubOutWithMock(timeutils, 'utcnow') + self.mox.StubOutWithMock(db, 'share_update') + + timeutils.utcnow().AndReturn('fake-now') + db.share_update(self.context, 31337, + {'host': 'fake_host', + 'scheduled_at': 'fake-now'}) + + self.mox.ReplayAll() + 
driver.share_update_db(self.context, 31337, 'fake_host') + + +class SimpleSchedulerSharesTestCase(test.TestCase): + """Test case for simple scheduler create share method.""" + driver = simple.SimpleScheduler() + + def setUp(self): + super(SimpleSchedulerSharesTestCase, self).setUp() + self.context = context.RequestContext('fake_user', 'fake_project') + self.admin_context = context.RequestContext('fake_admin_user', + 'fake_project') + self.admin_context.is_admin = True + + def test_create_share_if_two_services_up(self): + share_id = 'fake' + fake_share = {'id': share_id, 'size': 1} + + fake_service_1 = {'disabled': False, 'host': 'fake_host1'} + + fake_service_2 = {'disabled': False, 'host': 'fake_host2'} + + fake_result = [(fake_service_1, 2), (fake_service_2, 1)] + + self.mox.StubOutWithMock(db, 'service_get_all_share_sorted') + self.mox.StubOutWithMock(utils, 'service_is_up') + self.mox.StubOutWithMock(driver, 'share_update_db') + + fake_request_spec = {'share_id': share_id, + 'share_properties': fake_share} + + db.service_get_all_share_sorted(IsA(context.RequestContext))\ + .AndReturn(fake_result) + utils.service_is_up(IsA(dict)).AndReturn(True) + driver.share_update_db(IsA(context.RequestContext), share_id, + 'fake_host1').AndReturn(fake_share) + self.mox.ReplayAll() + + self.driver.schedule_create_share(self.context, fake_request_spec, {}) + + def test_create_share_if_services_not_available(self): + share_id = 'fake' + fake_share = {'id': share_id, 'size': 1} + + fake_result = [] + + fake_request_spec = {'share_id': share_id, + 'share_properties': fake_share} + + self.mox.StubOutWithMock(db, 'service_get_all_share_sorted') + + db.service_get_all_share_sorted(IsA(context.RequestContext))\ + .AndReturn(fake_result) + + self.mox.ReplayAll() + + self.assertRaises(exception.NoValidHost, + self.driver.schedule_create_share, + self.context, fake_request_spec, {}) + + def test_create_share_if_max_gigabytes_exceeded(self): + share_id = 'fake' + fake_share = {'id': share_id, 'size': 10001} + + fake_service_1 = {'disabled': False, 'host': 'fake_host1'} + + fake_service_2 = {'disabled': False, 'host': 'fake_host2'} + + fake_result = [(fake_service_1, 5), (fake_service_2, 7)] + + fake_request_spec = {'share_id': share_id, + 'share_properties': fake_share} + + self.mox.StubOutWithMock(db, 'service_get_all_share_sorted') + + db.service_get_all_share_sorted(IsA(context.RequestContext))\ + .AndReturn(fake_result) + + self.mox.ReplayAll() + + self.assertRaises(exception.NoValidHost, + self.driver.schedule_create_share, + self.context, fake_request_spec, {}) + + def test_create_share_availability_zone(self): + share_id = 'fake' + fake_share = {'id': share_id, + 'availability_zone': 'fake:fake', + 'size': 1} + + fake_service_1 = {'disabled': False, 'host': 'fake_host1', + 'availability_zone': 'fake'} + + fake_service_2 = {'disabled': False, 'host': 'fake_host2', + 'availability_zone': 'super_fake'} + + fake_result = [(fake_service_1, 0), (fake_service_2, 1)] + + fake_request_spec = {'share_id': share_id, + 'share_properties': fake_share} + + self.mox.StubOutWithMock(utils, 'service_is_up') + self.mox.StubOutWithMock(driver, 'share_update_db') + self.mox.StubOutWithMock(db, 'service_get_all_share_sorted') + + db.service_get_all_share_sorted(IsA(context.RequestContext))\ + .AndReturn(fake_result) + + utils.service_is_up(fake_service_1).AndReturn(True) + driver.share_update_db(IsA(context.RequestContext), share_id, + fake_service_1['host']).AndReturn(fake_share) + + self.mox.ReplayAll() + 
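        # fake_service_1 is the only service in the requested zone,
+        # so the scheduler must place the share there.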
+        self.driver.schedule_create_share(self.context, fake_request_spec, {})
+
+    def test_create_share_availability_zone_on_host(self):
+        share_id = 'fake'
+        fake_share = {'id': share_id,
+                      'availability_zone': 'fake:fake',
+                      'size': 1}
+
+        fake_request_spec = {'share_id': share_id,
+                             'share_properties': fake_share}
+
+        self.mox.StubOutWithMock(utils, 'service_is_up')
+        self.mox.StubOutWithMock(db, 'service_get_by_args')
+        self.mox.StubOutWithMock(driver, 'share_update_db')
+
+        db.service_get_by_args(IsA(context.RequestContext), 'fake',
+                               'cinder-share').AndReturn('fake_service')
+        utils.service_is_up('fake_service').AndReturn(True)
+        driver.share_update_db(IsA(context.RequestContext), share_id,
+                               'fake').AndReturn(fake_share)
+
+        self.mox.ReplayAll()
+        self.driver.schedule_create_share(self.admin_context,
+                                          fake_request_spec, {})
+
+    def test_create_share_availability_zone_if_service_down(self):
+        share_id = 'fake'
+        fake_share = {'id': share_id,
+                      'availability_zone': 'fake:fake',
+                      'size': 1}
+
+        fake_request_spec = {'share_id': share_id,
+                             'share_properties': fake_share}
+
+        self.mox.StubOutWithMock(utils, 'service_is_up')
+        self.mox.StubOutWithMock(db, 'service_get_by_args')
+
+        db.service_get_by_args(IsA(context.RequestContext), 'fake',
+                               'cinder-share').AndReturn('fake_service')
+        utils.service_is_up('fake_service').AndReturn(False)
+
+        self.mox.ReplayAll()
+        self.assertRaises(exception.WillNotSchedule,
+                          self.driver.schedule_create_share,
+                          self.admin_context, fake_request_spec, {})
diff --git a/cinder/tests/scheduler/test_scheduler_options.py b/cinder/tests/scheduler/test_scheduler_options.py
new file mode 100644
index 0000000000..7ecb772baf
--- /dev/null
+++ b/cinder/tests/scheduler/test_scheduler_options.py
@@ -0,0 +1,138 @@
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Tests For SchedulerOptions.
+"""
+
+import datetime
+import StringIO
+
+from cinder.openstack.common import jsonutils
+from cinder.scheduler import scheduler_options
+from cinder import test
+
+
+class FakeSchedulerOptions(scheduler_options.SchedulerOptions):
+    def __init__(self, last_checked, now, file_old, file_now, data, filedata):
+        super(FakeSchedulerOptions, self).__init__()
+        # Change internals ...
+        self.last_modified = file_old
+        self.last_checked = last_checked
+        self.data = data
+
+        # For overrides ...
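+        # (the _get_* overrides below return these canned values instead
+        #  of touching the real clock or filesystem)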
+ self._time_now = now + self._file_now = file_now + self._file_data = filedata + + self.file_was_loaded = False + + def _get_file_timestamp(self, filename): + return self._file_now + + def _get_file_handle(self, filename): + self.file_was_loaded = True + return StringIO.StringIO(self._file_data) + + def _get_time_now(self): + return self._time_now + + +class SchedulerOptionsTestCase(test.TestCase): + def test_get_configuration_first_time_no_flag(self): + last_checked = None + now = datetime.datetime(2012, 1, 1, 1, 1, 1) + file_old = None + file_now = datetime.datetime(2012, 1, 1, 1, 1, 1) + + data = dict(a=1, b=2, c=3) + jdata = jsonutils.dumps(data) + + fake = FakeSchedulerOptions(last_checked, now, file_old, file_now, + {}, jdata) + self.assertEquals({}, fake.get_configuration()) + self.assertFalse(fake.file_was_loaded) + + def test_get_configuration_first_time_empty_file(self): + last_checked = None + now = datetime.datetime(2012, 1, 1, 1, 1, 1) + file_old = None + file_now = datetime.datetime(2012, 1, 1, 1, 1, 1) + + data = dict(a=1, b=2, c=3) + jdata = "" + + fake = FakeSchedulerOptions(last_checked, now, file_old, file_now, + {}, jdata) + self.assertEquals({}, fake.get_configuration('foo.json')) + self.assertTrue(fake.file_was_loaded) + + def test_get_configuration_first_time_happy_day(self): + last_checked = None + now = datetime.datetime(2012, 1, 1, 1, 1, 1) + file_old = None + file_now = datetime.datetime(2012, 1, 1, 1, 1, 1) + + data = dict(a=1, b=2, c=3) + jdata = jsonutils.dumps(data) + + fake = FakeSchedulerOptions(last_checked, now, file_old, file_now, + {}, jdata) + self.assertEquals(data, fake.get_configuration('foo.json')) + self.assertTrue(fake.file_was_loaded) + + def test_get_configuration_second_time_no_change(self): + last_checked = datetime.datetime(2011, 1, 1, 1, 1, 1) + now = datetime.datetime(2012, 1, 1, 1, 1, 1) + file_old = datetime.datetime(2012, 1, 1, 1, 1, 1) + file_now = datetime.datetime(2012, 1, 1, 1, 1, 1) + + data = dict(a=1, b=2, c=3) + jdata = jsonutils.dumps(data) + + fake = FakeSchedulerOptions(last_checked, now, file_old, file_now, + data, jdata) + self.assertEquals(data, fake.get_configuration('foo.json')) + self.assertFalse(fake.file_was_loaded) + + def test_get_configuration_second_time_too_fast(self): + last_checked = datetime.datetime(2011, 1, 1, 1, 1, 1) + now = datetime.datetime(2011, 1, 1, 1, 1, 2) + file_old = datetime.datetime(2012, 1, 1, 1, 1, 1) + file_now = datetime.datetime(2013, 1, 1, 1, 1, 1) + + old_data = dict(a=1, b=2, c=3) + data = dict(a=11, b=12, c=13) + jdata = jsonutils.dumps(data) + + fake = FakeSchedulerOptions(last_checked, now, file_old, file_now, + old_data, jdata) + self.assertEquals(old_data, fake.get_configuration('foo.json')) + self.assertFalse(fake.file_was_loaded) + + def test_get_configuration_second_time_change(self): + last_checked = datetime.datetime(2011, 1, 1, 1, 1, 1) + now = datetime.datetime(2012, 1, 1, 1, 1, 1) + file_old = datetime.datetime(2012, 1, 1, 1, 1, 1) + file_now = datetime.datetime(2013, 1, 1, 1, 1, 1) + + old_data = dict(a=1, b=2, c=3) + data = dict(a=11, b=12, c=13) + jdata = jsonutils.dumps(data) + + fake = FakeSchedulerOptions(last_checked, now, file_old, file_now, + old_data, jdata) + self.assertEquals(data, fake.get_configuration('foo.json')) + self.assertTrue(fake.file_was_loaded) diff --git a/cinder/tests/test_HpSanISCSIDriver.py b/cinder/tests/test_HpSanISCSIDriver.py new file mode 100644 index 0000000000..a9ef2106cd --- /dev/null +++ b/cinder/tests/test_HpSanISCSIDriver.py @@ -0,0 
+1,245 @@
+# Copyright 2012 OpenStack LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mox
+
+from cinder import exception
+from cinder.openstack.common import log as logging
+from cinder import test
+from cinder.volume import configuration as conf
+from cinder.volume.drivers.san.hp_lefthand import HpSanISCSIDriver
+
+LOG = logging.getLogger(__name__)
+
+
+class HpSanISCSITestCase(test.TestCase):
+
+    def setUp(self):
+        super(HpSanISCSITestCase, self).setUp()
+        self.stubs.Set(HpSanISCSIDriver, "_cliq_run",
+                       self._fake_cliq_run)
+        self.stubs.Set(HpSanISCSIDriver, "_get_iscsi_properties",
+                       self._fake_get_iscsi_properties)
+        configuration = mox.MockObject(conf.Configuration)
+        configuration.san_is_local = False
+        configuration.san_ip = "10.0.0.1"
+        configuration.san_login = "foo"
+        configuration.san_password = "bar"
+        configuration.san_ssh_port = 16022
+        configuration.san_clustername = "CloudCluster1"
+        configuration.san_thin_provision = True
+        configuration.append_config_values(mox.IgnoreArg())
+
+        self.driver = HpSanISCSIDriver(configuration=configuration)
+        self.volume_name = "fakevolume"
+        self.connector = {'ip': '10.0.0.2',
+                          'initiator': 'iqn.1993-08.org.debian:01:222',
+                          'host': 'fakehost'}
+        self.properties = {
+            'target_discovered': True,
+            'target_portal': '10.0.1.6:3260',
+            'target_iqn':
+            'iqn.2003-10.com.lefthandnetworks:group01:25366:fakev',
+            'volume_id': 1}
+
+    def tearDown(self):
+        super(HpSanISCSITestCase, self).tearDown()
+
+    def _fake_get_iscsi_properties(self, volume):
+        return self.properties
+
+    def _fake_cliq_run(self, verb, cliq_args, check_exit_code=True):
+        """Return fake results for the various methods."""
+
+        def create_volume(cliq_args):
+            """
+            input = "createVolume description="fake description"
+                     clusterName=Cluster01 volumeName=fakevolume
+                     thinProvision=0 output=XML size=1GB"
+            """
+            output = """
+
+            """
+            self.assertEqual(cliq_args['volumeName'], self.volume_name)
+            self.assertEqual(cliq_args['thinProvision'], '1')
+            self.assertEqual(cliq_args['size'], '1GB')
+            return output, None
+
+        def delete_volume(cliq_args):
+            """
+            input = "deleteVolume volumeName=fakevolume prompt=false
+                     output=XML"
+            """
+            output = """
+
+            """
+            self.assertEqual(cliq_args['volumeName'], self.volume_name)
+            self.assertEqual(cliq_args['prompt'], 'false')
+            return output, None
+
+        def assign_volume(cliq_args):
+            """
+            input = "assignVolumeToServer volumeName=fakevolume
+                     serverName=fakehost
+                     output=XML"
+            """
+            output = """
+
+            """
+            self.assertEqual(cliq_args['volumeName'], self.volume_name)
+            self.assertEqual(cliq_args['serverName'], self.connector['host'])
+            return output, None
+
+        def unassign_volume(cliq_args):
+            """
+            input = "unassignVolumeToServer volumeName=fakevolume
+                     serverName=fakehost output=XML"
+            """
+            output = """
+
+            """
+            self.assertEqual(cliq_args['volumeName'], self.volume_name)
+            self.assertEqual(cliq_args['serverName'], self.connector['host'])
+            return output, None
+
+        def get_cluster_info(cliq_args):
+            """
+            input = "getClusterInfo clusterName=Cluster01 searchDepth=1
+                     verbose=0 output=XML"
+            """
+            output = """
+
+
+
+
+
+            """
+            return output, None
+
+        def get_volume_info(cliq_args):
+            """
+            input = "getVolumeInfo volumeName=fakevolume output=XML"
+            """
+            output = """
+
+
+
+            """
+            return output, None
+
+        def get_server_info(cliq_args):
+            """
+            input = "getServerInfo serverName=fakeName"
+            """
+            output = """
+            """
+            return output, None
+
+        def create_server(cliq_args):
+            """
+            input = "createServer serverName=fakeName initiator=something"
+            """
+            output = """
+            """
+            return output, None
+
+        def test_error(cliq_args):
+            output = """
+
+            """
+            return output, None
+
+        self.assertEqual(cliq_args['output'], 'XML')
+        verbs = {'createVolume': create_volume,
+                 'deleteVolume': delete_volume,
+                 'assignVolumeToServer': assign_volume,
+                 'unassignVolumeToServer': unassign_volume,
+                 'getClusterInfo': get_cluster_info,
+                 'getVolumeInfo': get_volume_info,
+                 'getServerInfo': get_server_info,
+                 'createServer': create_server,
+                 'testError': test_error}
+        try:
+            # The KeyError for an unknown verb comes from the lookup, so
+            # the lookup itself must sit inside the try block.
+            return verbs[verb](cliq_args)
+        except KeyError:
+            raise NotImplementedError()
+
+    def test_create_volume(self):
+        volume = {'name': self.volume_name, 'size': 1}
+        model_update = self.driver.create_volume(volume)
+        expected_iqn = "iqn.2003-10.com.lefthandnetworks:group01:25366:fakev 0"
+        expected_location = "10.0.1.6:3260,1 %s" % expected_iqn
+        self.assertEqual(model_update['provider_location'], expected_location)
+
+    def test_delete_volume(self):
+        volume = {'name': self.volume_name}
+        self.driver.delete_volume(volume)
+
+    def test_initialize_connection(self):
+        volume = {'name': self.volume_name}
+        result = self.driver.initialize_connection(volume, self.connector)
+        self.assertEqual(result['driver_volume_type'], 'iscsi')
+        self.assertDictMatch(result['data'], self.properties)
+
+    def test_terminate_connection(self):
+        volume = {'name': self.volume_name}
+        self.driver.terminate_connection(volume, self.connector)
+
+    def test_create_snapshot(self):
+        # assertRaises fails the test if the call unexpectedly succeeds;
+        # a bare try/except NotImplementedError would pass either way.
+        self.assertRaises(NotImplementedError,
+                          self.driver.create_snapshot, "")
+
+    def test_create_volume_from_snapshot(self):
+        self.assertRaises(NotImplementedError,
+                          self.driver.create_volume_from_snapshot, "", "")
+
+    def test_cliq_error(self):
+        self.assertRaises(exception.VolumeBackendAPIException,
+                          self.driver._cliq_run_xml, "testError", {})
diff --git a/cinder/tests/test_api.py b/cinder/tests/test_api.py
new file mode 100644
index 0000000000..4397b5b4e5
--- /dev/null
+++ b/cinder/tests/test_api.py
@@ -0,0 +1,75 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+ +"""Unit tests for the API endpoint.""" + +import httplib +import StringIO + +import webob + + +class FakeHttplibSocket(object): + """A fake socket implementation for httplib.HTTPResponse, trivial.""" + def __init__(self, response_string): + self.response_string = response_string + self._buffer = StringIO.StringIO(response_string) + + def makefile(self, _mode, _other): + """Returns the socket's internal buffer.""" + return self._buffer + + +class FakeHttplibConnection(object): + """A fake httplib.HTTPConnection for boto. + + requests made via this connection actually get translated and routed into + our WSGI app, we then wait for the response and turn it back into + the httplib.HTTPResponse that boto expects. + """ + def __init__(self, app, host, is_secure=False): + self.app = app + self.host = host + + def request(self, method, path, data, headers): + req = webob.Request.blank(path) + req.method = method + req.body = data + req.headers = headers + req.headers['Accept'] = 'text/html' + req.host = self.host + # Call the WSGI app, get the HTTP response + resp = str(req.get_response(self.app)) + # For some reason, the response doesn't have "HTTP/1.0 " prepended; I + # guess that's a function the web server usually provides. + resp = "HTTP/1.0 %s" % resp + self.sock = FakeHttplibSocket(resp) + self.http_response = httplib.HTTPResponse(self.sock) + # NOTE(vish): boto is accessing private variables for some reason + self._HTTPConnection__response = self.http_response + self.http_response.begin() + + def getresponse(self): + return self.http_response + + def getresponsebody(self): + return self.sock.response_string + + def close(self): + """Required for compatibility with boto/tornado.""" + pass diff --git a/cinder/tests/test_backup.py b/cinder/tests/test_backup.py new file mode 100644 index 0000000000..bffddb36c8 --- /dev/null +++ b/cinder/tests/test_backup.py @@ -0,0 +1,395 @@ +# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Tests for Backup code. 
+ +""" + +import tempfile + +from cinder import context +from cinder import db +from cinder import exception +from cinder import flags +from cinder.openstack.common import importutils +from cinder.openstack.common import log as logging +from cinder.openstack.common import timeutils +from cinder import test + +FLAGS = flags.FLAGS +LOG = logging.getLogger(__name__) + + +class FakeBackupException(Exception): + pass + + +class BackupTestCase(test.TestCase): + """Test Case for backups.""" + + def setUp(self): + super(BackupTestCase, self).setUp() + vol_tmpdir = tempfile.mkdtemp() + self.flags(connection_type='fake', + volumes_dir=vol_tmpdir) + self.backup_mgr = \ + importutils.import_object(FLAGS.backup_manager) + self.backup_mgr.host = 'testhost' + self.ctxt = context.get_admin_context() + + def tearDown(self): + super(BackupTestCase, self).tearDown() + + def _create_backup_db_entry(self, volume_id=1, display_name='test_backup', + display_description='this is a test backup', + container='volumebackups', + status='creating', + size=0, + object_count=0, + project_id='fake'): + """ + Create a backup entry in the DB. + Return the entry ID + """ + backup = {} + backup['volume_id'] = volume_id + backup['user_id'] = 'fake' + backup['project_id'] = project_id + backup['host'] = 'testhost' + backup['availability_zone'] = '1' + backup['display_name'] = display_name + backup['display_description'] = display_description + backup['container'] = container + backup['status'] = status + backup['fail_reason'] = '' + backup['service'] = FLAGS.backup_service + backup['size'] = size + backup['object_count'] = object_count + return db.backup_create(self.ctxt, backup)['id'] + + def _create_volume_db_entry(self, display_name='test_volume', + display_description='this is a test volume', + status='backing-up', + size=1): + """ + Create a volume entry in the DB. 
+ Return the entry ID + """ + vol = {} + vol['size'] = size + vol['host'] = 'testhost' + vol['user_id'] = 'fake' + vol['project_id'] = 'fake' + vol['status'] = status + vol['display_name'] = display_name + vol['display_description'] = display_description + vol['attach_status'] = 'detached' + return db.volume_create(self.ctxt, vol)['id'] + + def test_init_host(self): + """Make sure stuck volumes and backups are reset to correct + states when backup_manager.init_host() is called""" + vol1_id = self._create_volume_db_entry(status='backing-up') + vol2_id = self._create_volume_db_entry(status='restoring-backup') + backup1_id = self._create_backup_db_entry(status='creating') + backup2_id = self._create_backup_db_entry(status='restoring') + backup3_id = self._create_backup_db_entry(status='deleting') + + self.backup_mgr.init_host() + vol1 = db.volume_get(self.ctxt, vol1_id) + self.assertEquals(vol1['status'], 'available') + vol2 = db.volume_get(self.ctxt, vol2_id) + self.assertEquals(vol2['status'], 'error_restoring') + + backup1 = db.backup_get(self.ctxt, backup1_id) + self.assertEquals(backup1['status'], 'error') + backup2 = db.backup_get(self.ctxt, backup2_id) + self.assertEquals(backup2['status'], 'available') + self.assertRaises(exception.BackupNotFound, + db.backup_get, + self.ctxt, + backup3_id) + + def test_create_backup_with_bad_volume_status(self): + """Test error handling when creating a backup from a volume + with a bad status""" + vol_id = self._create_volume_db_entry(status='available', size=1) + backup_id = self._create_backup_db_entry(volume_id=vol_id) + self.assertRaises(exception.InvalidVolume, + self.backup_mgr.create_backup, + self.ctxt, + backup_id) + + def test_create_backup_with_bad_backup_status(self): + """Test error handling when creating a backup with a backup + with a bad status""" + vol_id = self._create_volume_db_entry(size=1) + backup_id = self._create_backup_db_entry(status='available', + volume_id=vol_id) + self.assertRaises(exception.InvalidBackup, + self.backup_mgr.create_backup, + self.ctxt, + backup_id) + + def test_create_backup_with_error(self): + """Test error handling when an error occurs during backup creation""" + vol_id = self._create_volume_db_entry(size=1) + backup_id = self._create_backup_db_entry(volume_id=vol_id) + + def fake_backup_volume(context, backup, backup_service): + raise FakeBackupException('fake') + + self.stubs.Set(self.backup_mgr.driver, 'backup_volume', + fake_backup_volume) + + self.assertRaises(FakeBackupException, + self.backup_mgr.create_backup, + self.ctxt, + backup_id) + vol = db.volume_get(self.ctxt, vol_id) + self.assertEquals(vol['status'], 'available') + backup = db.backup_get(self.ctxt, backup_id) + self.assertEquals(backup['status'], 'error') + + def test_create_backup(self): + """Test normal backup creation""" + vol_size = 1 + vol_id = self._create_volume_db_entry(size=vol_size) + backup_id = self._create_backup_db_entry(volume_id=vol_id) + + def fake_backup_volume(context, backup, backup_service): + pass + + self.stubs.Set(self.backup_mgr.driver, 'backup_volume', + fake_backup_volume) + + self.backup_mgr.create_backup(self.ctxt, backup_id) + vol = db.volume_get(self.ctxt, vol_id) + self.assertEquals(vol['status'], 'available') + backup = db.backup_get(self.ctxt, backup_id) + self.assertEquals(backup['status'], 'available') + self.assertEqual(backup['size'], vol_size) + + def test_restore_backup_with_bad_volume_status(self): + """Test error handling when restoring a backup to a volume + with a bad status""" + vol_id = 
self._create_volume_db_entry(status='available', size=1) + backup_id = self._create_backup_db_entry(volume_id=vol_id) + self.assertRaises(exception.InvalidVolume, + self.backup_mgr.restore_backup, + self.ctxt, + backup_id, + vol_id) + backup = db.backup_get(self.ctxt, backup_id) + self.assertEquals(backup['status'], 'available') + + def test_restore_backup_with_bad_backup_status(self): + """Test error handling when restoring a backup with a backup + with a bad status""" + vol_id = self._create_volume_db_entry(status='restoring-backup', + size=1) + backup_id = self._create_backup_db_entry(status='available', + volume_id=vol_id) + self.assertRaises(exception.InvalidBackup, + self.backup_mgr.restore_backup, + self.ctxt, + backup_id, + vol_id) + vol = db.volume_get(self.ctxt, vol_id) + self.assertEquals(vol['status'], 'error') + backup = db.backup_get(self.ctxt, backup_id) + self.assertEquals(backup['status'], 'error') + + def test_restore_backup_with_driver_error(self): + """Test error handling when an error occurs during backup restore""" + vol_id = self._create_volume_db_entry(status='restoring-backup', + size=1) + backup_id = self._create_backup_db_entry(status='restoring', + volume_id=vol_id) + + def fake_restore_backup(context, backup, volume, backup_service): + raise FakeBackupException('fake') + + self.stubs.Set(self.backup_mgr.driver, 'restore_backup', + fake_restore_backup) + + self.assertRaises(FakeBackupException, + self.backup_mgr.restore_backup, + self.ctxt, + backup_id, + vol_id) + vol = db.volume_get(self.ctxt, vol_id) + self.assertEquals(vol['status'], 'error_restoring') + backup = db.backup_get(self.ctxt, backup_id) + self.assertEquals(backup['status'], 'available') + + def test_restore_backup_with_bad_service(self): + """Test error handling when attempting a restore of a backup + with a different service to that used to create the backup""" + vol_id = self._create_volume_db_entry(status='restoring-backup', + size=1) + backup_id = self._create_backup_db_entry(status='restoring', + volume_id=vol_id) + + def fake_restore_backup(context, backup, volume, backup_service): + pass + + self.stubs.Set(self.backup_mgr.driver, 'restore_backup', + fake_restore_backup) + + service = 'cinder.tests.backup.bad_service' + db.backup_update(self.ctxt, backup_id, {'service': service}) + self.assertRaises(exception.InvalidBackup, + self.backup_mgr.restore_backup, + self.ctxt, + backup_id, + vol_id) + vol = db.volume_get(self.ctxt, vol_id) + self.assertEquals(vol['status'], 'error') + backup = db.backup_get(self.ctxt, backup_id) + self.assertEquals(backup['status'], 'available') + + def test_restore_backup(self): + """Test normal backup restoration""" + vol_size = 1 + vol_id = self._create_volume_db_entry(status='restoring-backup', + size=vol_size) + backup_id = self._create_backup_db_entry(status='restoring', + volume_id=vol_id) + + def fake_restore_backup(context, backup, volume, backup_service): + pass + + self.stubs.Set(self.backup_mgr.driver, 'restore_backup', + fake_restore_backup) + + self.backup_mgr.restore_backup(self.ctxt, backup_id, vol_id) + vol = db.volume_get(self.ctxt, vol_id) + self.assertEquals(vol['status'], 'available') + backup = db.backup_get(self.ctxt, backup_id) + self.assertEquals(backup['status'], 'available') + + def test_delete_backup_with_bad_backup_status(self): + """Test error handling when deleting a backup with a backup + with a bad status""" + vol_id = self._create_volume_db_entry(size=1) + backup_id = self._create_backup_db_entry(status='available', + 
volume_id=vol_id) + self.assertRaises(exception.InvalidBackup, + self.backup_mgr.delete_backup, + self.ctxt, + backup_id) + backup = db.backup_get(self.ctxt, backup_id) + self.assertEquals(backup['status'], 'error') + + def test_delete_backup_with_error(self): + """Test error handling when an error occurs during backup deletion.""" + vol_id = self._create_volume_db_entry(size=1) + backup_id = self._create_backup_db_entry(status='deleting', + display_name='fail_on_delete', + volume_id=vol_id) + self.assertRaises(IOError, + self.backup_mgr.delete_backup, + self.ctxt, + backup_id) + backup = db.backup_get(self.ctxt, backup_id) + self.assertEquals(backup['status'], 'error') + + def test_delete_backup_with_bad_service(self): + """Test error handling when attempting a delete of a backup + with a different service to that used to create the backup""" + vol_id = self._create_volume_db_entry(size=1) + backup_id = self._create_backup_db_entry(status='deleting', + volume_id=vol_id) + service = 'cinder.tests.backup.bad_service' + db.backup_update(self.ctxt, backup_id, {'service': service}) + self.assertRaises(exception.InvalidBackup, + self.backup_mgr.delete_backup, + self.ctxt, + backup_id) + backup = db.backup_get(self.ctxt, backup_id) + self.assertEquals(backup['status'], 'error') + + def test_delete_backup_with_no_service(self): + """Test error handling when attempting a delete of a backup + with no service defined for that backup, relates to bug #1162908""" + vol_id = self._create_volume_db_entry(size=1) + backup_id = self._create_backup_db_entry(status='deleting', + volume_id=vol_id) + db.backup_update(self.ctxt, backup_id, {'service': None}) + self.backup_mgr.delete_backup(self.ctxt, backup_id) + + def test_delete_backup(self): + """Test normal backup deletion""" + vol_id = self._create_volume_db_entry(size=1) + backup_id = self._create_backup_db_entry(status='deleting', + volume_id=vol_id) + self.backup_mgr.delete_backup(self.ctxt, backup_id) + self.assertRaises(exception.BackupNotFound, + db.backup_get, + self.ctxt, + backup_id) + + ctxt_read_deleted = context.get_admin_context('yes') + backup = db.backup_get(ctxt_read_deleted, backup_id) + self.assertEqual(backup.deleted, True) + self.assertTrue(timeutils.utcnow() > backup.deleted_at) + self.assertEqual(backup.status, 'deleted') + + def test_list_backup(self): + backups = db.backup_get_all_by_project(self.ctxt, 'project1') + self.assertEqual(len(backups), 0) + + b1 = self._create_backup_db_entry() + b2 = self._create_backup_db_entry(project_id='project1') + backups = db.backup_get_all_by_project(self.ctxt, 'project1') + self.assertEqual(len(backups), 1) + self.assertEqual(backups[0].id, b2) + + def test_backup_get_all_by_project_with_deleted(self): + """Test deleted backups don't show up in backup_get_all_by_project. + Unless context.read_deleted is 'yes'""" + backups = db.backup_get_all_by_project(self.ctxt, 'fake') + self.assertEqual(len(backups), 0) + + backup_id_keep = self._create_backup_db_entry() + backup_id = self._create_backup_db_entry() + db.backup_destroy(self.ctxt, backup_id) + + backups = db.backup_get_all_by_project(self.ctxt, 'fake') + self.assertEqual(len(backups), 1) + self.assertEqual(backups[0].id, backup_id_keep) + + ctxt_read_deleted = context.get_admin_context('yes') + backups = db.backup_get_all_by_project(ctxt_read_deleted, 'fake') + self.assertEqual(len(backups), 2) + + def test_backup_get_all_by_host_with_deleted(self): + """Test deleted backups don't show up in backup_get_all_by_project. 
+ Unless context.read_deleted is 'yes'""" + backups = db.backup_get_all_by_host(self.ctxt, 'testhost') + self.assertEqual(len(backups), 0) + + backup_id_keep = self._create_backup_db_entry() + backup_id = self._create_backup_db_entry() + db.backup_destroy(self.ctxt, backup_id) + + backups = db.backup_get_all_by_host(self.ctxt, 'testhost') + self.assertEqual(len(backups), 1) + self.assertEqual(backups[0].id, backup_id_keep) + + ctxt_read_deleted = context.get_admin_context('yes') + backups = db.backup_get_all_by_host(ctxt_read_deleted, 'testhost') + self.assertEqual(len(backups), 2) diff --git a/cinder/tests/test_backup_swift.py b/cinder/tests/test_backup_swift.py new file mode 100644 index 0000000000..013641729a --- /dev/null +++ b/cinder/tests/test_backup_swift.py @@ -0,0 +1,208 @@ +# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Tests for Backup swift code. + +""" + +import bz2 +import hashlib +import os +import tempfile +import zlib + +from cinder.backup.services.swift import SwiftBackupService +from cinder import context +from cinder import db +from cinder import exception +from cinder import flags +from cinder.openstack.common import log as logging +from cinder import test +from cinder.tests.backup.fake_swift_client import FakeSwiftClient +from swiftclient import client as swift + + +FLAGS = flags.FLAGS +LOG = logging.getLogger(__name__) + + +def fake_md5(arg): + class result(object): + def hexdigest(self): + return 'fake-md5-sum' + + ret = result() + return ret + + +class BackupSwiftTestCase(test.TestCase): + """Test Case for swift.""" + + def _create_volume_db_entry(self): + vol = {'id': '1234-5678-1234-8888', + 'size': 1, + 'status': 'available'} + return db.volume_create(self.ctxt, vol)['id'] + + def _create_backup_db_entry(self, container='test-container'): + backup = {'id': 123, + 'size': 1, + 'container': container, + 'volume_id': '1234-5678-1234-8888'} + return db.backup_create(self.ctxt, backup)['id'] + + def setUp(self): + super(BackupSwiftTestCase, self).setUp() + self.ctxt = context.get_admin_context() + + self.stubs.Set(swift, 'Connection', FakeSwiftClient.Connection) + self.stubs.Set(hashlib, 'md5', fake_md5) + + self._create_volume_db_entry() + self.volume_file = tempfile.NamedTemporaryFile() + for i in xrange(0, 128): + self.volume_file.write(os.urandom(1024)) + + def tearDown(self): + self.volume_file.close() + super(BackupSwiftTestCase, self).tearDown() + + def test_backup_uncompressed(self): + self._create_backup_db_entry() + self.flags(backup_compression_algorithm='none') + service = SwiftBackupService(self.ctxt) + self.volume_file.seek(0) + backup = db.backup_get(self.ctxt, 123) + service.backup(backup, self.volume_file) + + def test_backup_bz2(self): + self._create_backup_db_entry() + self.flags(backup_compression_algorithm='bz2') + service = SwiftBackupService(self.ctxt) + self.volume_file.seek(0) + backup = db.backup_get(self.ctxt, 123) + service.backup(backup, 
self.volume_file) + + def test_backup_zlib(self): + self._create_backup_db_entry() + self.flags(backup_compression_algorithm='zlib') + service = SwiftBackupService(self.ctxt) + self.volume_file.seek(0) + backup = db.backup_get(self.ctxt, 123) + service.backup(backup, self.volume_file) + + def test_backup_default_container(self): + self._create_backup_db_entry(container=None) + service = SwiftBackupService(self.ctxt) + self.volume_file.seek(0) + backup = db.backup_get(self.ctxt, 123) + service.backup(backup, self.volume_file) + backup = db.backup_get(self.ctxt, 123) + self.assertEquals(backup['container'], 'volumebackups') + + def test_backup_custom_container(self): + container_name = 'fake99' + self._create_backup_db_entry(container=container_name) + service = SwiftBackupService(self.ctxt) + self.volume_file.seek(0) + backup = db.backup_get(self.ctxt, 123) + service.backup(backup, self.volume_file) + backup = db.backup_get(self.ctxt, 123) + self.assertEquals(backup['container'], container_name) + + def test_create_backup_container_check_wraps_socket_error(self): + container_name = 'socket_error_on_head' + self._create_backup_db_entry(container=container_name) + service = SwiftBackupService(self.ctxt) + self.volume_file.seek(0) + backup = db.backup_get(self.ctxt, 123) + self.assertRaises(exception.SwiftConnectionFailed, + service.backup, + backup, self.volume_file) + + def test_create_backup_put_object_wraps_socket_error(self): + container_name = 'socket_error_on_put' + self._create_backup_db_entry(container=container_name) + service = SwiftBackupService(self.ctxt) + self.volume_file.seek(0) + backup = db.backup_get(self.ctxt, 123) + self.assertRaises(exception.SwiftConnectionFailed, + service.backup, + backup, self.volume_file) + + def test_restore(self): + self._create_backup_db_entry() + service = SwiftBackupService(self.ctxt) + + with tempfile.NamedTemporaryFile() as volume_file: + backup = db.backup_get(self.ctxt, 123) + service.restore(backup, '1234-5678-1234-8888', volume_file) + + def test_restore_wraps_socket_error(self): + container_name = 'socket_error_on_get' + self._create_backup_db_entry(container=container_name) + service = SwiftBackupService(self.ctxt) + + with tempfile.NamedTemporaryFile() as volume_file: + backup = db.backup_get(self.ctxt, 123) + self.assertRaises(exception.SwiftConnectionFailed, + service.restore, + backup, '1234-5678-1234-8888', volume_file) + + def test_restore_unsupported_version(self): + container_name = 'unsupported_version' + self._create_backup_db_entry(container=container_name) + service = SwiftBackupService(self.ctxt) + + with tempfile.NamedTemporaryFile() as volume_file: + backup = db.backup_get(self.ctxt, 123) + self.assertRaises(exception.InvalidBackup, + service.restore, + backup, '1234-5678-1234-8888', volume_file) + + def test_delete(self): + self._create_backup_db_entry() + service = SwiftBackupService(self.ctxt) + backup = db.backup_get(self.ctxt, 123) + service.delete(backup) + + def test_delete_wraps_socket_error(self): + container_name = 'socket_error_on_delete' + self._create_backup_db_entry(container=container_name) + service = SwiftBackupService(self.ctxt) + backup = db.backup_get(self.ctxt, 123) + self.assertRaises(exception.SwiftConnectionFailed, + service.delete, + backup) + + def test_get_compressor(self): + service = SwiftBackupService(self.ctxt) + compressor = service._get_compressor('None') + self.assertEquals(compressor, None) + compressor = service._get_compressor('zlib') + self.assertEquals(compressor, zlib) + compressor = 
service._get_compressor('bz2') + self.assertEquals(compressor, bz2) + self.assertRaises(ValueError, service._get_compressor, 'fake') + + def test_check_container_exists(self): + service = SwiftBackupService(self.ctxt) + exists = service._check_container_exists('fake_container') + self.assertEquals(exists, True) + exists = service._check_container_exists('missing_container') + self.assertEquals(exists, False) + self.assertRaises(swift.ClientException, + service._check_container_exists, + 'unauthorized_container') diff --git a/cinder/tests/test_context.py b/cinder/tests/test_context.py new file mode 100644 index 0000000000..886ca28825 --- /dev/null +++ b/cinder/tests/test_context.py @@ -0,0 +1,72 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from cinder import context +from cinder import test + + +class ContextTestCase(test.TestCase): + + def test_request_context_sets_is_admin(self): + ctxt = context.RequestContext('111', + '222', + roles=['admin', 'weasel']) + self.assertEquals(ctxt.is_admin, True) + + def test_request_context_sets_is_admin_upcase(self): + ctxt = context.RequestContext('111', + '222', + roles=['Admin', 'weasel']) + self.assertEquals(ctxt.is_admin, True) + + def test_request_context_read_deleted(self): + ctxt = context.RequestContext('111', + '222', + read_deleted='yes') + self.assertEquals(ctxt.read_deleted, 'yes') + + ctxt.read_deleted = 'no' + self.assertEquals(ctxt.read_deleted, 'no') + + def test_request_context_read_deleted_invalid(self): + self.assertRaises(ValueError, + context.RequestContext, + '111', + '222', + read_deleted=True) + + ctxt = context.RequestContext('111', '222') + self.assertRaises(ValueError, + setattr, + ctxt, + 'read_deleted', + True) + + def test_extra_args_to_context_get_logged(self): + info = {} + + def fake_warn(log_msg): + info['log_msg'] = log_msg + + self.stubs.Set(context.LOG, 'warn', fake_warn) + + c = context.RequestContext('user', + 'project', + extra_arg1='meow', + extra_arg2='wuff') + self.assertTrue(c) + self.assertIn("'extra_arg1': 'meow'", info['log_msg']) + self.assertIn("'extra_arg2': 'wuff'", info['log_msg']) diff --git a/cinder/tests/test_coraid.py b/cinder/tests/test_coraid.py new file mode 100644 index 0000000000..cda121b283 --- /dev/null +++ b/cinder/tests/test_coraid.py @@ -0,0 +1,268 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
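The swift backup tests earlier in this series never touch a real cluster: the service is handed a fake swift client whose behaviour is keyed on magic container names ('socket_error_on_head', 'socket_error_on_put', 'socket_error_on_get', and so on), and the service is expected to wrap the raised socket errors in SwiftConnectionFailed. A minimal standalone sketch of that test-double pattern follows; the class and method names here are hypothetical, not the fake the suite actually installs:

    import socket


    class FakeSwiftConnection(object):
        """Hypothetical stand-in for a swift client connection object."""

        def head_container(self, container):
            if container == 'socket_error_on_head':
                raise socket.error('fake HEAD failure')
            return {}

        def put_object(self, container, name, reader):
            if container == 'socket_error_on_put':
                raise socket.error('fake PUT failure')
            return 'fake-etag'

        def get_object(self, container, name):
            if container == 'socket_error_on_get':
                raise socket.error('fake GET failure')
            return {}, 'fake-body'

Keying each failure mode on a fixture name keeps every error-path test down to one line of setup: pick the container name, then assert on the wrapped exception.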
+ +import mox + +from cinder import exception +from cinder.openstack.common import log as logging +from cinder import test +from cinder.volume import configuration as conf +from cinder.volume.drivers import coraid +from cinder.volume.drivers.coraid import CoraidDriver +from cinder.volume.drivers.coraid import CoraidESMException +from cinder.volume.drivers.coraid import CoraidRESTClient + +import cookielib +import urllib2 + +LOG = logging.getLogger(__name__) + + +fake_esm_ipaddress = "192.168.0.1" +fake_esm_username = "darmok" +fake_esm_group = "tanagra" +fake_esm_password = "12345678" + +fake_volume_name = "volume-12345678-1234-1234-1234-1234567890ab" +fake_volume_size = "10" +fake_repository_name = "A-B:C:D" +fake_pool_name = "FakePool" +fake_aoetarget = 4081 +fake_shelf = 16 +fake_lun = 241 + +fake_str_aoetarget = str(fake_aoetarget) +fake_lun_addr = {"shelf": fake_shelf, "lun": fake_lun} + +fake_volume = {"name": fake_volume_name, + "size": fake_volume_size, + "volume_type": {"id": 1}} + +fake_volume_info = {"pool": fake_pool_name, + "repo": fake_repository_name, + "vsxidx": fake_aoetarget, + "index": fake_lun, + "shelf": fake_shelf} + +fake_lun_info = {"shelf": fake_shelf, "lun": fake_lun} + +fake_snapshot_name = "snapshot-12345678-8888-8888-1234-1234567890ab" +fake_snapshot_id = "12345678-8888-8888-1234-1234567890ab" +fake_volume_id = "12345678-1234-1234-1234-1234567890ab" +fake_snapshot = {"id": fake_snapshot_id, + "volume_id": fake_volume_id} + +fake_configure_data = [{"addr": "cms", "data": "FAKE"}] + +fake_esm_fetch = [[ + {"command": "super_fake_command_of_death"}, + {"reply": [ + {"lv": + {"containingPool": fake_pool_name, + "lunIndex": fake_aoetarget, + "name": fake_volume_name, + "lvStatus": + {"exportedLun": + {"lun": fake_lun, + "shelf": fake_shelf}} + }, + "repoName": fake_repository_name}]}]] + +fake_esm_success = {"category": "provider", + "tracking": False, + "configState": "completedSuccessfully", + "heldPending": False, + "metaCROp": "noAction", + "message": None} + +fake_group_fullpath = "admin group:%s" % (fake_esm_group) +fake_group_id = 4 +fake_login_reply = {"values": [ + {"fullPath": fake_group_fullpath, + "groupId": fake_group_id}], + "message": "", + "state": "adminSucceed", + "metaCROp": "noAction"} + +fake_group_fail_fullpath = "fail group:%s" % (fake_esm_group) +fake_group_fail_id = 5 +fake_login_reply_group_fail = {"values": [ + {"fullPath": fake_group_fail_fullpath, + "groupId": fake_group_fail_id}], + "message": "", + "state": "adminSucceed", + "metaCROp": "noAction"} + + +class TestCoraidDriver(test.TestCase): + def setUp(self): + super(TestCoraidDriver, self).setUp() + self.esm_mock = self.mox.CreateMockAnything() + self.stubs.Set(coraid, 'CoraidRESTClient', + lambda *_, **__: self.esm_mock) + configuration = mox.MockObject(conf.Configuration) + configuration.append_config_values(mox.IgnoreArg()) + configuration.coraid_esm_address = fake_esm_ipaddress + configuration.coraid_user = fake_esm_username + configuration.coraid_group = fake_esm_group + configuration.coraid_password = fake_esm_password + + self.drv = CoraidDriver(configuration=configuration) + self.drv.do_setup({}) + + def test_create_volume(self): + setattr(self.esm_mock, 'create_lun', lambda *_: True) + self.stubs.Set(CoraidDriver, '_get_repository', + lambda *_: fake_repository_name) + self.drv.create_volume(fake_volume) + + def test_delete_volume(self): + setattr(self.esm_mock, 'delete_lun', + lambda *_: True) + self.drv.delete_volume(fake_volume) + + def test_initialize_connection(self): + 
setattr(self.esm_mock, '_get_lun_address', + lambda *_: fake_lun_addr) + self.drv.initialize_connection(fake_volume, '') + + def test_create_snapshot(self): + setattr(self.esm_mock, 'create_snapshot', + lambda *_: True) + self.drv.create_snapshot(fake_snapshot) + + def test_delete_snapshot(self): + setattr(self.esm_mock, 'delete_snapshot', + lambda *_: True) + self.drv.delete_snapshot(fake_snapshot) + + def test_create_volume_from_snapshot(self): + setattr(self.esm_mock, 'create_volume_from_snapshot', + lambda *_: True) + self.stubs.Set(CoraidDriver, '_get_repository', + lambda *_: fake_repository_name) + self.drv.create_volume_from_snapshot(fake_volume, fake_snapshot) + + +class TestCoraidRESTClient(test.TestCase): + def setUp(self): + super(TestCoraidRESTClient, self).setUp() + self.stubs.Set(cookielib, 'CookieJar', lambda *_: True) + self.stubs.Set(urllib2, 'build_opener', lambda *_: True) + self.stubs.Set(urllib2, 'HTTPCookieProcessor', lambda *_: True) + self.stubs.Set(CoraidRESTClient, '_login', lambda *_: True) + self.rest_mock = self.mox.CreateMockAnything() + self.stubs.Set(coraid, 'CoraidRESTClient', + lambda *_, **__: self.rest_mock) + self.drv = CoraidRESTClient(fake_esm_ipaddress, + fake_esm_username, + fake_esm_group, + fake_esm_password) + + def test__get_group_id(self): + setattr(self.rest_mock, '_get_group_id', + lambda *_: True) + self.assertEquals(self.drv._get_group_id(fake_esm_group, + fake_login_reply), + fake_group_id) + + def test__set_group(self): + setattr(self.rest_mock, '_set_group', + lambda *_: fake_group_id) + self.stubs.Set(CoraidRESTClient, '_admin_esm_cmd', + lambda *_: fake_login_reply) + self.drv._set_group(fake_login_reply) + + def test__set_group_fails_no_group(self): + setattr(self.rest_mock, '_set_group', + lambda *_: False) + self.stubs.Set(CoraidRESTClient, '_admin_esm_cmd', + lambda *_: fake_login_reply_group_fail) + self.assertRaises(CoraidESMException, + self.drv._set_group, + fake_login_reply_group_fail) + + def test__configure(self): + setattr(self.rest_mock, '_configure', + lambda *_: True) + self.stubs.Set(CoraidRESTClient, '_esm_cmd', + lambda *_: fake_esm_success) + self.drv._configure(fake_configure_data) + + def test__get_volume_info(self): + setattr(self.rest_mock, '_get_volume_info', + lambda *_: fake_volume_info) + self.stubs.Set(CoraidRESTClient, '_esm_cmd', + lambda *_: fake_esm_fetch) + self.drv._get_volume_info(fake_volume_name) + + def test__get_lun_address(self): + setattr(self.rest_mock, '_get_lun_address', + lambda *_: fake_lun_info) + self.stubs.Set(CoraidRESTClient, '_get_volume_info', + lambda *_: fake_volume_info) + self.drv._get_lun_address(fake_volume_name) + + def test_create_lun(self): + setattr(self.rest_mock, 'create_lun', + lambda *_: True) + self.stubs.Set(CoraidRESTClient, '_configure', + lambda *_: fake_esm_success) + self.rest_mock.create_lun(fake_volume_name, '10', + fake_repository_name) + self.drv.create_lun(fake_volume_name, '10', + fake_repository_name) + + def test_delete_lun(self): + setattr(self.rest_mock, 'delete_lun', + lambda *_: True) + self.stubs.Set(CoraidRESTClient, '_get_volume_info', + lambda *_: fake_volume_info) + self.stubs.Set(CoraidRESTClient, '_configure', + lambda *_: fake_esm_success) + self.rest_mock.delete_lun(fake_volume_name) + self.drv.delete_lun(fake_volume_name) + + def test_create_snapshot(self): + setattr(self.rest_mock, 'create_snapshot', + lambda *_: True) + self.stubs.Set(CoraidRESTClient, '_get_volume_info', + lambda *_: fake_volume_info) + self.stubs.Set(CoraidRESTClient, 
'_configure', + lambda *_: fake_esm_success) + self.drv.create_snapshot(fake_volume_name, + fake_volume_name) + + def test_delete_snapshot(self): + setattr(self.rest_mock, 'delete_snapshot', + lambda *_: True) + self.stubs.Set(CoraidRESTClient, '_get_volume_info', + lambda *_: fake_volume_info) + self.stubs.Set(CoraidRESTClient, '_configure', + lambda *_: fake_esm_success) + self.drv.delete_snapshot(fake_volume_name) + + def test_create_volume_from_snapshot(self): + setattr(self.rest_mock, 'create_volume_from_snapshot', + lambda *_: True) + self.stubs.Set(CoraidRESTClient, '_get_volume_info', + lambda *_: fake_volume_info) + self.stubs.Set(CoraidRESTClient, '_configure', + lambda *_: fake_esm_success) + self.drv.create_volume_from_snapshot(fake_volume_name, + fake_volume_name, + fake_repository_name) diff --git a/cinder/tests/test_drivers_compatibility.py b/cinder/tests/test_drivers_compatibility.py new file mode 100644 index 0000000000..f69eb65ab2 --- /dev/null +++ b/cinder/tests/test_drivers_compatibility.py @@ -0,0 +1,184 @@ +# Copyright 2012 OpenStack LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from cinder import context +from cinder import flags +from cinder.openstack.common import importutils +from cinder import test +from cinder.volume.drivers.solidfire import SolidFire + +FLAGS = flags.FLAGS + +RBD_MODULE = "cinder.volume.drivers.rbd.RBDDriver" +SHEEPDOG_MODULE = "cinder.volume.drivers.sheepdog.SheepdogDriver" +NEXENTA_MODULE = "cinder.volume.drivers.nexenta.volume.NexentaDriver" +SAN_MODULE = "cinder.volume.drivers.san.san.SanISCSIDriver" +SOLARIS_MODULE = "cinder.volume.drivers.san.solaris.SolarisISCSIDriver" +LEFTHAND_MODULE = "cinder.volume.drivers.san.hp_lefthand.HpSanISCSIDriver" +NETAPP_MODULE = "cinder.volume.drivers.netapp.iscsi.NetAppISCSIDriver" +NETAPP_CMODE_MODULE =\ + "cinder.volume.drivers.netapp.iscsi.NetAppCmodeISCSIDriver" +NETAPP_NFS_MODULE = "cinder.volume.drivers.netapp.nfs.NetAppNFSDriver" +NFS_MODULE = "cinder.volume.drivers.nfs.NfsDriver" +SOLIDFIRE_MODULE = "cinder.volume.drivers.solidfire.SolidFire" +STORWIZE_SVC_MODULE = "cinder.volume.drivers.storwize_svc.StorwizeSVCDriver" +WINDOWS_MODULE = "cinder.volume.drivers.windows.WindowsDriver" +XIV_MODULE = "cinder.volume.drivers.xiv.XIVDriver" +ZADARA_MODULE = "cinder.volume.drivers.zadara.ZadaraVPSAISCSIDriver" + + +class VolumeDriverCompatibility(test.TestCase): + """Test backwards compatibility for volume drivers.""" + + def fake_update_cluster_status(self): + return + + def setUp(self): + super(VolumeDriverCompatibility, self).setUp() + self.manager = importutils.import_object(FLAGS.volume_manager) + self.context = context.get_admin_context() + + def tearDown(self): + super(VolumeDriverCompatibility, self).tearDown() + + def _load_driver(self, driver): + if 'SolidFire' in driver: + # SolidFire driver does update_cluster stat on init + self.stubs.Set(SolidFire, '_update_cluster_status', + self.fake_update_cluster_status) + self.manager.__init__(volume_driver=driver) + + def _driver_module_name(self): 
+ return "%s.%s" % (self.manager.driver.__class__.__module__, + self.manager.driver.__class__.__name__) + + def test_rbd_old(self): + self._load_driver('cinder.volume.driver.RBDDriver') + self.assertEquals(self._driver_module_name(), RBD_MODULE) + + def test_rbd_new(self): + self._load_driver(RBD_MODULE) + self.assertEquals(self._driver_module_name(), RBD_MODULE) + + def test_sheepdog_old(self): + self._load_driver('cinder.volume.driver.SheepdogDriver') + self.assertEquals(self._driver_module_name(), SHEEPDOG_MODULE) + + def test_sheepdog_new(self): + self._load_driver(SHEEPDOG_MODULE) + self.assertEquals(self._driver_module_name(), SHEEPDOG_MODULE) + + def test_nexenta_old(self): + self._load_driver('cinder.volume.nexenta.volume.NexentaDriver') + self.assertEquals(self._driver_module_name(), NEXENTA_MODULE) + + def test_nexenta_new(self): + self._load_driver(NEXENTA_MODULE) + self.assertEquals(self._driver_module_name(), NEXENTA_MODULE) + + def test_san_old(self): + self._load_driver('cinder.volume.san.SanISCSIDriver') + self.assertEquals(self._driver_module_name(), SAN_MODULE) + + def test_san_new(self): + self._load_driver(SAN_MODULE) + self.assertEquals(self._driver_module_name(), SAN_MODULE) + + def test_solaris_old(self): + self._load_driver('cinder.volume.san.SolarisISCSIDriver') + self.assertEquals(self._driver_module_name(), SOLARIS_MODULE) + + def test_solaris_new(self): + self._load_driver(SOLARIS_MODULE) + self.assertEquals(self._driver_module_name(), SOLARIS_MODULE) + + def test_hp_lefthand_old(self): + self._load_driver('cinder.volume.san.HpSanISCSIDriver') + self.assertEquals(self._driver_module_name(), LEFTHAND_MODULE) + + def test_hp_lefthand_new(self): + self._load_driver(LEFTHAND_MODULE) + self.assertEquals(self._driver_module_name(), LEFTHAND_MODULE) + + def test_netapp_old(self): + self._load_driver('cinder.volume.netapp.NetAppISCSIDriver') + self.assertEquals(self._driver_module_name(), NETAPP_MODULE) + + def test_netapp_new(self): + self._load_driver(NETAPP_MODULE) + self.assertEquals(self._driver_module_name(), NETAPP_MODULE) + + def test_netapp_cmode_old(self): + self._load_driver('cinder.volume.netapp.NetAppCmodeISCSIDriver') + self.assertEquals(self._driver_module_name(), NETAPP_CMODE_MODULE) + + def test_netapp_cmode_new(self): + self._load_driver(NETAPP_CMODE_MODULE) + self.assertEquals(self._driver_module_name(), NETAPP_CMODE_MODULE) + + def test_netapp_nfs_old(self): + self._load_driver('cinder.volume.netapp_nfs.NetAppNFSDriver') + self.assertEquals(self._driver_module_name(), NETAPP_NFS_MODULE) + + def test_netapp_nfs_new(self): + self._load_driver(NETAPP_NFS_MODULE) + self.assertEquals(self._driver_module_name(), NETAPP_NFS_MODULE) + + def test_nfs_old(self): + self._load_driver('cinder.volume.nfs.NfsDriver') + self.assertEquals(self._driver_module_name(), NFS_MODULE) + + def test_nfs_new(self): + self._load_driver(NFS_MODULE) + self.assertEquals(self._driver_module_name(), NFS_MODULE) + + def test_solidfire_old(self): + self._load_driver('cinder.volume.solidfire.SolidFire') + self.assertEquals(self._driver_module_name(), SOLIDFIRE_MODULE) + + def test_solidfire_new(self): + self._load_driver(SOLIDFIRE_MODULE) + self.assertEquals(self._driver_module_name(), SOLIDFIRE_MODULE) + + def test_storwize_svc_old(self): + self._load_driver('cinder.volume.storwize_svc.StorwizeSVCDriver') + self.assertEquals(self._driver_module_name(), STORWIZE_SVC_MODULE) + + def test_storwize_svc_new(self): + self._load_driver(STORWIZE_SVC_MODULE) + 
self.assertEquals(self._driver_module_name(), STORWIZE_SVC_MODULE) + + def test_windows_old(self): + self._load_driver('cinder.volume.windows.WindowsDriver') + self.assertEquals(self._driver_module_name(), WINDOWS_MODULE) + + def test_windows_new(self): + self._load_driver(WINDOWS_MODULE) + self.assertEquals(self._driver_module_name(), WINDOWS_MODULE) + + def test_xiv_old(self): + self._load_driver('cinder.volume.xiv.XIVDriver') + self.assertEquals(self._driver_module_name(), XIV_MODULE) + + def test_xiv_new(self): + self._load_driver(XIV_MODULE) + self.assertEquals(self._driver_module_name(), XIV_MODULE) + + def test_zadara_old(self): + self._load_driver('cinder.volume.zadara.ZadaraVPSAISCSIDriver') + self.assertEquals(self._driver_module_name(), ZADARA_MODULE) + + def test_zadara_new(self): + self._load_driver(ZADARA_MODULE) + self.assertEquals(self._driver_module_name(), ZADARA_MODULE) diff --git a/cinder/tests/test_emc.py b/cinder/tests/test_emc.py new file mode 100644 index 0000000000..c12070629e --- /dev/null +++ b/cinder/tests/test_emc.py @@ -0,0 +1,766 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2012 EMC Corporation, Inc. +# Copyright (c) 2012 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
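The compatibility tests above reduce to one mechanism: translate a legacy driver path to its post-refactor location, import it dynamically, and compare the resulting class's module and name. A standalone sketch using only the stdlib; the mapping entry mirrors one real rename from this patch, while the helper itself is illustrative:

    import importlib

    # One real rename from this patch; additional entries would follow.
    LEGACY_PATHS = {
        'cinder.volume.driver.RBDDriver':
            'cinder.volume.drivers.rbd.RBDDriver',
    }


    def import_class(path):
        """Resolve a legacy dotted path, import its module, return the class."""
        path = LEGACY_PATHS.get(path, path)
        module_name, cls_name = path.rsplit('.', 1)
        return getattr(importlib.import_module(module_name), cls_name)

    # Works for any importable class, e.g. import_class('collections.OrderedDict').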
+ +import mox +import os +import shutil +import tempfile +from xml.dom.minidom import Document + +from cinder import exception +from cinder import flags +from cinder.openstack.common import log as logging +from cinder import test +from cinder.volume import configuration as conf +from cinder.volume.drivers.emc.emc_smis_common import EMCSMISCommon +from cinder.volume.drivers.emc.emc_smis_iscsi import EMCSMISISCSIDriver + +CINDER_EMC_CONFIG_FILE = '/etc/cinder/cinder_emc_config.xml' +LOG = logging.getLogger(__name__) + +config_file_name = 'cinder_emc_config.xml' +storage_system = 'CLARiiON+APM00123456789' +storage_system_vmax = 'SYMMETRIX+000195900551' +lunmaskctrl_id = 'CLARiiON+APM00123456789+00aa11bb22cc33dd44ff55gg66hh77ii88jj' +initiator1 = 'iqn.1993-08.org.debian:01:1a2b3c4d5f6g' +stconf_service_creationclass = 'Clar_StorageConfigurationService' +ctrlconf_service_creationclass = 'Clar_ControllerConfigurationService' +rep_service_creationclass = 'Clar_ReplicationService' +vol_creationclass = 'Clar_StorageVolume' +pool_creationclass = 'Clar_UnifiedStoragePool' +lunmask_creationclass = 'Clar_LunMaskingSCSIProtocolController' +unit_creationclass = 'CIM_ProtocolControllerForUnit' +storage_type = 'gold' + +test_volume = {'name': 'vol1', + 'size': 1, + 'volume_name': 'vol1', + 'id': '1', + 'provider_auth': None, + 'project_id': 'project', + 'display_name': 'vol1', + 'display_description': 'test volume', + 'volume_type_id': None} +test_failed_volume = {'name': 'failed_vol', + 'size': 1, + 'volume_name': 'failed_vol', + 'id': '4', + 'provider_auth': None, + 'project_id': 'project', + 'display_name': 'failed_vol', + 'display_description': 'test failed volume', + 'volume_type_id': None} +test_snapshot = {'name': 'snapshot1', + 'size': 1, + 'id': '4444', + 'volume_name': 'vol1', + 'volume_size': 1, + 'project_id': 'project'} +test_clone = {'name': 'clone1', + 'size': 1, + 'volume_name': 'vol1', + 'id': '2', + 'provider_auth': None, + 'project_id': 'project', + 'display_name': 'clone1', + 'display_description': 'volume created from snapshot', + 'volume_type_id': None} +test_clone3 = {'name': 'clone3', + 'size': 1, + 'volume_name': 'vol1', + 'id': '3', + 'provider_auth': None, + 'project_id': 'project', + 'display_name': 'clone3', + 'display_description': 'cloned volume', + 'volume_type_id': None} +test_snapshot_vmax = {'name': 'snapshot_vmax', + 'size': 1, + 'id': '4445', + 'volume_name': 'vol1', + 'volume_size': 1, + 'project_id': 'project'} +failed_snapshot_replica = {'name': 'failed_snapshot_replica', + 'size': 1, + 'volume_name': 'vol1', + 'id': '5', + 'provider_auth': None, + 'project_id': 'project', + 'display_name': 'vol1', + 'display_description': 'failed snapshot replica', + 'volume_type_id': None} +failed_snapshot_sync = {'name': 'failed_snapshot_sync', + 'size': 1, + 'volume_name': 'vol1', + 'id': '6', + 'provider_auth': None, + 'project_id': 'project', + 'display_name': 'failed_snapshot_sync', + 'display_description': 'failed snapshot sync', + 'volume_type_id': None} +failed_clone_replica = {'name': 'failed_clone_replica', + 'size': 1, + 'volume_name': 'vol1', + 'id': '7', + 'provider_auth': None, + 'project_id': 'project', + 'display_name': 'vol1', + 'display_description': 'failed clone replica', + 'volume_type_id': None} +failed_clone_sync = {'name': 'failed_clone_sync', + 'size': 1, + 'volume_name': 'vol1', + 'id': '8', + 'provider_auth': None, + 'project_id': 'project', + 'display_name': 'vol1', + 'display_description': 'failed clone sync', + 'volume_type_id': None} 
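+# Fixture whose DeviceID ('99999') the fake ECOM connection below treats as
+# a forced failure for EMCReturnToStoragePool, i.e. the delete-volume
+# error path.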
+failed_delete_vol = {'name': 'failed_delete_vol', + 'size': 1, + 'volume_name': 'failed_delete_vol', + 'id': '99999', + 'provider_auth': None, + 'project_id': 'project', + 'display_name': 'failed delete vol', + 'display_description': 'failed delete volume', + 'volume_type_id': None} + + +class EMC_StorageVolume(dict): + pass + + +class SE_ConcreteJob(dict): + pass + + +class FakeEcomConnection(): + + def InvokeMethod(self, MethodName, Service, ElementName=None, InPool=None, + ElementType=None, Size=None, + SyncType=None, SourceElement=None, + Operation=None, Synchronization=None, + TheElements=None, + LUNames=None, InitiatorPortIDs=None, DeviceAccesses=None, + ProtocolControllers=None, + MaskingGroup=None, Members=None): + + rc = 0L + myjob = SE_ConcreteJob() + myjob.classname = 'SE_ConcreteJob' + myjob['InstanceID'] = '9999' + myjob['status'] = 'success' + if ElementName == 'failed_vol' and \ + MethodName == 'CreateOrModifyElementFromStoragePool': + rc = 10L + myjob['status'] = 'failure' + elif ElementName == 'failed_snapshot_replica' and \ + MethodName == 'CreateElementReplica': + rc = 10L + myjob['status'] = 'failure' + elif Synchronization and \ + Synchronization['SyncedElement']['ElementName'] \ + == 'failed_snapshot_sync' and \ + MethodName == 'ModifyReplicaSynchronization': + rc = 10L + myjob['status'] = 'failure' + elif ElementName == 'failed_clone_replica' and \ + MethodName == 'CreateElementReplica': + rc = 10L + myjob['status'] = 'failure' + elif Synchronization and \ + Synchronization['SyncedElement']['ElementName'] \ + == 'failed_clone_sync' and \ + MethodName == 'ModifyReplicaSynchronization': + rc = 10L + myjob['status'] = 'failure' + elif TheElements and \ + TheElements[0]['DeviceID'] == '99999' and \ + MethodName == 'EMCReturnToStoragePool': + rc = 10L + myjob['status'] = 'failure' + + job = {'Job': myjob} + return rc, job + + def EnumerateInstanceNames(self, name): + result = None + if name == 'EMC_ReplicationService': + result = self._enum_replicationservices() + elif name == 'EMC_StorageConfigurationService': + result = self._enum_stconfsvcs() + elif name == 'EMC_ControllerConfigurationService': + result = self._enum_ctrlconfsvcs() + elif name == 'EMC_VirtualProvisioningPool': + result = self._enum_pools() + elif name == 'EMC_UnifiedStoragePool': + result = self._enum_pools() + elif name == 'EMC_StorageVolume': + result = self._enum_storagevolumes() + elif name == 'Clar_StorageVolume': + result = self._enum_storagevolumes() + elif name == 'SE_StorageSynchronized_SV_SV': + result = self._enum_syncsvsvs() + elif name == 'CIM_ProtocolControllerForUnit': + result = self._enum_unitnames() + elif name == 'EMC_LunMaskingSCSIProtocolController': + result = self._enum_lunmaskctrls() + elif name == 'EMC_StorageProcessorSystem': + result = self._enum_processors() + else: + result = self._default_enum() + return result + + def EnumerateInstances(self, name): + result = None + if name == 'EMC_VirtualProvisioningPool': + result = self._enum_pool_details() + elif name == 'EMC_UnifiedStoragePool': + result = self._enum_pool_details() + else: + result = self._default_enum() + return result + + def GetInstance(self, objectpath, LocalOnly=False): + try: + name = objectpath['CreationClassName'] + except KeyError: + name = objectpath.classname + result = None + if name == 'Clar_StorageVolume': + result = self._getinstance_storagevolume(objectpath) + elif name == 'CIM_ProtocolControllerForUnit': + result = self._getinstance_unit(objectpath) + elif name == 
'Clar_LunMaskingSCSIProtocolController': + result = self._getinstance_lunmask() + elif name == 'SE_ConcreteJob': + result = self._getinstance_job(objectpath) + elif name == 'SE_StorageSynchronized_SV_SV': + result = self._getinstance_syncsvsv(objectpath) + else: + result = self._default_getinstance(objectpath) + return result + + def Associators(self, objectpath, resultClass='EMC_StorageHardwareID'): + result = None + if resultClass == 'EMC_StorageHardwareID': + result = self._assoc_hdwid() + elif resultClass == 'EMC_iSCSIProtocolEndpoint': + result = self._assoc_endpoint() + else: + result = self._default_assoc(objectpath) + return result + + def AssociatorNames(self, objectpath, + resultClass='EMC_LunMaskingSCSIProtocolController'): + result = None + if resultClass == 'EMC_LunMaskingSCSIProtocolController': + result = self._assocnames_lunmaskctrl() + else: + result = self._default_assocnames(objectpath) + return result + + def ReferenceNames(self, objectpath, + ResultClass='CIM_ProtocolControllerForUnit'): + result = None + if ResultClass == 'CIM_ProtocolControllerForUnit': + result = self._ref_unitnames() + else: + result = self._default_ref(objectpath) + return result + + def _ref_unitnames(self): + units = [] + unit = {} + + dependent = {} + dependent['CreationClassName'] = vol_creationclass + dependent['DeviceID'] = test_volume['id'] + dependent['ElementName'] = test_volume['name'] + dependent['SystemName'] = storage_system + + antecedent = {} + antecedent['CreationClassName'] = lunmask_creationclass + antecedent['DeviceID'] = lunmaskctrl_id + antecedent['SystemName'] = storage_system + + unit['Dependent'] = dependent + unit['Antecedent'] = antecedent + unit['CreationClassName'] = unit_creationclass + units.append(unit) + + return units + + def _default_ref(self, objectpath): + return objectpath + + def _assoc_hdwid(self): + assocs = [] + assoc = {} + assoc['StorageID'] = initiator1 + assocs.append(assoc) + return assocs + + def _assoc_endpoint(self): + assocs = [] + assoc = {} + assoc['Name'] = 'iqn.1992-04.com.emc:cx.apm00123907237.a8,t,0x0001' + assoc['SystemName'] = storage_system + '+SP_A+8' + assocs.append(assoc) + return assocs + + def _default_assoc(self, objectpath): + return objectpath + + def _assocnames_lunmaskctrl(self): + return self._enum_lunmaskctrls() + + def _default_assocnames(self, objectpath): + return objectpath + + def _getinstance_storagevolume(self, objectpath): + instance = EMC_StorageVolume() + vols = self._enum_storagevolumes() + for vol in vols: + if vol['DeviceID'] == objectpath['DeviceID']: + instance = vol + break + return instance + + def _getinstance_syncsvsv(self, objectpath): + foundsync = None + syncs = self._enum_syncsvsvs() + for sync in syncs: + if (sync['SyncedElement'] == objectpath['SyncedElement'] and + sync['SystemElement'] == objectpath['SystemElement']): + foundsync = sync + break + return foundsync + + def _getinstance_lunmask(self): + lunmask = {} + lunmask['CreationClassName'] = lunmask_creationclass + lunmask['DeviceID'] = lunmaskctrl_id + lunmask['SystemName'] = storage_system + return lunmask + + def _getinstance_unit(self, objectpath): + unit = {} + + dependent = {} + dependent['CreationClassName'] = vol_creationclass + dependent['DeviceID'] = test_volume['id'] + dependent['ElementName'] = test_volume['name'] + dependent['SystemName'] = storage_system + + antecedent = {} + antecedent['CreationClassName'] = lunmask_creationclass + antecedent['DeviceID'] = lunmaskctrl_id + antecedent['SystemName'] = storage_system + + 
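+        # Dependent is the storage volume and Antecedent the masking
+        # controller; together they model a CIM_ProtocolControllerForUnit
+        # association, the object the driver reads device numbers from.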
unit['Dependent'] = dependent + unit['Antecedent'] = antecedent + unit['CreationClassName'] = unit_creationclass + unit['DeviceNumber'] = '0' + + return unit + + def _getinstance_job(self, jobpath): + jobinstance = {} + jobinstance['InstanceID'] = '9999' + if jobpath['status'] == 'failure': + jobinstance['JobState'] = 10 + jobinstance['ErrorCode'] = 99 + jobinstance['ErrorDescription'] = 'Failure' + else: + jobinstance['JobState'] = 7 + jobinstance['ErrorCode'] = 0 + jobinstance['ErrorDescription'] = '' + return jobinstance + + def _default_getinstance(self, objectpath): + return objectpath + + def _enum_replicationservices(self): + rep_services = [] + rep_service = {} + rep_service['SystemName'] = storage_system + rep_service['CreationClassName'] = rep_service_creationclass + rep_services.append(rep_service) + return rep_services + + def _enum_stconfsvcs(self): + conf_services = [] + conf_service = {} + conf_service['SystemName'] = storage_system + conf_service['CreationClassName'] = stconf_service_creationclass + conf_services.append(conf_service) + return conf_services + + def _enum_ctrlconfsvcs(self): + conf_services = [] + conf_service = {} + conf_service['SystemName'] = storage_system + conf_service['CreationClassName'] = ctrlconf_service_creationclass + conf_services.append(conf_service) + return conf_services + + def _enum_pools(self): + pools = [] + pool = {} + pool['InstanceID'] = storage_system + '+U+' + storage_type + pool['CreationClassName'] = 'Clar_UnifiedStoragePool' + pools.append(pool) + return pools + + def _enum_pool_details(self): + pools = [] + pool = {} + pool['InstanceID'] = storage_system + '+U+' + storage_type + pool['CreationClassName'] = 'Clar_UnifiedStoragePool' + pool['TotalManagedSpace'] = 12345678 + pool['RemainingManagedSpace'] = 123456 + pools.append(pool) + return pools + + def _enum_storagevolumes(self): + vols = [] + vol = EMC_StorageVolume() + vol['CreationClassName'] = 'Clar_StorageVolume' + vol['ElementName'] = test_volume['name'] + vol['DeviceID'] = test_volume['id'] + vol['SystemName'] = storage_system + vol.path = {'DeviceID': vol['DeviceID']} + vols.append(vol) + + snap_vol = EMC_StorageVolume() + snap_vol['CreationClassName'] = 'Clar_StorageVolume' + snap_vol['ElementName'] = test_snapshot['name'] + snap_vol['DeviceID'] = test_snapshot['id'] + snap_vol['SystemName'] = storage_system + snap_vol.path = {'DeviceID': snap_vol['DeviceID']} + vols.append(snap_vol) + + clone_vol = EMC_StorageVolume() + clone_vol['CreationClassName'] = 'Clar_StorageVolume' + clone_vol['ElementName'] = test_clone['name'] + clone_vol['DeviceID'] = test_clone['id'] + clone_vol['SystemName'] = storage_system + clone_vol.path = {'DeviceID': clone_vol['DeviceID']} + vols.append(clone_vol) + + clone_vol3 = EMC_StorageVolume() + clone_vol3['CreationClassName'] = 'Clar_StorageVolume' + clone_vol3['ElementName'] = test_clone3['name'] + clone_vol3['DeviceID'] = test_clone3['id'] + clone_vol3['SystemName'] = storage_system + clone_vol3.path = {'DeviceID': clone_vol3['DeviceID']} + vols.append(clone_vol3) + + snap_vol_vmax = EMC_StorageVolume() + snap_vol_vmax['CreationClassName'] = 'Symm_StorageVolume' + snap_vol_vmax['ElementName'] = test_snapshot_vmax['name'] + snap_vol_vmax['DeviceID'] = test_snapshot_vmax['id'] + snap_vol_vmax['SystemName'] = storage_system_vmax + snap_vol_vmax.path = {'DeviceID': snap_vol_vmax['DeviceID']} + vols.append(snap_vol_vmax) + + failed_snap_replica = EMC_StorageVolume() + failed_snap_replica['CreationClassName'] = 'Clar_StorageVolume' + 
failed_snap_replica['ElementName'] = failed_snapshot_replica['name'] + failed_snap_replica['DeviceID'] = failed_snapshot_replica['id'] + failed_snap_replica['SystemName'] = storage_system + failed_snap_replica.path = { + 'DeviceID': failed_snap_replica['DeviceID']} + vols.append(failed_snap_replica) + + failed_snap_sync = EMC_StorageVolume() + failed_snap_sync['CreationClassName'] = 'Clar_StorageVolume' + failed_snap_sync['ElementName'] = failed_snapshot_sync['name'] + failed_snap_sync['DeviceID'] = failed_snapshot_sync['id'] + failed_snap_sync['SystemName'] = storage_system + failed_snap_sync.path = { + 'DeviceID': failed_snap_sync['DeviceID']} + vols.append(failed_snap_sync) + + failed_clone_rep = EMC_StorageVolume() + failed_clone_rep['CreationClassName'] = 'Clar_StorageVolume' + failed_clone_rep['ElementName'] = failed_clone_replica['name'] + failed_clone_rep['DeviceID'] = failed_clone_replica['id'] + failed_clone_rep['SystemName'] = storage_system + failed_clone_rep.path = { + 'DeviceID': failed_clone_rep['DeviceID']} + vols.append(failed_clone_rep) + + failed_clone_s = EMC_StorageVolume() + failed_clone_s['CreationClassName'] = 'Clar_StorageVolume' + failed_clone_s['ElementName'] = failed_clone_sync['name'] + failed_clone_s['DeviceID'] = failed_clone_sync['id'] + failed_clone_s['SystemName'] = storage_system + failed_clone_s.path = { + 'DeviceID': failed_clone_s['DeviceID']} + vols.append(failed_clone_s) + + failed_delete_vol = EMC_StorageVolume() + failed_delete_vol['CreationClassName'] = 'Clar_StorageVolume' + failed_delete_vol['ElementName'] = 'failed_delete_vol' + failed_delete_vol['DeviceID'] = '99999' + failed_delete_vol['SystemName'] = storage_system + failed_delete_vol.path = {'DeviceID': failed_delete_vol['DeviceID']} + vols.append(failed_delete_vol) + + return vols + + def _enum_syncsvsvs(self): + syncs = [] + + vols = self._enum_storagevolumes() + + sync = self._create_sync(vols[0], vols[1], 100) + syncs.append(sync) + + sync2 = self._create_sync(vols[1], vols[2], 100) + syncs.append(sync2) + + sync3 = self._create_sync(vols[0], vols[3], 100) + syncs.append(sync3) + + objpath1 = vols[1] + for vol in vols: + if vol['ElementName'] == 'failed_snapshot_sync': + objpath2 = vol + break + sync4 = self._create_sync(objpath1, objpath2, 100) + syncs.append(sync4) + + objpath1 = vols[0] + for vol in vols: + if vol['ElementName'] == 'failed_clone_sync': + objpath2 = vol + break + sync5 = self._create_sync(objpath1, objpath2, 100) + syncs.append(sync5) + + return syncs + + def _create_sync(self, objpath1, objpath2, percentsynced): + sync = {} + sync['SyncedElement'] = objpath2 + sync['SystemElement'] = objpath1 + sync['CreationClassName'] = 'SE_StorageSynchronized_SV_SV' + sync['PercentSynced'] = percentsynced + return sync + + def _enum_unitnames(self): + return self._ref_unitnames() + + def _enum_lunmaskctrls(self): + ctrls = [] + ctrl = {} + ctrl['CreationClassName'] = lunmask_creationclass + ctrl['DeviceID'] = lunmaskctrl_id + ctrl['SystemName'] = storage_system + ctrls.append(ctrl) + return ctrls + + def _enum_processors(self): + ctrls = [] + ctrl = {} + ctrl['CreationClassName'] = 'Clar_StorageProcessorSystem' + ctrl['Name'] = storage_system + '+SP_A' + ctrls.append(ctrl) + return ctrls + + def _default_enum(self): + names = [] + name = {} + name['Name'] = 'default' + names.append(name) + return names + + +class EMCSMISISCSIDriverTestCase(test.TestCase): + + def setUp(self): + self.tempdir = tempfile.mkdtemp() + super(EMCSMISISCSIDriverTestCase, self).setUp() + 
self.config_file_path = None + self.create_fake_config_file() + + configuration = mox.MockObject(conf.Configuration) + configuration.cinder_emc_config_file = self.config_file_path + configuration.append_config_values(mox.IgnoreArg()) + + self.stubs.Set(EMCSMISISCSIDriver, '_do_iscsi_discovery', + self.fake_do_iscsi_discovery) + self.stubs.Set(EMCSMISCommon, '_get_ecom_connection', + self.fake_ecom_connection) + driver = EMCSMISISCSIDriver(configuration=configuration) + self.driver = driver + + def create_fake_config_file(self): + doc = Document() + emc = doc.createElement("EMC") + doc.appendChild(emc) + + storagetype = doc.createElement("StorageType") + storagetypetext = doc.createTextNode("gold") + emc.appendChild(storagetype) + storagetype.appendChild(storagetypetext) + + ecomserverip = doc.createElement("EcomServerIp") + ecomserveriptext = doc.createTextNode("1.1.1.1") + emc.appendChild(ecomserverip) + ecomserverip.appendChild(ecomserveriptext) + + ecomserverport = doc.createElement("EcomServerPort") + ecomserverporttext = doc.createTextNode("10") + emc.appendChild(ecomserverport) + ecomserverport.appendChild(ecomserverporttext) + + ecomusername = doc.createElement("EcomUserName") + ecomusernametext = doc.createTextNode("user") + emc.appendChild(ecomusername) + ecomusername.appendChild(ecomusernametext) + + ecompassword = doc.createElement("EcomPassword") + ecompasswordtext = doc.createTextNode("pass") + emc.appendChild(ecompassword) + ecompassword.appendChild(ecompasswordtext) + + self.config_file_path = self.tempdir + '/' + config_file_name + f = open(self.config_file_path, 'w') + doc.writexml(f) + f.close() + + def fake_ecom_connection(self): + conn = FakeEcomConnection() + return conn + + def fake_do_iscsi_discovery(self, volume): + output = [] + item = '10.0.0.3:3260,1 iqn.1992-04.com.emc:cx.apm00123907237.a8' + item2 = '10.0.0.4:3260,2 iqn.1992-04.com.emc:cx.apm00123907237.b8' + output.append(item) + output.append(item2) + return output + + def test_get_volume_stats(self): + self.driver.get_volume_stats(True) + + def test_create_destroy(self): + self.driver.create_volume(test_volume) + self.driver.delete_volume(test_volume) + + def test_create_volume_snapshot_destroy(self): + self.driver.create_volume(test_volume) + self.driver.create_snapshot(test_snapshot) + self.driver.create_volume_from_snapshot( + test_clone, test_snapshot) + self.driver.create_cloned_volume( + test_clone3, test_volume) + self.driver.delete_volume(test_clone) + self.driver.delete_volume(test_clone3) + self.driver.delete_snapshot(test_snapshot) + self.driver.delete_volume(test_volume) + + def test_map_unmap(self): + self.driver.create_volume(test_volume) + export = self.driver.create_export(None, test_volume) + test_volume['provider_location'] = export['provider_location'] + test_volume['EMCCurrentOwningStorageProcessor'] = 'SP_A' + connector = {'initiator': initiator1} + connection_info = self.driver.initialize_connection(test_volume, + connector) + self.driver.terminate_connection(test_volume, connector) + self.driver.remove_export(None, test_volume) + self.driver.delete_volume(test_volume) + + def test_create_volume_failed(self): + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.create_volume, + test_failed_volume) + + def test_create_volume_snapshot_unsupported(self): + self.driver.create_volume(test_volume) + self.driver.create_snapshot(test_snapshot_vmax) + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.create_volume_from_snapshot, + test_clone, + 
test_snapshot_vmax) + self.driver.delete_snapshot(test_snapshot_vmax) + self.driver.delete_volume(test_volume) + + def test_create_volume_snapshot_replica_failed(self): + self.driver.create_volume(test_volume) + self.driver.create_snapshot(test_snapshot) + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.create_volume_from_snapshot, + failed_snapshot_replica, + test_snapshot) + self.driver.delete_snapshot(test_snapshot) + self.driver.delete_volume(test_volume) + + def test_create_volume_snapshot_sync_failed(self): + self.driver.create_volume(test_volume) + self.driver.create_snapshot(test_snapshot) + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.create_volume_from_snapshot, + failed_snapshot_sync, + test_snapshot) + self.driver.delete_snapshot(test_snapshot) + self.driver.delete_volume(test_volume) + + def test_create_volume_clone_replica_failed(self): + self.driver.create_volume(test_volume) + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.create_cloned_volume, + failed_clone_replica, + test_volume) + self.driver.delete_volume(test_volume) + + def test_create_volume_clone_sync_failed(self): + self.driver.create_volume(test_volume) + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.create_cloned_volume, + failed_clone_sync, + test_volume) + self.driver.delete_volume(test_volume) + + def test_delete_volume_notfound(self): + notfound_delete_vol = {} + notfound_delete_vol['name'] = 'notfound_delete_vol' + notfound_delete_vol['id'] = '10' + self.driver.delete_volume(notfound_delete_vol) + + def test_delete_volume_failed(self): + self.driver.create_volume(failed_delete_vol) + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.delete_volume, + failed_delete_vol) + + def _cleanup(self): + bExists = os.path.exists(self.config_file_path) + if bExists: + os.remove(self.config_file_path) + shutil.rmtree(self.tempdir) + + def tearDown(self): + self._cleanup() + super(EMCSMISISCSIDriverTestCase, self).tearDown() diff --git a/cinder/tests/test_exception.py b/cinder/tests/test_exception.py new file mode 100644 index 0000000000..c47f116e49 --- /dev/null +++ b/cinder/tests/test_exception.py @@ -0,0 +1,94 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
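The EMC tests above build their driver configuration on the fly: setUp writes a throwaway XML file into a fresh temporary directory with xml.dom.minidom, and tearDown removes the whole directory. A condensed standalone sketch of that fixture pattern; the element names are taken from the fixture above, the helper itself is invented:

    import os
    from xml.dom.minidom import Document


    def write_fake_config(directory, filename='fake_emc_config.xml'):
        """Write a minimal XML config file and return its path."""
        doc = Document()
        root = doc.createElement('EMC')
        doc.appendChild(root)
        ip = doc.createElement('EcomServerIp')
        ip.appendChild(doc.createTextNode('1.1.1.1'))
        root.appendChild(ip)
        path = os.path.join(directory, filename)
        with open(path, 'w') as f:
            doc.writexml(f)
        return path

    # Typical use: path = write_fake_config(tempfile.mkdtemp()), run the
    # tests, then shutil.rmtree(directory) in tearDown, as the suite above does.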
+ +from cinder import exception +from cinder import test +from cinder import utils + + +class FakeNotifier(object): + """Acts like the cinder.openstack.common.notifier.api module.""" + ERROR = 88 + + def __init__(self): + self.provided_publisher = None + self.provided_event = None + self.provided_priority = None + self.provided_payload = None + + def notify(self, context, publisher, event, priority, payload): + self.provided_publisher = publisher + self.provided_event = event + self.provided_priority = priority + self.provided_payload = payload + + +def good_function(): + return 99 + + +def bad_function_error(): + raise exception.Error() + + +def bad_function_exception(): + raise test.TestingException() + + +class CinderExceptionTestCase(test.TestCase): + def test_default_error_msg(self): + class FakeCinderException(exception.CinderException): + message = "default message" + + exc = FakeCinderException() + self.assertEquals(unicode(exc), 'default message') + + def test_error_msg(self): + self.assertEquals(unicode(exception.CinderException('test')), + 'test') + + def test_default_error_msg_with_kwargs(self): + class FakeCinderException(exception.CinderException): + message = "default message: %(code)s" + + exc = FakeCinderException(code=500) + self.assertEquals(unicode(exc), 'default message: 500') + + def test_error_msg_exception_with_kwargs(self): + # NOTE(dprince): disable format errors for this test + self.flags(fatal_exception_format_errors=False) + + class FakeCinderException(exception.CinderException): + message = "default message: %(mispelled_code)s" + + exc = FakeCinderException(code=500) + self.assertEquals(unicode(exc), 'default message: %(mispelled_code)s') + + def test_default_error_code(self): + class FakeCinderException(exception.CinderException): + code = 404 + + exc = FakeCinderException() + self.assertEquals(exc.kwargs['code'], 404) + + def test_error_code_from_kwarg(self): + class FakeCinderException(exception.CinderException): + code = 500 + + exc = FakeCinderException(code=404) + self.assertEquals(exc.kwargs['code'], 404) diff --git a/cinder/tests/test_flags.py b/cinder/tests/test_flags.py new file mode 100644 index 0000000000..1619048489 --- /dev/null +++ b/cinder/tests/test_flags.py @@ -0,0 +1,83 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# Copyright 2011 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
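The exception tests above pin down one contract: a class-level message template is interpolated with constructor kwargs, a class-level default (such as code) can be overridden per instance, and a misspelled kwarg in the template degrades to the raw template when format errors are non-fatal. A standalone sketch of that contract, with a hypothetical class name:

    class FormattedError(Exception):
        """Hypothetical analogue of the formatting behaviour tested above."""

        message = 'default message: %(code)s'
        code = 500

        def __init__(self, **kwargs):
            kwargs.setdefault('code', self.code)
            self.kwargs = kwargs
            try:
                formatted = self.message % kwargs
            except KeyError:
                # With fatal format errors disabled, a misspelled kwarg in
                # the template falls back to the raw template.
                formatted = self.message
            super(FormattedError, self).__init__(formatted)


    assert str(FormattedError()) == 'default message: 500'
    assert FormattedError(code=404).kwargs['code'] == 404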
+ +from oslo.config import cfg + +from cinder import flags +from cinder import test + +FLAGS = flags.FLAGS +FLAGS.register_opt(cfg.StrOpt('flags_unittest', + default='foo', + help='for testing purposes only')) + + +class FlagsTestCase(test.TestCase): + + def setUp(self): + super(FlagsTestCase, self).setUp() + + def test_declare(self): + self.assert_('answer' not in FLAGS) + flags.DECLARE('answer', 'cinder.tests.declare_flags') + self.assert_('answer' in FLAGS) + self.assertEqual(FLAGS.answer, 42) + + # Make sure we don't overwrite anything + FLAGS.set_override('answer', 256) + self.assertEqual(FLAGS.answer, 256) + flags.DECLARE('answer', 'cinder.tests.declare_flags') + self.assertEqual(FLAGS.answer, 256) + + def test_runtime_and_unknown_flags(self): + self.assert_('runtime_answer' not in FLAGS) + import cinder.tests.runtime_flags + self.assert_('runtime_answer' in FLAGS) + self.assertEqual(FLAGS.runtime_answer, 54) + + def test_long_vs_short_flags(self): + FLAGS.clear() + FLAGS.register_cli_opt(cfg.StrOpt('duplicate_answer_long', + default='val', + help='desc')) + FLAGS.register_cli_opt(cfg.IntOpt('duplicate_answer', + default=50, + help='desc')) + + argv = ['flags_test', '--duplicate_answer=60'] + flags.parse_args(argv, default_config_files=[]) + self.assertEqual(FLAGS.duplicate_answer, 60) + self.assertEqual(FLAGS.duplicate_answer_long, 'val') + + def test_flag_leak_left(self): + self.assertEqual(FLAGS.flags_unittest, 'foo') + self.flags(flags_unittest='bar') + self.assertEqual(FLAGS.flags_unittest, 'bar') + + def test_flag_leak_right(self): + self.assertEqual(FLAGS.flags_unittest, 'foo') + self.flags(flags_unittest='bar') + self.assertEqual(FLAGS.flags_unittest, 'bar') + + def test_flag_overrides(self): + self.assertEqual(FLAGS.flags_unittest, 'foo') + self.flags(flags_unittest='bar') + self.assertEqual(FLAGS.flags_unittest, 'bar') + FLAGS.reset() + self.assertEqual(FLAGS.flags_unittest, 'foo') diff --git a/cinder/tests/test_glusterfs.py b/cinder/tests/test_glusterfs.py new file mode 100644 index 0000000000..a210d57cb1 --- /dev/null +++ b/cinder/tests/test_glusterfs.py @@ -0,0 +1,579 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2013 Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
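The flags tests above walk the oslo.config option lifecycle: register an option with a default, override it at runtime (roughly what self.flags(...) does via set_override), then reset to get the default back. A standalone sketch against a private ConfigOpts instance, assuming registered defaults are readable without parsing argv; the option name is invented:

    from oslo.config import cfg

    conf = cfg.ConfigOpts()
    conf.register_opt(cfg.StrOpt('demo_opt', default='foo', help='demo only'))

    assert conf.demo_opt == 'foo'
    conf.set_override('demo_opt', 'bar')   # roughly what self.flags() does
    assert conf.demo_opt == 'bar'
    conf.reset()                           # overrides are cleared...
    assert conf.demo_opt == 'foo'          # ...registered defaults remain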
+"""Unit tests for the GlusterFS driver module.""" + +import __builtin__ +import errno +import os + +import mox as mox_lib +from mox import IgnoreArg +from mox import IsA +from mox import stubout + +from cinder import context +from cinder import exception +from cinder.exception import ProcessExecutionError +from cinder import test + +from cinder.volume import configuration as conf +from cinder.volume.drivers import glusterfs + + +class DumbVolume(object): + fields = {} + + def __setitem__(self, key, value): + self.fields[key] = value + + def __getitem__(self, item): + return self.fields[item] + + +class GlusterFsDriverTestCase(test.TestCase): + """Test case for GlusterFS driver.""" + + TEST_EXPORT1 = 'glusterfs-host1:/export' + TEST_EXPORT2 = 'glusterfs-host2:/export' + TEST_SIZE_IN_GB = 1 + TEST_MNT_POINT = '/mnt/glusterfs' + TEST_MNT_POINT_BASE = '/mnt/test' + TEST_LOCAL_PATH = '/mnt/glusterfs/volume-123' + TEST_FILE_NAME = 'test.txt' + TEST_SHARES_CONFIG_FILE = '/etc/cinder/test-shares.conf' + ONE_GB_IN_BYTES = 1024 * 1024 * 1024 + + def setUp(self): + self._mox = mox_lib.Mox() + self._configuration = mox_lib.MockObject(conf.Configuration) + self._configuration.append_config_values(mox_lib.IgnoreArg()) + self._configuration.glusterfs_shares_config = \ + self.TEST_SHARES_CONFIG_FILE + self._configuration.glusterfs_mount_point_base = \ + self.TEST_MNT_POINT_BASE + self._configuration.glusterfs_disk_util = 'df' + self._configuration.glusterfs_sparsed_volumes = True + + self.stubs = stubout.StubOutForTesting() + self._driver = glusterfs.GlusterfsDriver( + configuration=self._configuration) + + def tearDown(self): + self._mox.UnsetStubs() + self.stubs.UnsetAll() + + def stub_out_not_replaying(self, obj, attr_name): + attr_to_replace = getattr(obj, attr_name) + stub = mox_lib.MockObject(attr_to_replace) + self.stubs.Set(obj, attr_name, stub) + + def test_local_path(self): + """local_path common use case.""" + glusterfs.FLAGS.glusterfs_mount_point_base = self.TEST_MNT_POINT_BASE + drv = self._driver + + volume = DumbVolume() + volume['provider_location'] = self.TEST_EXPORT1 + volume['name'] = 'volume-123' + + self.assertEqual( + '/mnt/test/ab03ab34eaca46a5fb81878f7e9b91fc/volume-123', + drv.local_path(volume)) + + def test_mount_glusterfs_should_mount_correctly(self): + """_mount_glusterfs common case usage.""" + mox = self._mox + drv = self._driver + + mox.StubOutWithMock(drv, '_execute') + drv._execute('mkdir', '-p', self.TEST_MNT_POINT) + drv._execute('mount', '-t', 'glusterfs', self.TEST_EXPORT1, + self.TEST_MNT_POINT, run_as_root=True) + + mox.ReplayAll() + + drv._mount_glusterfs(self.TEST_EXPORT1, self.TEST_MNT_POINT) + + mox.VerifyAll() + + def test_mount_glusterfs_should_suppress_already_mounted_error(self): + """_mount_glusterfs should suppress already mounted error if + ensure=True + """ + mox = self._mox + drv = self._driver + + mox.StubOutWithMock(drv, '_execute') + drv._execute('mkdir', '-p', self.TEST_MNT_POINT) + drv._execute('mount', '-t', 'glusterfs', self.TEST_EXPORT1, + self.TEST_MNT_POINT, run_as_root=True).\ + AndRaise(ProcessExecutionError( + stderr='is busy or already mounted')) + + mox.ReplayAll() + + drv._mount_glusterfs(self.TEST_EXPORT1, self.TEST_MNT_POINT, + ensure=True) + + mox.VerifyAll() + + def test_mount_glusterfs_should_reraise_already_mounted_error(self): + """_mount_glusterfs should not suppress already mounted error + if ensure=False + """ + mox = self._mox + drv = self._driver + + mox.StubOutWithMock(drv, '_execute') + drv._execute('mkdir', '-p', 
self.TEST_MNT_POINT) + drv._execute( + 'mount', + '-t', + 'glusterfs', + self.TEST_EXPORT1, + self.TEST_MNT_POINT, + run_as_root=True). \ + AndRaise(ProcessExecutionError(stderr='is busy or ' + 'already mounted')) + + mox.ReplayAll() + + self.assertRaises(ProcessExecutionError, drv._mount_glusterfs, + self.TEST_EXPORT1, self.TEST_MNT_POINT, + ensure=False) + + mox.VerifyAll() + + def test_mount_glusterfs_should_create_mountpoint_if_not_yet(self): + """_mount_glusterfs should create mountpoint if it doesn't exist.""" + mox = self._mox + drv = self._driver + + mox.StubOutWithMock(drv, '_execute') + drv._execute('mkdir', '-p', self.TEST_MNT_POINT) + drv._execute(*([IgnoreArg()] * 5), run_as_root=IgnoreArg()) + + mox.ReplayAll() + + drv._mount_glusterfs(self.TEST_EXPORT1, self.TEST_MNT_POINT) + + mox.VerifyAll() + + def test_get_hash_str(self): + """_get_hash_str should calculate correct value.""" + drv = self._driver + + self.assertEqual('ab03ab34eaca46a5fb81878f7e9b91fc', + drv._get_hash_str(self.TEST_EXPORT1)) + + def test_get_mount_point_for_share(self): + """_get_mount_point_for_share should calculate correct value.""" + drv = self._driver + + glusterfs.FLAGS.glusterfs_mount_point_base = self.TEST_MNT_POINT_BASE + + self.assertEqual('/mnt/test/ab03ab34eaca46a5fb81878f7e9b91fc', + drv._get_mount_point_for_share( + self.TEST_EXPORT1)) + + def test_get_available_capacity_with_df(self): + """_get_available_capacity should calculate correct value.""" + mox = self._mox + drv = self._driver + + df_total_size = 2620544 + df_avail = 1490560 + df_head = 'Filesystem 1K-blocks Used Available Use% Mounted on\n' + df_data = 'glusterfs-host:/export %d 996864 %d 41%% /mnt' % \ + (df_total_size, df_avail) + df_output = df_head + df_data + + setattr(glusterfs.FLAGS, 'glusterfs_disk_util', 'df') + + mox.StubOutWithMock(drv, '_get_mount_point_for_share') + drv._get_mount_point_for_share(self.TEST_EXPORT1).\ + AndReturn(self.TEST_MNT_POINT) + + mox.StubOutWithMock(drv, '_execute') + drv._execute('df', '--portability', '--block-size', '1', + self.TEST_MNT_POINT, + run_as_root=True).AndReturn((df_output, None)) + + mox.ReplayAll() + + self.assertEquals((df_avail, df_total_size), + drv._get_available_capacity( + self.TEST_EXPORT1)) + + mox.VerifyAll() + + delattr(glusterfs.FLAGS, 'glusterfs_disk_util') + + def test_get_available_capacity_with_du(self): + """_get_available_capacity should calculate correct value.""" + mox = self._mox + drv = self._driver + + old_value = self._configuration.glusterfs_disk_util + self._configuration.glusterfs_disk_util = 'du' + + df_total_size = 2620544 + df_used_size = 996864 + df_avail_size = 1490560 + df_title = 'Filesystem 1-blocks Used Available Use% Mounted on\n' + df_mnt_data = 'glusterfs-host:/export %d %d %d 41%% /mnt' % \ + (df_total_size, + df_used_size, + df_avail_size) + df_output = df_title + df_mnt_data + + du_used = 490560 + du_output = '%d /mnt' % du_used + + mox.StubOutWithMock(drv, '_get_mount_point_for_share') + drv._get_mount_point_for_share(self.TEST_EXPORT1).\ + AndReturn(self.TEST_MNT_POINT) + + mox.StubOutWithMock(drv, '_execute') + drv._execute('df', '--portability', '--block-size', '1', + self.TEST_MNT_POINT, + run_as_root=True).\ + AndReturn((df_output, None)) + drv._execute('du', '-sb', '--apparent-size', + '--exclude', '*snapshot*', + self.TEST_MNT_POINT, + run_as_root=True).AndReturn((du_output, None)) + + mox.ReplayAll() + + self.assertEquals((df_total_size - du_used, df_total_size), + drv._get_available_capacity( + self.TEST_EXPORT1)) + + 
mox.VerifyAll() + + self._configuration.glusterfs_disk_util = old_value + + def test_load_shares_config(self): + mox = self._mox + drv = self._driver + + glusterfs.FLAGS.glusterfs_shares_config = self.TEST_SHARES_CONFIG_FILE + + mox.StubOutWithMock(__builtin__, 'open') + config_data = [] + config_data.append(self.TEST_EXPORT1) + config_data.append('#' + self.TEST_EXPORT2) + config_data.append('') + __builtin__.open(self.TEST_SHARES_CONFIG_FILE).AndReturn(config_data) + mox.ReplayAll() + + shares = drv._load_shares_config() + + self.assertEqual([self.TEST_EXPORT1], shares) + + mox.VerifyAll() + + def test_ensure_share_mounted(self): + """_ensure_share_mounted simple use case.""" + mox = self._mox + drv = self._driver + + mox.StubOutWithMock(drv, '_get_mount_point_for_share') + drv._get_mount_point_for_share(self.TEST_EXPORT1).\ + AndReturn(self.TEST_MNT_POINT) + + mox.StubOutWithMock(drv, '_mount_glusterfs') + drv._mount_glusterfs(self.TEST_EXPORT1, self.TEST_MNT_POINT, + ensure=True) + + mox.ReplayAll() + + drv._ensure_share_mounted(self.TEST_EXPORT1) + + mox.VerifyAll() + + def test_ensure_shares_mounted_should_save_mounting_successfully(self): + """_ensure_shares_mounted should save the share if it mounts successfully.""" + mox = self._mox + drv = self._driver + + mox.StubOutWithMock(drv, '_load_shares_config') + drv._load_shares_config().AndReturn([self.TEST_EXPORT1]) + mox.StubOutWithMock(drv, '_ensure_share_mounted') + drv._ensure_share_mounted(self.TEST_EXPORT1) + + mox.ReplayAll() + + drv._ensure_shares_mounted() + + self.assertEqual(1, len(drv._mounted_shares)) + self.assertEqual(self.TEST_EXPORT1, drv._mounted_shares[0]) + + mox.VerifyAll() + + def test_ensure_shares_mounted_should_not_save_mounting_with_error(self): + """_ensure_shares_mounted should not save the share if it fails to mount.""" + mox = self._mox + drv = self._driver + + mox.StubOutWithMock(drv, '_load_shares_config') + drv._load_shares_config().AndReturn([self.TEST_EXPORT1]) + mox.StubOutWithMock(drv, '_ensure_share_mounted') + drv._ensure_share_mounted(self.TEST_EXPORT1).AndRaise(Exception()) + + mox.ReplayAll() + + drv._ensure_shares_mounted() + + self.assertEqual(0, len(drv._mounted_shares)) + + mox.VerifyAll() + + def test_setup_should_throw_error_if_shares_config_not_configured(self): + """do_setup should throw error if shares config is not configured.""" + drv = self._driver + + glusterfs.FLAGS.glusterfs_shares_config = self.TEST_SHARES_CONFIG_FILE + + self.assertRaises(exception.GlusterfsException, + drv.do_setup, IsA(context.RequestContext)) + + def test_setup_should_throw_exception_if_client_is_not_installed(self): + """do_setup should throw exception if client is not installed.""" + mox = self._mox + drv = self._driver + + glusterfs.FLAGS.glusterfs_shares_config = self.TEST_SHARES_CONFIG_FILE + + mox.StubOutWithMock(os.path, 'exists') + os.path.exists(self.TEST_SHARES_CONFIG_FILE).AndReturn(True) + mox.StubOutWithMock(drv, '_execute') + drv._execute('mount.glusterfs', check_exit_code=False).\ + AndRaise(OSError(errno.ENOENT, 'No such file or directory')) + + mox.ReplayAll() + + self.assertRaises(exception.GlusterfsException, + drv.do_setup, IsA(context.RequestContext)) + + mox.VerifyAll() + + def test_find_share_should_throw_error_if_there_is_no_mounted_shares(self): + """_find_share should throw an error if there are no mounted shares.""" + drv = self._driver + + drv._mounted_shares = [] + + self.assertRaises(exception.NotFound, drv._find_share, + self.TEST_SIZE_IN_GB) + + def test_find_share(self): + """_find_share 
simple use case.""" + mox = self._mox + drv = self._driver + + drv._mounted_shares = [self.TEST_EXPORT1, self.TEST_EXPORT2] + + mox.StubOutWithMock(drv, '_get_available_capacity') + drv._get_available_capacity(self.TEST_EXPORT1).\ + AndReturn((2 * self.ONE_GB_IN_BYTES, 5 * self.ONE_GB_IN_BYTES)) + drv._get_available_capacity(self.TEST_EXPORT2).\ + AndReturn((3 * self.ONE_GB_IN_BYTES, 10 * self.ONE_GB_IN_BYTES)) + + mox.ReplayAll() + + self.assertEqual(self.TEST_EXPORT2, + drv._find_share(self.TEST_SIZE_IN_GB)) + + mox.VerifyAll() + + def test_find_share_should_throw_error_if_there_is_no_enough_place(self): + """_find_share should throw error if there is no share to host vol.""" + mox = self._mox + drv = self._driver + + drv._mounted_shares = [self.TEST_EXPORT1, + self.TEST_EXPORT2] + + mox.StubOutWithMock(drv, '_get_available_capacity') + drv._get_available_capacity(self.TEST_EXPORT1).\ + AndReturn((0, 5 * self.ONE_GB_IN_BYTES)) + drv._get_available_capacity(self.TEST_EXPORT2).\ + AndReturn((0, 10 * self.ONE_GB_IN_BYTES)) + + mox.ReplayAll() + + self.assertRaises(exception.GlusterfsNoSuitableShareFound, + drv._find_share, + self.TEST_SIZE_IN_GB) + + mox.VerifyAll() + + def _simple_volume(self): + volume = DumbVolume() + volume['provider_location'] = '127.0.0.1:/mnt' + volume['name'] = 'volume_name' + volume['size'] = 10 + + return volume + + def test_create_sparsed_volume(self): + mox = self._mox + drv = self._driver + volume = self._simple_volume() + + setattr(glusterfs.FLAGS, 'glusterfs_sparsed_volumes', True) + + mox.StubOutWithMock(drv, '_create_sparsed_file') + mox.StubOutWithMock(drv, '_set_rw_permissions_for_all') + + drv._create_sparsed_file(IgnoreArg(), IgnoreArg()) + drv._set_rw_permissions_for_all(IgnoreArg()) + + mox.ReplayAll() + + drv._do_create_volume(volume) + + mox.VerifyAll() + + delattr(glusterfs.FLAGS, 'glusterfs_sparsed_volumes') + + def test_create_nonsparsed_volume(self): + mox = self._mox + drv = self._driver + volume = self._simple_volume() + + old_value = self._configuration.glusterfs_sparsed_volumes + self._configuration.glusterfs_sparsed_volumes = False + + mox.StubOutWithMock(drv, '_create_regular_file') + mox.StubOutWithMock(drv, '_set_rw_permissions_for_all') + + drv._create_regular_file(IgnoreArg(), IgnoreArg()) + drv._set_rw_permissions_for_all(IgnoreArg()) + + mox.ReplayAll() + + drv._do_create_volume(volume) + + mox.VerifyAll() + + self._configuration.glusterfs_sparsed_volumes = old_value + + def test_create_volume_should_ensure_glusterfs_mounted(self): + """create_volume ensures shares provided in config are mounted.""" + mox = self._mox + drv = self._driver + + self.stub_out_not_replaying(glusterfs, 'LOG') + self.stub_out_not_replaying(drv, '_find_share') + self.stub_out_not_replaying(drv, '_do_create_volume') + + mox.StubOutWithMock(drv, '_ensure_shares_mounted') + drv._ensure_shares_mounted() + + mox.ReplayAll() + + volume = DumbVolume() + volume['size'] = self.TEST_SIZE_IN_GB + drv.create_volume(volume) + + mox.VerifyAll() + + def test_create_volume_should_return_provider_location(self): + """create_volume should return provider_location with found share.""" + mox = self._mox + drv = self._driver + + self.stub_out_not_replaying(glusterfs, 'LOG') + self.stub_out_not_replaying(drv, '_ensure_shares_mounted') + self.stub_out_not_replaying(drv, '_do_create_volume') + + mox.StubOutWithMock(drv, '_find_share') + drv._find_share(self.TEST_SIZE_IN_GB).AndReturn(self.TEST_EXPORT1) + + mox.ReplayAll() + + volume = DumbVolume() + volume['size'] = 
self.TEST_SIZE_IN_GB + result = drv.create_volume(volume) + self.assertEqual(self.TEST_EXPORT1, result['provider_location']) + + mox.VerifyAll() + + def test_delete_volume(self): + """delete_volume simple test case.""" + mox = self._mox + drv = self._driver + + self.stub_out_not_replaying(drv, '_ensure_share_mounted') + + volume = DumbVolume() + volume['name'] = 'volume-123' + volume['provider_location'] = self.TEST_EXPORT1 + + mox.StubOutWithMock(drv, 'local_path') + drv.local_path(volume).AndReturn(self.TEST_LOCAL_PATH) + + mox.StubOutWithMock(drv, '_execute') + drv._execute('rm', '-f', self.TEST_LOCAL_PATH, run_as_root=True) + + mox.ReplayAll() + + drv.delete_volume(volume) + + mox.VerifyAll() + + def test_delete_should_ensure_share_mounted(self): + """delete_volume should ensure that corresponding share is mounted.""" + mox = self._mox + drv = self._driver + + self.stub_out_not_replaying(drv, '_execute') + + volume = DumbVolume() + volume['name'] = 'volume-123' + volume['provider_location'] = self.TEST_EXPORT1 + + mox.StubOutWithMock(drv, '_ensure_share_mounted') + drv._ensure_share_mounted(self.TEST_EXPORT1) + + mox.ReplayAll() + + drv.delete_volume(volume) + + mox.VerifyAll() + + def test_delete_should_not_delete_if_provider_location_not_provided(self): + """delete_volume shouldn't delete if provider_location missed.""" + mox = self._mox + drv = self._driver + + self.stub_out_not_replaying(drv, '_ensure_share_mounted') + + volume = DumbVolume() + volume['name'] = 'volume-123' + volume['provider_location'] = None + + mox.StubOutWithMock(drv, '_execute') + + mox.ReplayAll() + + drv.delete_volume(volume) + + mox.VerifyAll() diff --git a/cinder/tests/test_hp3par.py b/cinder/tests/test_hp3par.py new file mode 100644 index 0000000000..8651929006 --- /dev/null +++ b/cinder/tests/test_hp3par.py @@ -0,0 +1,1044 @@ +#!/usr/bin/env python +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# (c) Copyright 2013 Hewlett-Packard Development Company, L.P. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
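A note on the name constants used throughout the tests below: VOLUME_3PAR_NAME ('osv-...') and SNAPSHOT_3PAR_NAME ('oss-...') are base64-encoded forms of the volume and snapshot UUIDs. A minimal sketch of the presumed encoding follows (Python 2, as in this tree); it is verified here only against the VOLUME_ID/VOLUME_3PAR_NAME pair these tests use, and hp_3par_common remains the authoritative implementation.

# Sketch of the UUID-to-3PAR-name encoding the constants below follow.
import base64
import uuid

def encode_3par_name(vol_id, prefix='osv-'):
    vol_uuid = uuid.UUID(vol_id)
    encoded = base64.b64encode(vol_uuid.bytes)
    # 3PAR names may not contain '+' or '/'; base64 padding is stripped too.
    encoded = encoded.replace('+', '.').replace('/', '-').replace('=', '')
    return prefix + encoded

# encode_3par_name('d03338a9-9115-48a3-8dfc-35cdfcdc15a7')
# -> 'osv-0DM4qZEVSKON-DXN-NwVpw'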
+""" +Unit tests for OpenStack Cinder volume drivers +""" +import mox +import shutil +import tempfile + +from hp3parclient import exceptions as hpexceptions + +from cinder import exception +from cinder.openstack.common import log as logging +from cinder import test +from cinder.volume import configuration as conf +from cinder.volume.drivers.san.hp import hp_3par_fc as hpfcdriver +from cinder.volume.drivers.san.hp import hp_3par_iscsi as hpdriver + +LOG = logging.getLogger(__name__) + +HP3PAR_DOMAIN = 'OpenStack', +HP3PAR_CPG = 'OpenStackCPG', +HP3PAR_CPG_SNAP = 'OpenStackCPGSnap' +CLI_CR = '\r\n' + + +class FakeHP3ParClient(object): + + api_url = None + debug = False + + volumes = [] + hosts = [] + vluns = [] + cpgs = [ + {'SAGrowth': {'LDLayout': {'diskPatterns': [{'diskType': 2}]}, + 'incrementMiB': 8192}, + 'SAUsage': {'rawTotalMiB': 24576, + 'rawUsedMiB': 768, + 'totalMiB': 8192, + 'usedMiB': 256}, + 'SDGrowth': {'LDLayout': {'RAIDType': 4, + 'diskPatterns': [{'diskType': 2}]}, + 'incrementMiB': 32768}, + 'SDUsage': {'rawTotalMiB': 49152, + 'rawUsedMiB': 1023, + 'totalMiB': 36864, + 'usedMiB': 768}, + 'UsrUsage': {'rawTotalMiB': 57344, + 'rawUsedMiB': 43349, + 'totalMiB': 43008, + 'usedMiB': 32512}, + 'additionalStates': [], + 'degradedStates': [], + 'domain': HP3PAR_DOMAIN, + 'failedStates': [], + 'id': 5, + 'name': HP3PAR_CPG, + 'numFPVVs': 2, + 'numTPVVs': 0, + 'state': 1, + 'uuid': '29c214aa-62b9-41c8-b198-543f6cf24edf'}] + + def __init__(self, api_url): + self.api_url = api_url + self.volumes = [] + self.hosts = [] + self.vluns = [] + + def debug_rest(self, flag): + self.debug = flag + + def login(self, username, password, optional=None): + return None + + def logout(self): + return None + + def getVolumes(self): + return self.volumes + + def getVolume(self, name): + if self.volumes: + for volume in self.volumes: + if volume['name'] == name: + return volume + + msg = {'code': 'NON_EXISTENT_HOST', + 'desc': "VOLUME '%s' was not found" % name} + raise hpexceptions.HTTPNotFound(msg) + + def createVolume(self, name, cpgName, sizeMiB, optional=None): + new_vol = {'additionalStates': [], + 'adminSpace': {'freeMiB': 0, + 'rawReservedMiB': 384, + 'reservedMiB': 128, + 'usedMiB': 128}, + 'baseId': 115, + 'comment': optional['comment'], + 'copyType': 1, + 'creationTime8601': '2012-10-22T16:37:57-07:00', + 'creationTimeSec': 1350949077, + 'degradedStates': [], + 'domain': HP3PAR_DOMAIN, + 'failedStates': [], + 'id': 115, + 'name': name, + 'policies': {'caching': True, + 'oneHost': False, + 'staleSS': True, + 'system': False, + 'zeroDetect': False}, + 'provisioningType': 1, + 'readOnly': False, + 'sizeMiB': sizeMiB, + 'snapCPG': optional['snapCPG'], + 'snapshotSpace': {'freeMiB': 0, + 'rawReservedMiB': 683, + 'reservedMiB': 512, + 'usedMiB': 512}, + 'ssSpcAllocLimitPct': 0, + 'ssSpcAllocWarningPct': 0, + 'state': 1, + 'userCPG': cpgName, + 'userSpace': {'freeMiB': 0, + 'rawReservedMiB': 41984, + 'reservedMiB': 31488, + 'usedMiB': 31488}, + 'usrSpcAllocLimitPct': 0, + 'usrSpcAllocWarningPct': 0, + 'uuid': '1e7daee4-49f4-4d07-9ab8-2b6a4319e243', + 'wwn': '50002AC00073383D'} + self.volumes.append(new_vol) + return None + + def deleteVolume(self, name): + volume = self.getVolume(name) + self.volumes.remove(volume) + + def createSnapshot(self, name, copyOfName, optional=None): + new_snap = {'additionalStates': [], + 'adminSpace': {'freeMiB': 0, + 'rawReservedMiB': 0, + 'reservedMiB': 0, + 'usedMiB': 0}, + 'baseId': 342, + 'comment': optional['comment'], + 'copyOf': copyOfName, + 'copyType': 3, + 
'creationTime8601': '2012-11-09T15:13:28-08:00', + 'creationTimeSec': 1352502808, + 'degradedStates': [], + 'domain': HP3PAR_DOMAIN, + 'expirationTime8601': '2012-11-09T17:13:28-08:00', + 'expirationTimeSec': 1352510008, + 'failedStates': [], + 'id': 343, + 'name': name, + 'parentId': 342, + 'policies': {'caching': True, + 'oneHost': False, + 'staleSS': True, + 'system': False, + 'zeroDetect': False}, + 'provisioningType': 3, + 'readOnly': True, + 'retentionTime8601': '2012-11-09T16:13:27-08:00', + 'retentionTimeSec': 1352506407, + 'sizeMiB': 256, + 'snapCPG': HP3PAR_CPG_SNAP, + 'snapshotSpace': {'freeMiB': 0, + 'rawReservedMiB': 0, + 'reservedMiB': 0, + 'usedMiB': 0}, + 'ssSpcAllocLimitPct': 0, + 'ssSpcAllocWarningPct': 0, + 'state': 1, + 'userCPG': HP3PAR_CPG, + 'userSpace': {'freeMiB': 0, + 'rawReservedMiB': 0, + 'reservedMiB': 0, + 'usedMiB': 0}, + 'usrSpcAllocLimitPct': 0, + 'usrSpcAllocWarningPct': 0, + 'uuid': 'd7a40b8f-2511-46a8-9e75-06383c826d19', + 'wwn': '50002AC00157383D'} + self.volumes.append(new_snap) + return None + + def deleteSnapshot(self, name): + volume = self.getVolume(name) + self.volumes.remove(volume) + + def createCPG(self, name, optional=None): + cpg = {'SAGrowth': {'LDLayout': {'diskPatterns': [{'diskType': 2}]}, + 'incrementMiB': 8192}, + 'SAUsage': {'rawTotalMiB': 24576, + 'rawUsedMiB': 768, + 'totalMiB': 8192, + 'usedMiB': 256}, + 'SDGrowth': {'LDLayout': {'RAIDType': 4, + 'diskPatterns': [{'diskType': 2}]}, + 'incrementMiB': 32768}, + 'SDUsage': {'rawTotalMiB': 49152, + 'rawUsedMiB': 1023, + 'totalMiB': 36864, + 'usedMiB': 768}, + 'UsrUsage': {'rawTotalMiB': 57344, + 'rawUsedMiB': 43349, + 'totalMiB': 43008, + 'usedMiB': 32512}, + 'additionalStates': [], + 'degradedStates': [], + 'domain': HP3PAR_DOMAIN, + 'failedStates': [], + 'id': 1, + 'name': name, + 'numFPVVs': 2, + 'numTPVVs': 0, + 'state': 1, + 'uuid': '29c214aa-62b9-41c8-b198-000000000000'} + + new_cpg = cpg.copy() + new_cpg.update(optional) + self.cpgs.append(new_cpg) + + def getCPGs(self): + return self.cpgs + + def getCPG(self, name): + if self.cpgs: + for cpg in self.cpgs: + if cpg['name'] == name: + return cpg + + msg = {'code': 'NON_EXISTENT_HOST', + 'desc': "CPG '%s' was not found" % name} + raise hpexceptions.HTTPNotFound(msg) + + def deleteCPG(self, name): + cpg = self.getCPG(name) + self.cpgs.remove(cpg) + + def createVLUN(self, volumeName, lun, hostname=None, + portPos=None, noVcn=None, + overrideLowerPriority=None): + + vlun = {'active': False, + 'failedPathInterval': 0, + 'failedPathPol': 1, + 'hostname': hostname, + 'lun': lun, + 'multipathing': 1, + 'portPos': portPos, + 'type': 4, + 'volumeName': volumeName, + 'volumeWWN': '50002AC00077383D'} + self.vluns.append(vlun) + return None + + def deleteVLUN(self, name, lunID, hostname=None, port=None): + vlun = self.getVLUN(name) + self.vluns.remove(vlun) + + def getVLUNs(self): + return self.vluns + + def getVLUN(self, volumeName): + for vlun in self.vluns: + if vlun['volumeName'] == volumeName: + return vlun + + msg = {'code': 'NON_EXISTENT_HOST', + 'desc': "VLUN '%s' was not found" % volumeName} + raise hpexceptions.HTTPNotFound(msg) + + +class HP3PARBaseDriver(): + + VOLUME_ID = "d03338a9-9115-48a3-8dfc-35cdfcdc15a7" + CLONE_ID = "d03338a9-9115-48a3-8dfc-000000000000" + VOLUME_NAME = "volume-d03338a9-9115-48a3-8dfc-35cdfcdc15a7" + SNAPSHOT_ID = "2f823bdc-e36e-4dc8-bd15-de1c7a28ff31" + SNAPSHOT_NAME = "snapshot-2f823bdc-e36e-4dc8-bd15-de1c7a28ff31" + VOLUME_3PAR_NAME = "osv-0DM4qZEVSKON-DXN-NwVpw" + SNAPSHOT_3PAR_NAME = 
"oss-L4I73ONuTci9Fd4ceij-MQ" + FAKE_HOST = "fakehost" + USER_ID = '2689d9a913974c008b1d859013f23607' + PROJECT_ID = 'fac88235b9d64685a3530f73e490348f' + VOLUME_ID_SNAP = '761fc5e5-5191-4ec7-aeba-33e36de44156' + FAKE_DESC = 'test description name' + FAKE_FC_PORTS = ['0987654321234', '123456789000987'] + FAKE_ISCSI_PORTS = ['10.10.10.10', '10.10.10.11'] + + volume = {'name': VOLUME_NAME, + 'id': VOLUME_ID, + 'display_name': 'Foo Volume', + 'size': 2, + 'host': FAKE_HOST, + 'volume_type': None, + 'volume_type_id': None} + + snapshot = {'name': SNAPSHOT_NAME, + 'id': SNAPSHOT_ID, + 'user_id': USER_ID, + 'project_id': PROJECT_ID, + 'volume_id': VOLUME_ID_SNAP, + 'volume_name': VOLUME_NAME, + 'status': 'creating', + 'progress': '0%', + 'volume_size': 2, + 'display_name': 'fakesnap', + 'display_description': FAKE_DESC} + + connector = {'ip': '10.0.0.2', + 'initiator': 'iqn.1993-08.org.debian:01:222', + 'wwpns': ["123456789012345", "123456789054321"], + 'wwnns': ["223456789012345", "223456789054321"], + 'host': 'fakehost'} + + def fake_create_client(self): + return FakeHP3ParClient(self.driver.configuration.hp3par_api_url) + + def fake_get_3par_host(self, hostname): + if hostname not in self._hosts: + msg = {'code': 'NON_EXISTENT_HOST', + 'desc': "HOST '%s' was not found" % hostname} + raise hpexceptions.HTTPNotFound(msg) + else: + return self._hosts[hostname] + + def fake_delete_3par_host(self, hostname): + if hostname not in self._hosts: + msg = {'code': 'NON_EXISTENT_HOST', + 'desc': "HOST '%s' was not found" % hostname} + raise hpexceptions.HTTPNotFound(msg) + else: + self._hosts[hostname] = None + + def fake_create_3par_vlun(self, volume, hostname): + self.driver.client.createVLUN(volume, 19, hostname) + + def fake_get_ports(self): + return {'FC': self.FAKE_FC_PORTS, 'iSCSI': self.FAKE_ISCSI_PORTS} + + def fake_copy_volume(self, src_name, dest_name): + pass + + def fake_get_volume_state(self, vol_name): + return "normal" + + def test_delete_volume(self): + self.flags(lock_path=self.tempdir) + self.driver.delete_volume(self.volume) + self.assertRaises(hpexceptions.HTTPNotFound, + self.driver.client.getVolume, + self.VOLUME_ID) + + def test_create_snapshot(self): + self.flags(lock_path=self.tempdir) + self.driver.create_snapshot(self.snapshot) + + # check to see if the snapshot was created + snap_vol = self.driver.client.getVolume(self.SNAPSHOT_3PAR_NAME) + self.assertEqual(snap_vol['name'], self.SNAPSHOT_3PAR_NAME) + + def test_delete_snapshot(self): + self.flags(lock_path=self.tempdir) + + self.driver.create_snapshot(self.snapshot) + #make sure it exists first + vol = self.driver.client.getVolume(self.SNAPSHOT_3PAR_NAME) + self.assertEqual(vol['name'], self.SNAPSHOT_3PAR_NAME) + self.driver.delete_snapshot(self.snapshot) + + # the snapshot should be deleted now + self.assertRaises(hpexceptions.HTTPNotFound, + self.driver.client.getVolume, + self.SNAPSHOT_3PAR_NAME) + + def test_create_volume_from_snapshot(self): + self.flags(lock_path=self.tempdir) + self.driver.create_volume_from_snapshot(self.volume, self.snapshot) + + snap_vol = self.driver.client.getVolume(self.VOLUME_3PAR_NAME) + self.assertEqual(snap_vol['name'], self.VOLUME_3PAR_NAME) + + volume = self.volume.copy() + volume['size'] = 1 + self.assertRaises(exception.InvalidInput, + self.driver.create_volume_from_snapshot, + volume, self.snapshot) + + def test_terminate_connection(self): + self.flags(lock_path=self.tempdir) + #setup the connections + self.driver.initialize_connection(self.volume, self.connector) + vlun = 
self.driver.client.getVLUN(self.VOLUME_3PAR_NAME) + self.assertEqual(vlun['volumeName'], self.VOLUME_3PAR_NAME) + self.driver.terminate_connection(self.volume, self.connector, True) + # vlun should be gone. + self.assertRaises(hpexceptions.HTTPNotFound, + self.driver.client.getVLUN, + self.VOLUME_3PAR_NAME) + + +class TestHP3PARFCDriver(HP3PARBaseDriver, test.TestCase): + + _hosts = {} + + def setUp(self): + self.tempdir = tempfile.mkdtemp() + super(TestHP3PARFCDriver, self).setUp() + + configuration = mox.MockObject(conf.Configuration) + configuration.hp3par_debug = False + configuration.hp3par_username = 'testUser' + configuration.hp3par_password = 'testPassword' + configuration.hp3par_api_url = 'https://1.1.1.1/api/v1' + configuration.hp3par_domain = HP3PAR_DOMAIN + configuration.hp3par_cpg = HP3PAR_CPG + configuration.hp3par_cpg_snap = HP3PAR_CPG_SNAP + configuration.iscsi_ip_address = '1.1.1.2' + configuration.iscsi_port = '1234' + configuration.san_ip = '2.2.2.2' + configuration.san_login = 'test' + configuration.san_password = 'test' + configuration.hp3par_snapshot_expiration = "" + configuration.hp3par_snapshot_retention = "" + self.stubs.Set(hpfcdriver.HP3PARFCDriver, "_create_client", + self.fake_create_client) + self.stubs.Set(hpfcdriver.HP3PARFCDriver, + "_create_3par_fibrechan_host", + self.fake_create_3par_fibrechan_host) + + self.stubs.Set(hpfcdriver.hpcommon.HP3PARCommon, "_get_3par_host", + self.fake_get_3par_host) + self.stubs.Set(hpfcdriver.hpcommon.HP3PARCommon, "_delete_3par_host", + self.fake_delete_3par_host) + self.stubs.Set(hpdriver.hpcommon.HP3PARCommon, "_create_3par_vlun", + self.fake_create_3par_vlun) + self.stubs.Set(hpdriver.hpcommon.HP3PARCommon, "get_ports", + self.fake_get_ports) + + self.configuration = configuration + + self.driver = hpfcdriver.HP3PARFCDriver(configuration=configuration) + self.driver.do_setup(None) + + def tearDown(self): + shutil.rmtree(self.tempdir) + super(TestHP3PARFCDriver, self).tearDown() + + def fake_create_3par_fibrechan_host(self, hostname, wwn, + domain, persona_id): + host = {'FCPaths': [{'driverVersion': None, + 'firmwareVersion': None, + 'hostSpeed': 0, + 'model': None, + 'portPos': {'cardPort': 1, 'node': 1, + 'slot': 2}, + 'vendor': None, + 'wwn': wwn[0]}, + {'driverVersion': None, + 'firmwareVersion': None, + 'hostSpeed': 0, + 'model': None, + 'portPos': {'cardPort': 1, 'node': 0, + 'slot': 2}, + 'vendor': None, + 'wwn': wwn[1]}], + 'descriptors': None, + 'domain': domain, + 'iSCSIPaths': [], + 'id': 11, + 'name': hostname} + self._hosts[hostname] = host + + self.properties = {'data': + {'target_discovered': True, + 'target_lun': 186, + 'target_portal': '1.1.1.2:1234'}, + 'driver_volume_type': 'fibre_channel'} + + def test_create_volume(self): + self.flags(lock_path=self.tempdir) + model_update = self.driver.create_volume(self.volume) + metadata = model_update['metadata'] + self.assertFalse(metadata['3ParName'] is None) + self.assertEqual(metadata['CPG'], HP3PAR_CPG) + self.assertEqual(metadata['snapCPG'], HP3PAR_CPG_SNAP) + + def test_initialize_connection(self): + self.flags(lock_path=self.tempdir) + result = self.driver.initialize_connection(self.volume, self.connector) + self.assertEqual(result['driver_volume_type'], 'fibre_channel') + + # we should have a host and a vlun now. 
+ host = self.fake_get_3par_host(self.FAKE_HOST) + self.assertEquals(self.FAKE_HOST, host['name']) + self.assertEquals(HP3PAR_DOMAIN, host['domain']) + vlun = self.driver.client.getVLUN(self.VOLUME_3PAR_NAME) + + self.assertEquals(self.VOLUME_3PAR_NAME, vlun['volumeName']) + self.assertEquals(self.FAKE_HOST, vlun['hostname']) + + def test_create_cloned_volume(self): + self.flags(lock_path=self.tempdir) + self.stubs.Set(hpdriver.hpcommon.HP3PARCommon, "_get_volume_state", + self.fake_get_volume_state) + self.stubs.Set(hpdriver.hpcommon.HP3PARCommon, "_copy_volume", + self.fake_copy_volume) + self.state_tries = 0 + volume = {'name': HP3PARBaseDriver.VOLUME_NAME, + 'id': HP3PARBaseDriver.CLONE_ID, + 'display_name': 'Foo Volume', + 'size': 2, + 'host': HP3PARBaseDriver.FAKE_HOST, + 'source_volid': HP3PARBaseDriver.VOLUME_ID} + src_vref = {} + model_update = self.driver.create_cloned_volume(volume, src_vref) + self.assertTrue(model_update is not None) + metadata = model_update['metadata'] + self.assertFalse(metadata['3ParName'] is None) + self.assertEqual(metadata['CPG'], HP3PAR_CPG) + self.assertEqual(metadata['snapCPG'], HP3PAR_CPG_SNAP) + + def test_get_volume_stats(self): + self.flags(lock_path=self.tempdir) + + def fake_safe_get(*args): + return "HP3PARFCDriver" + + self.stubs.Set(self.driver.configuration, 'safe_get', fake_safe_get) + stats = self.driver.get_volume_stats(True) + self.assertEquals(stats['storage_protocol'], 'FC') + self.assertEquals(stats['total_capacity_gb'], 'infinite') + self.assertEquals(stats['free_capacity_gb'], 'infinite') + + #modify the CPG to have a limit + old_cpg = self.driver.client.getCPG(HP3PAR_CPG) + options = {'SDGrowth': {'limitMiB': 8192}} + self.driver.client.deleteCPG(HP3PAR_CPG) + self.driver.client.createCPG(HP3PAR_CPG, options) + + const = 0.0009765625 + stats = self.driver.get_volume_stats(True) + self.assertEquals(stats['storage_protocol'], 'FC') + total_capacity_gb = 8192 * const + self.assertEquals(stats['total_capacity_gb'], total_capacity_gb) + free_capacity_gb = int((8192 - old_cpg['UsrUsage']['usedMiB']) * const) + self.assertEquals(stats['free_capacity_gb'], free_capacity_gb) + self.driver.client.deleteCPG(HP3PAR_CPG) + self.driver.client.createCPG(HP3PAR_CPG, {}) + + def test_create_host(self): + self.flags(lock_path=self.tempdir) + + #record + self.stubs.UnsetAll() + _run_ssh = self.mox.CreateMock(hpdriver.hpcommon.HP3PARCommon._run_ssh) + self.stubs.Set(hpdriver.hpcommon.HP3PARCommon, "_run_ssh", _run_ssh) + + show_host_cmd = 'showhost -verbose fakehost' + _run_ssh(show_host_cmd, False).AndReturn([pack('no hosts listed'), '']) + + create_host_cmd = ('createhost -persona 1 -domain (\'OpenStack\',) ' + 'fakehost 123456789012345 123456789054321') + _run_ssh(create_host_cmd, False).AndReturn([CLI_CR, '']) + + _run_ssh(show_host_cmd, False).AndReturn([pack(FC_HOST_RET), '']) + self.mox.ReplayAll() + + host = self.driver._create_host(self.volume, self.connector) + self.assertEqual(host['name'], self.FAKE_HOST) + + def test_create_invalid_host(self): + self.flags(lock_path=self.tempdir) + + #record + self.stubs.UnsetAll() + _run_ssh = self.mox.CreateMock(hpdriver.hpcommon.HP3PARCommon._run_ssh) + self.stubs.Set(hpdriver.hpcommon.HP3PARCommon, "_run_ssh", _run_ssh) + + show_host_cmd = 'showhost -verbose fakehost' + _run_ssh(show_host_cmd, False).AndReturn([pack('no hosts listed'), '']) + + create_host_cmd = ('createhost -persona 1 -domain (\'OpenStack\',) ' + 'fakehost 123456789012345 123456789054321') + create_host_ret = pack(CLI_CR + 'already 
used by host fakehost.foo ') + _run_ssh(create_host_cmd, False).AndReturn([create_host_ret, '']) + self.mox.ReplayAll() + + self.assertRaises(exception.Duplicate3PARHost, + self.driver._create_host, + self.volume, + self.connector) + + def test_create_modify_host(self): + self.flags(lock_path=self.tempdir) + + #record + self.stubs.UnsetAll() + _run_ssh = self.mox.CreateMock(hpdriver.hpcommon.HP3PARCommon._run_ssh) + self.stubs.Set(hpdriver.hpcommon.HP3PARCommon, "_run_ssh", _run_ssh) + + show_host_cmd = 'showhost -verbose fakehost' + _run_ssh(show_host_cmd, False).AndReturn([pack(NO_FC_HOST_RET), '']) + + create_host_cmd = ('createhost -add fakehost ' + '123456789012345 123456789054321') + _run_ssh(create_host_cmd, False).AndReturn([CLI_CR, '']) + + show_host_cmd = 'showhost -verbose fakehost' + _run_ssh(show_host_cmd, False).AndReturn([pack(FC_HOST_RET), '']) + self.mox.ReplayAll() + + host = self.driver._create_host(self.volume, self.connector) + self.assertEqual(host['name'], self.FAKE_HOST) + + +class TestHP3PARISCSIDriver(HP3PARBaseDriver, test.TestCase): + + TARGET_IQN = "iqn.2000-05.com.3pardata:21810002ac00383d" + + _hosts = {} + + def setUp(self): + self.tempdir = tempfile.mkdtemp() + super(TestHP3PARISCSIDriver, self).setUp() + + configuration = mox.MockObject(conf.Configuration) + configuration.hp3par_debug = False + configuration.hp3par_username = 'testUser' + configuration.hp3par_password = 'testPassword' + configuration.hp3par_api_url = 'https://1.1.1.1/api/v1' + configuration.hp3par_domain = HP3PAR_DOMAIN + configuration.hp3par_cpg = HP3PAR_CPG + configuration.hp3par_cpg_snap = HP3PAR_CPG_SNAP + configuration.iscsi_ip_address = '1.1.1.2' + configuration.iscsi_port = '1234' + configuration.san_ip = '2.2.2.2' + configuration.san_login = 'test' + configuration.san_password = 'test' + configuration.hp3par_snapshot_expiration = "" + configuration.hp3par_snapshot_retention = "" + + self.stubs.Set(hpdriver.HP3PARISCSIDriver, "_create_client", + self.fake_create_client) + self.stubs.Set(hpdriver.HP3PARISCSIDriver, + "_iscsi_discover_target_iqn", + self.fake_iscsi_discover_target_iqn) + self.stubs.Set(hpdriver.HP3PARISCSIDriver, "_create_3par_iscsi_host", + self.fake_create_3par_iscsi_host) + self.stubs.Set(hpdriver.HP3PARISCSIDriver, + "_iscsi_discover_target_iqn", + self.fake_iscsi_discover_target_iqn) + + self.stubs.Set(hpdriver.hpcommon.HP3PARCommon, "_get_3par_host", + self.fake_get_3par_host) + self.stubs.Set(hpdriver.hpcommon.HP3PARCommon, "_delete_3par_host", + self.fake_delete_3par_host) + self.stubs.Set(hpdriver.hpcommon.HP3PARCommon, "_create_3par_vlun", + self.fake_create_3par_vlun) + + self.driver = hpdriver.HP3PARISCSIDriver(configuration=configuration) + self.driver.do_setup(None) + + target_iqn = 'iqn.2000-05.com.3pardata:21810002ac00383d' + self.properties = {'data': + {'target_discovered': True, + 'target_iqn': target_iqn, + 'target_lun': 186, + 'target_portal': '1.1.1.2:1234'}, + 'driver_volume_type': 'iscsi'} + + def tearDown(self): + shutil.rmtree(self.tempdir) + self._hosts = {} + super(TestHP3PARISCSIDriver, self).tearDown() + + def fake_iscsi_discover_target_iqn(self, ip_address): + return self.TARGET_IQN + + def fake_create_3par_iscsi_host(self, hostname, iscsi_iqn, + domain, persona_id): + host = {'FCPaths': [], + 'descriptors': None, + 'domain': domain, + 'iSCSIPaths': [{'driverVersion': None, + 'firmwareVersion': None, + 'hostSpeed': 0, + 'ipAddr': '10.10.221.59', + 'model': None, + 'name': iscsi_iqn, + 'portPos': {'cardPort': 1, 'node': 1, + 'slot': 8}, + 
'vendor': None}], + 'id': 11, + 'name': hostname} + self._hosts[hostname] = host + + def test_create_volume(self): + self.flags(lock_path=self.tempdir) + model_update = self.driver.create_volume(self.volume) + metadata = model_update['metadata'] + self.assertFalse(metadata['3ParName'] is None) + self.assertEqual(metadata['CPG'], HP3PAR_CPG) + self.assertEqual(metadata['snapCPG'], HP3PAR_CPG_SNAP) + + def test_initialize_connection(self): + self.flags(lock_path=self.tempdir) + result = self.driver.initialize_connection(self.volume, self.connector) + self.assertEqual(result['driver_volume_type'], 'iscsi') + self.assertEqual(result['data']['target_iqn'], + self.properties['data']['target_iqn']) + self.assertEqual(result['data']['target_portal'], + self.properties['data']['target_portal']) + self.assertEqual(result['data']['target_discovered'], + self.properties['data']['target_discovered']) + + # we should have a host and a vlun now. + host = self.fake_get_3par_host(self.FAKE_HOST) + self.assertEquals(self.FAKE_HOST, host['name']) + self.assertEquals(HP3PAR_DOMAIN, host['domain']) + vlun = self.driver.client.getVLUN(self.VOLUME_3PAR_NAME) + + self.assertEquals(self.VOLUME_3PAR_NAME, vlun['volumeName']) + self.assertEquals(self.FAKE_HOST, vlun['hostname']) + + def test_create_cloned_volume(self): + self.flags(lock_path=self.tempdir) + self.stubs.Set(hpdriver.hpcommon.HP3PARCommon, "_get_volume_state", + self.fake_get_volume_state) + self.stubs.Set(hpdriver.hpcommon.HP3PARCommon, "_copy_volume", + self.fake_copy_volume) + self.state_tries = 0 + volume = {'name': HP3PARBaseDriver.VOLUME_NAME, + 'id': HP3PARBaseDriver.CLONE_ID, + 'display_name': 'Foo Volume', + 'size': 2, + 'host': HP3PARBaseDriver.FAKE_HOST, + 'source_volid': HP3PARBaseDriver.VOLUME_ID} + src_vref = {} + model_update = self.driver.create_cloned_volume(volume, src_vref) + self.assertTrue(model_update is not None) + metadata = model_update['metadata'] + self.assertFalse(metadata['3ParName'] is None) + self.assertEqual(metadata['CPG'], HP3PAR_CPG) + self.assertEqual(metadata['snapCPG'], HP3PAR_CPG_SNAP) + + def test_get_volume_stats(self): + self.flags(lock_path=self.tempdir) + + def fake_safe_get(*args): + return "HP3PARFCDriver" + + self.stubs.Set(self.driver.configuration, 'safe_get', fake_safe_get) + stats = self.driver.get_volume_stats(True) + self.assertEquals(stats['storage_protocol'], 'iSCSI') + self.assertEquals(stats['total_capacity_gb'], 'infinite') + self.assertEquals(stats['free_capacity_gb'], 'infinite') + + #modify the CPG to have a limit + old_cpg = self.driver.client.getCPG(HP3PAR_CPG) + options = {'SDGrowth': {'limitMiB': 8192}} + self.driver.client.deleteCPG(HP3PAR_CPG) + self.driver.client.createCPG(HP3PAR_CPG, options) + + const = 0.0009765625 + stats = self.driver.get_volume_stats(True) + self.assertEquals(stats['storage_protocol'], 'iSCSI') + total_capacity_gb = 8192 * const + self.assertEquals(stats['total_capacity_gb'], total_capacity_gb) + free_capacity_gb = int((8192 - old_cpg['UsrUsage']['usedMiB']) * const) + self.assertEquals(stats['free_capacity_gb'], free_capacity_gb) + self.driver.client.deleteCPG(HP3PAR_CPG) + self.driver.client.createCPG(HP3PAR_CPG, {}) + + def test_create_host(self): + self.flags(lock_path=self.tempdir) + + #record + self.stubs.UnsetAll() + _run_ssh = self.mox.CreateMock(hpdriver.hpcommon.HP3PARCommon._run_ssh) + self.stubs.Set(hpdriver.hpcommon.HP3PARCommon, "_run_ssh", _run_ssh) + + show_host_cmd = 'showhost -verbose fakehost' + _run_ssh(show_host_cmd, 
False).AndReturn([pack('no hosts listed'), '']) + + create_host_cmd = ('createhost -iscsi -persona 1 -domain ' + '(\'OpenStack\',) ' + 'fakehost iqn.1993-08.org.debian:01:222') + _run_ssh(create_host_cmd, False).AndReturn([CLI_CR, '']) + + _run_ssh(show_host_cmd, False).AndReturn([pack(ISCSI_HOST_RET), '']) + self.mox.ReplayAll() + + host = self.driver._create_host(self.volume, self.connector) + self.assertEqual(host['name'], self.FAKE_HOST) + + def test_create_invalid_host(self): + self.flags(lock_path=self.tempdir) + + #record + self.stubs.UnsetAll() + _run_ssh = self.mox.CreateMock(hpdriver.hpcommon.HP3PARCommon._run_ssh) + self.stubs.Set(hpdriver.hpcommon.HP3PARCommon, "_run_ssh", _run_ssh) + + show_host_cmd = 'showhost -verbose fakehost' + _run_ssh(show_host_cmd, False).AndReturn([pack('no hosts listed'), '']) + + create_host_cmd = ('createhost -iscsi -persona 1 -domain ' + '(\'OpenStack\',) ' + 'fakehost iqn.1993-08.org.debian:01:222') + in_use_ret = pack('\r\nalready used by host fakehost.foo ') + _run_ssh(create_host_cmd, False).AndReturn([in_use_ret, '']) + self.mox.ReplayAll() + + self.assertRaises(exception.Duplicate3PARHost, + self.driver._create_host, + self.volume, + self.connector) + + def test_create_modify_host(self): + self.flags(lock_path=self.tempdir) + + #record + self.stubs.UnsetAll() + _run_ssh = self.mox.CreateMock(hpdriver.hpcommon.HP3PARCommon._run_ssh) + self.stubs.Set(hpdriver.hpcommon.HP3PARCommon, "_run_ssh", _run_ssh) + + show_host_cmd = 'showhost -verbose fakehost' + _run_ssh(show_host_cmd, False).AndReturn([pack(ISCSI_NO_HOST_RET), '']) + + create_host_cmd = ('createhost -iscsi -add fakehost ' + 'iqn.1993-08.org.debian:01:222') + _run_ssh(create_host_cmd, False).AndReturn([CLI_CR, '']) + _run_ssh(show_host_cmd, False).AndReturn([pack(ISCSI_HOST_RET), '']) + self.mox.ReplayAll() + + host = self.driver._create_host(self.volume, self.connector) + self.assertEqual(host['name'], self.FAKE_HOST) + + def test_iscsi_discover_target_iqn(self): + self.flags(lock_path=self.tempdir) + + #record + self.stubs.UnsetAll() + _run_ssh = self.mox.CreateMock(hpdriver.hpcommon.HP3PARCommon._run_ssh) + self.stubs.Set(hpdriver.hpcommon.HP3PARCommon, "_run_ssh", _run_ssh) + + show_port_cmd = 'showport -ids' + _run_ssh(show_port_cmd, False).AndReturn([pack(ISCSI_PORT_IDS_RET), + '']) + self.mox.ReplayAll() + + iqn = self.driver._iscsi_discover_target_iqn('10.10.120.253') + self.assertEqual(iqn, self.TARGET_IQN) + + def test_get_volume_state(self): + self.flags(lock_path=self.tempdir) + + #record + self.stubs.UnsetAll() + _run_ssh = self.mox.CreateMock(hpdriver.hpcommon.HP3PARCommon._run_ssh) + self.stubs.Set(hpdriver.hpcommon.HP3PARCommon, "_run_ssh", _run_ssh) + + show_vv_cmd = ('showvv -state ' + 'volume-d03338a9-9115-48a3-8dfc-35cdfcdc15a7') + _run_ssh(show_vv_cmd, False).AndReturn([pack(VOLUME_STATE_RET), '']) + self.mox.ReplayAll() + + status = self.driver.common._get_volume_state(self.VOLUME_NAME) + self.assertEqual(status, 'normal') + + def test_get_ports(self): + self.flags(lock_path=self.tempdir) + + #record + self.stubs.UnsetAll() + _run_ssh = self.mox.CreateMock(hpdriver.hpcommon.HP3PARCommon._run_ssh) + self.stubs.Set(hpdriver.hpcommon.HP3PARCommon, "_run_ssh", _run_ssh) + + show_port_cmd = 'showport' + _run_ssh(show_port_cmd, False).AndReturn([pack(PORT_RET), '']) + + show_port_i_cmd = 'showport -iscsi' + _run_ssh(show_port_i_cmd, False).AndReturn([pack(ISCSI_PORT_RET), '']) + self.mox.ReplayAll() + + ports = self.driver.common.get_ports() + 
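A worked check of the CPG capacity arithmetic that both get_volume_stats tests in this file rely on, using the fake CPG data defined near the top of the file:

# const = 0.0009765625 is 1.0 / 1024, i.e. the MiB -> GiB conversion.
# With the SDGrowth limit set to 8192 MiB and the fake CPG reporting
# UsrUsage['usedMiB'] == 32512:
#     total_capacity_gb = 8192 * (1.0 / 1024)           # 8.0
#     free_capacity_gb = int((8192 - 32512) / 1024.0)   # int(-23.75) == -23
# The fake usage exceeds the limit, so "free" comes out negative; the
# assertions recompute the same formula, so they check consistency with the
# driver's math rather than plausibility of the numbers.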
self.assertEqual(ports['FC'][0], '20210002AC00383D') + + +def pack(arg): + header = '\r\n\r\n\r\n\r\n\r\n' + footer = '\r\n\r\n\r\n' + return header + arg + footer + +FC_HOST_RET = ( + 'Id,Name,Persona,-WWN/iSCSI_Name-,Port,IP_addr\r\n' + '75,fakehost,Generic,50014380242B8B4C,0:2:1,n/a\r\n' + '75,fakehost,Generic,50014380242B8B4E,---,n/a\r\n' + '75,fakehost,Generic,1000843497F90711,0:2:1,n/a \r\n' + '75,fakehost,Generic,1000843497F90715,1:2:1,n/a\r\n' + '\r\n' + 'Id,Name,-Initiator_CHAP_Name-,-Target_CHAP_Name-\r\n' + '75,fakehost,--,--\r\n' + '\r\n' + '---------- Host fakehost ----------\r\n' + 'Name : fakehost\r\n' + 'Domain : FAKE_TEST\r\n' + 'Id : 75\r\n' + 'Location : --\r\n' + 'IP Address : --\r\n' + 'OS : --\r\n' + 'Model : --\r\n' + 'Contact : --\r\n' + 'Comment : -- \r\n\r\n\r\n') + +NO_FC_HOST_RET = ( + 'Id,Name,Persona,-WWN/iSCSI_Name-,Port,IP_addr\r\n' + '\r\n' + 'Id,Name,-Initiator_CHAP_Name-,-Target_CHAP_Name-\r\n' + '75,fakehost,--,--\r\n' + '\r\n' + '---------- Host fakehost ----------\r\n' + 'Name : fakehost\r\n' + 'Domain : FAKE_TEST\r\n' + 'Id : 75\r\n' + 'Location : --\r\n' + 'IP Address : --\r\n' + 'OS : --\r\n' + 'Model : --\r\n' + 'Contact : --\r\n' + 'Comment : -- \r\n\r\n\r\n') + +ISCSI_HOST_RET = ( + 'Id,Name,Persona,-WWN/iSCSI_Name-,Port,IP_addr\r\n' + '75,fakehost,Generic,iqn.1993-08.org.debian:01:222,---,10.10.222.12\r\n' + '\r\n' + 'Id,Name,-Initiator_CHAP_Name-,-Target_CHAP_Name-\r\n' + '75,fakehost,--,--\r\n' + '\r\n' + '---------- Host fakehost ----------\r\n' + 'Name : fakehost\r\n' + 'Domain : FAKE_TEST\r\n' + 'Id : 75\r\n' + 'Location : --\r\n' + 'IP Address : --\r\n' + 'OS : --\r\n' + 'Model : --\r\n' + 'Contact : --\r\n' + 'Comment : -- \r\n\r\n\r\n') + +ISCSI_NO_HOST_RET = ( + 'Id,Name,Persona,-WWN/iSCSI_Name-,Port,IP_addr\r\n' + '\r\n' + 'Id,Name,-Initiator_CHAP_Name-,-Target_CHAP_Name-\r\n' + '75,fakehost,--,--\r\n' + '\r\n' + '---------- Host fakehost ----------\r\n' + 'Name : fakehost\r\n' + 'Domain : FAKE_TEST\r\n' + 'Id : 75\r\n' + 'Location : --\r\n' + 'IP Address : --\r\n' + 'OS : --\r\n' + 'Model : --\r\n' + 'Contact : --\r\n' + 'Comment : -- \r\n\r\n\r\n') + +ISCSI_PORT_IDS_RET = ( + 'N:S:P,-Node_WWN/IPAddr-,-----------Port_WWN/iSCSI_Name-----------\r\n' + '0:2:1,28210002AC00383D,20210002AC00383D\r\n' + '0:2:2,2FF70002AC00383D,20220002AC00383D\r\n' + '0:2:3,2FF70002AC00383D,20230002AC00383D\r\n' + '0:2:4,2FF70002AC00383D,20240002AC00383D\r\n' + '0:5:1,2FF70002AC00383D,20510002AC00383D\r\n' + '0:5:2,2FF70002AC00383D,20520002AC00383D\r\n' + '0:5:3,2FF70002AC00383D,20530002AC00383D\r\n' + '0:5:4,2FF70202AC00383D,20540202AC00383D\r\n' + '0:6:4,2FF70002AC00383D,20640002AC00383D\r\n' + '0:8:1,10.10.120.253,iqn.2000-05.com.3pardata:21810002ac00383d\r\n' + '0:8:2,0.0.0.0,iqn.2000-05.com.3pardata:20820002ac00383d\r\n' + '1:2:1,29210002AC00383D,21210002AC00383D\r\n' + '1:2:2,2FF70002AC00383D,21220002AC00383D\r\n' + '-----------------------------------------------------------------\r\n') + +VOLUME_STATE_RET = ( + 'Id,Name,Prov,Type,State,-Detailed_State-\r\n' + '410,volume-d03338a9-9115-48a3-8dfc-35cdfcdc15a7,snp,vcopy,normal,' + 'normal\r\n' + '-----------------------------------------------------------------\r\n') + +PORT_RET = ( + 'N:S:P,Mode,State,----Node_WWN----,-Port_WWN/HW_Addr-,Type,Protocol,' + 'Label,Partner,FailoverState\r\n' + '0:2:1,target,ready,28210002AC00383D,20210002AC00383D,host,FC,' + '-,1:2:1,none\r\n' + '0:2:2,initiator,loss_sync,2FF70002AC00383D,20220002AC00383D,free,FC,' + '-,-,-\r\n' + 
'0:2:3,initiator,loss_sync,2FF70002AC00383D,20230002AC00383D,free,FC,' + '-,-,-\r\n' + '0:2:4,initiator,loss_sync,2FF70002AC00383D,20240002AC00383D,free,FC,' + '-,-,-\r\n' + '0:5:1,initiator,loss_sync,2FF70002AC00383D,20510002AC00383D,free,FC,' + '-,-,-\r\n' + '0:5:2,initiator,loss_sync,2FF70002AC00383D,20520002AC00383D,free,FC,' + '-,-,-\r\n' + '0:5:3,initiator,loss_sync,2FF70002AC00383D,20530002AC00383D,free,FC,' + '-,-,-\r\n' + '0:5:4,initiator,ready,2FF70202AC00383D,20540202AC00383D,host,FC,' + '-,1:5:4,active\r\n' + '0:6:1,initiator,ready,2FF70002AC00383D,20610002AC00383D,disk,FC,' + '-,-,-\r\n' + '0:6:2,initiator,ready,2FF70002AC00383D,20620002AC00383D,disk,FC,' + '-,-,-\r\n') + +ISCSI_PORT_RET = ( + 'N:S:P,State,IPAddr,Netmask,Gateway,TPGT,MTU,Rate,DHCP,iSNS_Addr,' + 'iSNS_Port\r\n' + '0:8:1,ready,10.10.120.253,255.255.224.0,0.0.0.0,81,1500,10Gbps,' + '0,0.0.0.0,3205\r\n' + '0:8:2,loss_sync,0.0.0.0,0.0.0.0,0.0.0.0,82,1500,n/a,0,0.0.0.0,3205\r\n' + '1:8:1,ready,10.10.220.253,255.255.224.0,0.0.0.0,181,1500,10Gbps,' + '0,0.0.0.0,3205\r\n' + '1:8:2,loss_sync,0.0.0.0,0.0.0.0,0.0.0.0,182,1500,n/a,0,0.0.0.0,3205\r\n') diff --git a/cinder/tests/test_huawei.py b/cinder/tests/test_huawei.py new file mode 100644 index 0000000000..0ff2c0115a --- /dev/null +++ b/cinder/tests/test_huawei.py @@ -0,0 +1,859 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2012 Huawei Technologies Co., Ltd. +# Copyright (c) 2012 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Tests for HUAWEI volume driver. 
+""" +import mox +import os +import shutil +import tempfile +from xml.dom.minidom import Document +from xml.etree import ElementTree as ET + +from cinder import exception +from cinder.openstack.common import log as logging +from cinder import test +from cinder.volume import configuration as conf +from cinder.volume.drivers.huawei import huawei_iscsi + +LOG = logging.getLogger(__name__) + +LUNInfo = {'ID': None, + 'Name': None, + 'Size': None, + 'LUN WWN': None, + 'Status': None, + 'Visible Capacity': None, + 'Stripe Unit Size': None, + 'Disk Pool ID': None, + 'Format Progress': None, + 'Cache Prefetch Strategy': None, + 'LUNType': None, + 'Cache Write Strategy': None, + 'Running Cache Write Strategy': None, + 'Consumed Capacity': None, + 'Pool ID': None, + 'SnapShot ID': None, + 'LunCopy ID': None, + 'Whether Private LUN': None, + 'Remote Replication ID': None, + 'Split mirror ID': None, + 'Owner Controller': None, + 'Worker Controller': None, + 'RAID Group ID': None} + +LUNInfoCopy = {'ID': None, + 'Name': None, + 'Size': None, + 'LUN WWN': None, + 'Status': None, + 'Visible Capacity': None, + 'Stripe Unit Size': None, + 'Disk Pool ID': None, + 'Format Progress': None, + 'Cache Prefetch Strategy': None, + 'LUNType': None, + 'Cache Write Strategy': None, + 'Running Cache Write Strategy': None, + 'Consumed Capacity': None, + 'Pool ID': None, + 'SnapShot ID': None, + 'LunCopy ID': None, + 'Whether Private LUN': None, + 'Remote Replication ID': None, + 'Split mirror ID': None, + 'Owner Controller': None, + 'Worker Controller': None, + 'RAID Group ID': None} + +SnapshotInfo = {'Source LUN ID': None, + 'Source LUN Name': None, + 'ID': None, + 'Name': None, + 'Type': 'Public', + 'Status': None, + 'Time Stamp': '2013-01-15 14:00:00', + 'Rollback Start Time': '--', + 'Rollback End Time': '--', + 'Rollback Speed': '--', + 'Rollback Progress': '--'} + +MapInfo = {'Host Group ID': None, + 'Host Group Name': None, + 'File Engine Cluster': None, + 'Host ID': None, + 'Host Name': None, + 'Os Type': None, + 'INI Port ID': None, + 'INI Port Name': None, + 'INI Port Info': None, + 'Port Type': None, + 'Link Status': None, + 'LUN WWN': None, + 'DEV LUN ID': None, + 'Host LUN ID': None} + +HostPort = {'ID': None, + 'Name': None, + 'Info': None} + +LUNCopy = {'Name': None, + 'ID': None, + 'Type': None, + 'State': None, + 'Status': 'Disable'} + +FakeVolume = {'name': 'Volume-lele34fe-223f-dd33-4423-asdfghjklqwe', + 'size': '2', + 'id': '0', + 'wwn': '630303710030303701094b2b00000031', + 'provider_auth': None} + +FakeVolumeCopy = {'name': 'Volume-jeje34fe-223f-dd33-4423-asdfghjklqwg', + 'size': '3', + 'ID': '1', + 'wwn': '630303710030303701094b2b0000003'} + +FakeLUNCopy = {'ID': '1', + 'Type': 'FULL', + 'State': 'Created', + 'Status': 'Normal'} + +FakeSnapshot = {'name': 'keke34fe-223f-dd33-4423-asdfghjklqwf', + 'volume_name': 'Volume-lele34fe-223f-dd33-4423-asdfghjklqwe', + 'id': '3'} + +FakePoolInfo = {'ID': '2', + 'Level': 'RAID6', + 'Status': 'Normal', + 'Free Capacity': '10240', + 'Disk List': '0,1;0,2;0,3;0,4;0,5;0,6', + 'Name': 'RAID_001', + 'Type': 'Thick'} + +FakeConfInfo = {'HostGroup': 'HostGroup_OpenStack', + 'HostnamePrefix': 'Host_', + 'DefaultTargetIP': '192.168.100.1', + 'TargetIQN': 'iqn.2006-08.com.huawei:oceanspace:2103037:', + 'TargetIQN-T': 'iqn.2006-08.com.huawei:oceanspace:2103037::' + '20001:192.168.100.2', + 'TargetIQN-Dorado5100': 'iqn.2006-08.com.huawei:oceanspace:' + '2103037::192.168.100.2', + 'TargetIQN-Dorado2100G2': 'iqn.2006-08.com.huawei:oceanspace:' + 
'2103037::192.168.100.2-20001', + 'Initiator Name': 'iqn.1993-08.debian:01:ec2bff7ac3a3', + 'Initiator TargetIP': '192.168.100.2'} + +FakeConnector = {'initiator': "iqn.1993-08.debian:01:ec2bff7ac3a3"} + + +class HuaweiVolumeTestCase(test.TestCase): + + def __init__(self, *args, **kwargs): + super(HuaweiVolumeTestCase, self).__init__(*args, **kwargs) + + self.tmp_dir = tempfile.mkdtemp() + self.fake_conf_file = self.tmp_dir + '/cinder_huawei_conf.xml' + self._create_fake_conf_file() + configuration = mox.MockObject(conf.Configuration) + configuration.cinder_huawei_conf_file = self.fake_conf_file + configuration.append_config_values(mox.IgnoreArg()) + self.driver = FakeHuaweiStorage(configuration=configuration) + + self.driver.do_setup({}) + + def setUp(self): + super(HuaweiVolumeTestCase, self).setUp() + self.driver._test_flg = 'check_for_fail' + self._test_check_for_setup_errors() + + def test_create_export_failed(self): + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.create_export, + {}, FakeVolume) + + def test_delete_volume_failed(self): + self._test_delete_volume() + + def test_create_snapshot_failed(self): + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.create_snapshot, + FakeSnapshot) + + def test_delete_snapshot_failed(self): + self._test_delete_snapshot() + + def test_create_luncopy_failed(self): + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.create_volume_from_snapshot, + FakeVolumeCopy, FakeSnapshot) + + def test_initialize_failed(self): + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.initialize_connection, + FakeVolume, FakeConnector) + + def test_terminate_connection_failed(self): + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.terminate_connection, + FakeVolume, FakeConnector) + + def test_normal(self): + # test for T Series + self.driver._test_flg = 'check_for_T' + self._test_check_for_setup_errors() + self._test_create_volume() + self._test_create_export() + self._test_create_snapshot() + self._test_create_volume_from_snapshot() + self._test_initialize_connection_for_T() + self._test_terminate_connection() + self._test_delete_snapshot() + self._test_delete_volume() + self._test_get_get_volume_stats() + + # test for Dorado2100 G2 + self.driver._test_flg = 'check_for_Dorado2100G2' + self._test_check_for_setup_errors() + self._test_create_volume() + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.create_snapshot, + FakeSnapshot) + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.create_volume_from_snapshot, + FakeVolumeCopy, FakeSnapshot) + self._test_initialize_connection_for_Dorado2100G2() + self._test_terminate_connection() + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.delete_snapshot, + FakeSnapshot) + self._test_delete_volume() + + # test for Dorado5100 + self.driver._test_flg = 'check_for_Dorado5100' + self._test_check_for_setup_errors() + self._test_create_volume() + self._test_create_snapshot() + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.create_volume_from_snapshot, + FakeVolumeCopy, FakeSnapshot) + self._test_initialize_connection_for_Dorado5100() + self._test_terminate_connection() + self._test_delete_snapshot() + self._test_delete_volume() + + def cleanup(self): + if os.path.exists(self.fake_conf_file): + os.remove(self.fake_conf_file) + shutil.rmtree(self.tmp_dir) + + def _create_fake_conf_file(self): + doc = Document() + + config = doc.createElement('config') 
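For orientation, the DOM assembled below serializes to roughly this configuration file (pretty-printing aside):

# <config>
#   <Storage>
#     <ControllerIP0>10.10.10.1</ControllerIP0>
#     <ControllerIP1>10.10.10.2</ControllerIP1>
#     <UserName>admin</UserName>
#     <UserPassword>123456</UserPassword>
#   </Storage>
#   <LUN>
#     <StoragePool Name="RAID_001"/>
#     <StoragePool Name="RAID_002"/>
#   </LUN>
#   <iSCSI>
#     <DefaultTargetIP>192.168.100.1</DefaultTargetIP>
#     <Initiator Name="iqn.1993-08.debian:01:ec2bff7ac3a3"
#                TargetIP="192.168.100.2"/>
#   </iSCSI>
# </config>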
+ doc.appendChild(config) + + storage = doc.createElement('Storage') + config.appendChild(storage) + controllerip0 = doc.createElement('ControllerIP0') + controllerip0_text = doc.createTextNode('10.10.10.1') + controllerip0.appendChild(controllerip0_text) + storage.appendChild(controllerip0) + controllerip1 = doc.createElement('ControllerIP1') + controllerip1_text = doc.createTextNode('10.10.10.2') + controllerip1.appendChild(controllerip1_text) + storage.appendChild(controllerip1) + username = doc.createElement('UserName') + username_text = doc.createTextNode('admin') + username.appendChild(username_text) + storage.appendChild(username) + userpassword = doc.createElement('UserPassword') + userpassword_text = doc.createTextNode('123456') + userpassword.appendChild(userpassword_text) + storage.appendChild(userpassword) + + lun = doc.createElement('LUN') + config.appendChild(lun) + storagepool = doc.createElement('StoragePool') + storagepool.setAttribute('Name', 'RAID_001') + lun.appendChild(storagepool) + storagepool = doc.createElement('StoragePool') + storagepool.setAttribute('Name', 'RAID_002') + lun.appendChild(storagepool) + + iscsi = doc.createElement('iSCSI') + config.appendChild(iscsi) + defaulttargetip = doc.createElement('DefaultTargetIP') + defaulttargetip_text = doc.createTextNode('192.168.100.1') + defaulttargetip.appendChild(defaulttargetip_text) + iscsi.appendChild(defaulttargetip) + initiator = doc.createElement('Initiator') + initiator.setAttribute('Name', 'iqn.1993-08.debian:01:ec2bff7ac3a3') + initiator.setAttribute('TargetIP', '192.168.100.2') + iscsi.appendChild(initiator) + + file = open(self.fake_conf_file, 'w') + file.write(doc.toprettyxml(indent='')) + file.close() + + def _test_check_for_setup_errors(self): + self.driver.check_for_setup_error() + + def _test_create_volume(self): + self.driver.create_volume(FakeVolume) + self.assertNotEqual(LUNInfo["ID"], None) + self.assertEqual(LUNInfo["RAID Group ID"], FakePoolInfo['ID']) + + def _test_delete_volume(self): + self.driver.delete_volume(FakeVolume) + self.assertEqual(LUNInfo["ID"], None) + + def _test_create_snapshot(self): + self.driver.create_snapshot(FakeSnapshot) + self.assertNotEqual(SnapshotInfo["ID"], None) + self.assertNotEqual(LUNInfo["ID"], None) + self.assertEqual(SnapshotInfo["Status"], 'Active') + self.assertEqual(SnapshotInfo["Source LUN ID"], LUNInfo["ID"]) + + def _test_delete_snapshot(self): + self.driver.delete_snapshot(FakeSnapshot) + self.assertEqual(SnapshotInfo["ID"], None) + + def _test_create_volume_from_snapshot(self): + self.driver.create_volume_from_snapshot(FakeVolumeCopy, FakeSnapshot) + self.assertNotEqual(LUNInfoCopy["ID"], None) + + def _test_create_export(self): + retval = self.driver.create_export({}, FakeVolume) + self.assertNotEqual(retval, FakeVolume["id"]) + + def _test_initialize_connection_for_T(self): + connection_data = self.driver.initialize_connection(FakeVolume, + FakeConnector) + iscsi_properties = connection_data['data'] + + self.assertEquals(iscsi_properties['target_iqn'], + FakeConfInfo['TargetIQN-T']) + self.assertEquals(iscsi_properties['target_portal'], + FakeConfInfo['Initiator TargetIP'] + ':3260') + self.assertEqual(MapInfo["DEV LUN ID"], FakeVolume['id']) + self.assertEqual(MapInfo["INI Port Info"], + FakeConnector['initiator']) + + def _test_initialize_connection_for_Dorado2100G2(self): + connection_data = self.driver.initialize_connection(FakeVolume, + FakeConnector) + iscsi_properties = connection_data['data'] + + 
self.assertEquals(iscsi_properties['target_iqn'], + FakeConfInfo['TargetIQN-Dorado2100G2']) + self.assertEquals(iscsi_properties['target_portal'], + FakeConfInfo['Initiator TargetIP'] + ':3260') + self.assertEqual(MapInfo["DEV LUN ID"], FakeVolume['id']) + self.assertEqual(MapInfo["INI Port Info"], + FakeConnector['initiator']) + + def _test_initialize_connection_for_Dorado5100(self): + connection_data = self.driver.initialize_connection(FakeVolume, + FakeConnector) + iscsi_properties = connection_data['data'] + + self.assertEquals(iscsi_properties['target_iqn'], + FakeConfInfo['TargetIQN-Dorado5100']) + self.assertEquals(iscsi_properties['target_portal'], + FakeConfInfo['Initiator TargetIP'] + ':3260') + self.assertEqual(MapInfo["DEV LUN ID"], FakeVolume['id']) + self.assertEqual(MapInfo["INI Port Info"], + FakeConnector['initiator']) + + def _test_terminate_connection(self): + self.driver.terminate_connection(FakeVolume, FakeConnector) + self.assertEqual(MapInfo["DEV LUN ID"], None) + self.assertEqual(MapInfo["Host LUN ID"], None) + self.assertEqual(MapInfo["INI Port Info"], None) + + def _test_get_get_volume_stats(self): + stats = self.driver.get_volume_stats(True) + + fakecapacity = float(FakePoolInfo['Free Capacity']) / 1024 + self.assertEqual(stats['free_capacity_gb'], fakecapacity) + + +class FakeHuaweiStorage(huawei_iscsi.HuaweiISCSIDriver): + """Fake Huawei Storage, Rewrite some methods of HuaweiISCSIDriver.""" + + def __init__(self, *args, **kwargs): + super(FakeHuaweiStorage, self).__init__(*args, **kwargs) + self._test_flg = None + + def _execute_cli(self, cmdIn): + cmd = cmdIn.split(' ')[0].lower() + if cmd == 'showsys': + if ((self._test_flg == 'check_for_fail') or + (self._test_flg == 'check_for_T')): + out = """/>showsys +========================================================================== + System Information +-------------------------------------------------------------------------- + System Name | SN_S5500T-xu-0123456789 + Device Type | Oceanstor S5500T + Current System Mode | Double Controllers Normal + Mirroring Link Status | Link Up + Location | + Time | 2013-01-01 01:01:01 + Product Version | V100R005C00 +=========================================================================== +""" + elif self._test_flg == 'check_for_Dorado2100G2': + out = """/>showsys +========================================================================== + System Information +-------------------------------------------------------------------------- + System Name | SN_Dorado2100_G2 + Device Type | Oceanstor Dorado2100 G2 + Current System Mode | Double Controllers Normal + Mirroring Link Status | Link Up + Location | + Time | 2013-01-01 01:01:01 + Product Version | V100R001C00 +=========================================================================== +""" + elif self._test_flg == 'check_for_Dorado5100': + out = """/>showsys +========================================================================== + System Information +-------------------------------------------------------------------------- + System Name | SN_Dorado5100 + Device Type | Oceanstor Dorado5100 + Current System Mode | Double Controllers Normal + Mirroring Link Status | Link Up + Location | + Time | 2013-01-01 01:01:01 + Product Version | V100R001C00 +=========================================================================== +""" + elif cmd == 'addhostmap': + MapInfo['DEV LUN ID'] = LUNInfo['ID'] + MapInfo['LUN WWN'] = LUNInfo['LUN WWN'] + MapInfo['Host LUN ID'] = '0' + out = 'command operates successfully' + + elif cmd 
== 'showhostmap': + if MapInfo['DEV LUN ID'] is None: + out = 'command operates successfully, but no information.' + else: + out = """/>showhostmap +========================================================================== + Map Information +-------------------------------------------------------------------------- + Map ID Working Controller Dev LUN ID LUN WWN Host LUN ID Mapped to \ + RAID ID Dev LUN Cap(MB) Map Type Whether Command LUN Pool ID +--------------------------------------------------------------------------- + 2147483649 %s %s %s %s Host: %s %s %s HOST No -- +=========================================================================== +""" % (LUNInfo['Worker Controller'], LUNInfo['ID'], LUNInfo['LUN WWN'], + MapInfo['Host ID'], MapInfo['Host ID'], LUNInfo['RAID Group ID'], + str(int(LUNInfo['Size']) * 1024)) + + elif cmd == 'delhostmap': + MapInfo['DEV LUN ID'] = None + MapInfo['LUN WWN'] = None + MapInfo['Host LUN ID'] = None + out = 'command operates successfully' + + elif cmd == 'createsnapshot': + SnapshotInfo['Source LUN ID'] = LUNInfo['ID'] + SnapshotInfo['Source LUN Name'] = LUNInfo['Name'] + SnapshotInfo['ID'] = FakeSnapshot['id'] + SnapshotInfo['Name'] = self._name_translate(FakeSnapshot['name']) + SnapshotInfo['Status'] = 'Disable' + out = 'command operates successfully' + + elif cmd == 'actvsnapshot': + SnapshotInfo['Status'] = 'Active' + out = 'command operates successfully' + + elif cmd == 'disablesnapshot': + SnapshotInfo['Status'] = 'Disable' + out = 'command operates successfully' + + elif cmd == 'delsnapshot': + SnapshotInfo['Source LUN ID'] = None + SnapshotInfo['Source LUN Name'] = None + SnapshotInfo['ID'] = None + SnapshotInfo['Name'] = None + SnapshotInfo['Status'] = None + out = 'command operates successfully' + + elif cmd == 'showsnapshot': + if SnapshotInfo['ID'] is None: + out = 'command operates successfully, but no information.' 
+ else: + out = """/>showsnapshot +========================================================================== + Snapshot Information +-------------------------------------------------------------------------- + Name ID Type Status Time Stamp +-------------------------------------------------------------------------- + %s %s Public %s 2013-01-15 14:21:13 +========================================================================== +""" % (SnapshotInfo['Name'], SnapshotInfo['ID'], SnapshotInfo['Status']) + + elif cmd == 'showlunsnapshot': + if SnapshotInfo['ID'] is None: + out = """Current LUN is not a source LUN""" + else: + out = """/>showlunsnapshot -lun 2 +========================================================================== + Snapshot of LUN +-------------------------------------------------------------------------- + Name ID Type Status Time Stamp +-------------------------------------------------------------------------- + %s %s Public %s 2013-01-15 14:17:19 +========================================================================== +""" % (SnapshotInfo['Name'], SnapshotInfo['ID'], SnapshotInfo['Status']) + + elif cmd == 'createlun': + if LUNInfo['ID'] is None: + LUNInfo['Name'] = self._name_translate(FakeVolume['name']) + LUNInfo['ID'] = FakeVolume['id'] + LUNInfo['Size'] = FakeVolume['size'] + LUNInfo['LUN WWN'] = FakeVolume['wwn'] + LUNInfo['Owner Controller'] = 'A' + LUNInfo['Worker Controller'] = 'A' + LUNInfo['RAID Group ID'] = FakePoolInfo['ID'] + else: + LUNInfoCopy['Name'] = \ + self._name_translate(FakeVolumeCopy['name']) + LUNInfoCopy['ID'] = FakeVolumeCopy['ID'] + LUNInfoCopy['Size'] = FakeVolumeCopy['size'] + LUNInfoCopy['LUN WWN'] = FakeVolumeCopy['wwn'] + LUNInfoCopy['Owner Controller'] = 'A' + LUNInfoCopy['Worker Controller'] = 'A' + LUNInfoCopy['RAID Group ID'] = FakePoolInfo['ID'] + out = 'command operates successfully' + + elif cmd == 'dellun': + LUNInfo['Name'] = None + LUNInfo['ID'] = None + LUNInfo['Size'] = None + LUNInfo['LUN WWN'] = None + LUNInfo['Owner Controller'] = None + LUNInfo['Worker Controller'] = None + LUNInfo['RAID Group ID'] = None + out = 'command operates successfully' + + elif cmd == 'showlun': + if LUNInfo['ID'] is None: + out = 'command operates successfully, but no information.' 
+ elif LUNInfoCopy['ID'] is None: + if ((self._test_flg == 'check_for_fail') or + (self._test_flg == 'check_for_T')): + out = """/>showlun +=========================================================================== + LUN Information +--------------------------------------------------------------------------- + ID RAID Group ID Disk Pool ID Status Controller Visible Capacity(MB) \ + LUN Name Stripe Unit Size(KB) Lun Type +--------------------------------------------------------------------------- + %s %s -- Normal %s %s %s 64 THICK +=========================================================================== +""" % (LUNInfo['ID'], LUNInfo['RAID Group ID'], LUNInfo['Owner Controller'], + str(int(LUNInfo['Size']) * 1024), LUNInfo['Name']) + elif self._test_flg == 'check_for_Dorado2100G2': + out = """/>showlun +=========================================================================== + LUN Information +--------------------------------------------------------------------------- + ID Status Controller Visible Capacity(MB) LUN Name Lun Type +--------------------------------------------------------------------------- + %s Normal %s %s %s THICK +=========================================================================== +""" % (LUNInfo['ID'], LUNInfo['Owner Controller'], + str(int(LUNInfo['Size']) * 1024), LUNInfo['Name']) + elif self._test_flg == 'check_for_Dorado5100': + out = """/>showlun +=========================================================================== + LUN Information +--------------------------------------------------------------------------- + ID RAIDgroup ID Status Controller Visible Capacity(MB) LUN Name + Strip Unit Size(KB) Lun Type +--------------------------------------------------------------------------- + %s %s Normal %s %s %s 64 THICK +=========================================================================== +""" % (LUNInfo['ID'], LUNInfo['RAID Group ID'], + LUNInfo['Owner Controller'], str(int(LUNInfo['Size']) * 1024), + LUNInfo['Name']) + else: + if ((self._test_flg == 'check_for_fail') or + (self._test_flg == 'check_for_T')): + out = """/>showlun +============================================================================ + LUN Information +---------------------------------------------------------------------------- + ID RAID Group ID Disk Pool ID Status Controller Visible Capacity(MB)\ + LUN Name Stripe Unit Size(KB) Lun Type +---------------------------------------------------------------------------- + %s %s -- Normal %s %s %s 64 THICK + %s %s -- Normal %s %s %s 64 THICK +============================================================================ +""" % (LUNInfo['ID'], LUNInfo['RAID Group ID'], LUNInfo['Owner Controller'], + str(int(LUNInfo['Size']) * 1024), LUNInfo['Name'], LUNInfoCopy['ID'], + LUNInfoCopy['RAID Group ID'], LUNInfoCopy['Owner Controller'], + str(int(LUNInfoCopy['Size']) * 1024), LUNInfoCopy['Name']) + elif self._test_flg == 'check_for_Dorado2100G2': + out = """/>showlun +=========================================================================== + LUN Information +--------------------------------------------------------------------------- + ID Status Controller Visible Capacity(MB) LUN Name Lun Type +--------------------------------------------------------------------------- + %s Normal %s %s %s THICK + %s Normal %s %s %s THICK +=========================================================================== +""" % (LUNInfo['ID'], LUNInfo['Owner Controller'], + str(int(LUNInfo['Size']) * 1024), LUNInfo['Name'], + LUNInfoCopy['ID'], 
LUNInfoCopy['Owner Controller'],
+ str(int(LUNInfoCopy['Size']) * 1024), LUNInfoCopy['Name'])
+ elif self._test_flg == 'check_for_Dorado5100':
+ out = """/>showlun
+===========================================================================
+ LUN Information
+---------------------------------------------------------------------------
+ ID RAIDgroup ID Status Controller Visible Capacity(MB) LUN Name \
+ Strip Unit Size(KB) Lun Type
+---------------------------------------------------------------------------
+ %s %s Normal %s %s %s 64 THICK
+ %s %s Normal %s %s %s 64 THICK
+===========================================================================
+""" % (LUNInfo['ID'], LUNInfo['RAID Group ID'], LUNInfo['Owner Controller'],
+ str(int(LUNInfo['Size']) * 1024), LUNInfo['Name'],
+ LUNInfoCopy['ID'], LUNInfoCopy['RAID Group ID'],
+ LUNInfoCopy['Owner Controller'], str(int(LUNInfoCopy['Size']) * 1024),
+ LUNInfoCopy['Name'])
+
+ elif cmd == 'createhostgroup':
+ MapInfo['Host Group ID'] = '1'
+ MapInfo['Host Group Name'] = FakeConfInfo['HostGroup']
+ out = 'command operates successfully'
+
+ elif cmd == 'showhostgroup':
+ if MapInfo['Host Group ID'] is None:
+ out = """/>showhostgroup
+============================================================
+ Host Group Information
+------------------------------------------------------------
+ Host Group ID Name File Engine Cluster
+------------------------------------------------------------
+ 0 Default Group NO
+============================================================
+"""
+ else:
+ out = """/>showhostgroup
+============================================================
+ Host Group Information
+------------------------------------------------------------
+ Host Group ID Name File Engine Cluster
+------------------------------------------------------------
+ 0 Default Group NO
+ %s %s NO
+============================================================
+""" % (MapInfo['Host Group ID'], MapInfo['Host Group Name'])
+
+ elif cmd == 'addhost':
+ MapInfo['Host ID'] = '1'
+ MapInfo['Host Name'] = FakeConfInfo['HostnamePrefix'] + \
+ str(hash(FakeConnector['initiator']))
+ MapInfo['Os Type'] = 'Linux'
+ out = 'command operates successfully'
+
+ elif cmd == 'delhost':
+ MapInfo['Host ID'] = None
+ MapInfo['Host Name'] = None
+ MapInfo['Os Type'] = None
+ out = 'command operates successfully'
+
+ elif cmd == 'showhost':
+ if MapInfo['Host ID'] is None:
+ out = 'command operates successfully, but no information.'
+ else: + out = """/>showhost +======================================================= + Host Information +------------------------------------------------------- + Host ID Host Name Host Group ID Os Type +------------------------------------------------------- + %s %s %s Linux +======================================================= +""" % (MapInfo['Host ID'], MapInfo['Host Name'], MapInfo['Host Group ID']) + + elif cmd == 'createluncopy': + LUNCopy['Name'] = LUNInfoCopy['Name'] + LUNCopy['ID'] = FakeLUNCopy['ID'] + LUNCopy['Type'] = FakeLUNCopy['Type'] + LUNCopy['State'] = FakeLUNCopy['State'] + LUNCopy['Status'] = FakeLUNCopy['Status'] + out = 'command operates successfully' + + elif cmd == 'delluncopy': + LUNCopy['Name'] = None + LUNCopy['ID'] = None + LUNCopy['Type'] = None + LUNCopy['State'] = None + LUNCopy['Status'] = None + out = 'command operates successfully' + + elif cmd == 'chgluncopystatus': + LUNCopy['State'] = 'Complete' + out = 'command operates successfully' + + elif cmd == 'showluncopy': + if LUNCopy['ID'] is None: + out = 'command operates successfully, but no information.' + else: + out = """/>showluncopy +============================================================================ + LUN Copy Information +---------------------------------------------------------------------------- + LUN Copy Name LUN Copy ID Type LUN Copy State LUN Copy Status +---------------------------------------------------------------------------- + %s %s %s %s %s +============================================================================ +""" % (LUNCopy['Name'], LUNCopy['ID'], LUNCopy['Type'], + LUNCopy['State'], LUNCopy['Status']) + + elif cmd == 'showiscsitgtname': + if ((self._test_flg == 'check_for_fail') or + (self._test_flg == 'check_for_T')): + out = """/>showiscsitgtname +============================================================================ + ISCSI Name +---------------------------------------------------------------------------- + Iscsi Name | %s +============================================================================ +""" % FakeConfInfo['TargetIQN'] + elif (self._test_flg == 'check_for_Dorado2100G2' or + self._test_flg == 'check_for_Dorado5100'): + out = """/>showiscsitgtname +============================================================================ + ISCSI Name +---------------------------------------------------------------------------- + Iscsi Name | %s +============================================================================ +""" % FakeConfInfo['TargetIQN'] + + elif cmd == 'showiscsiip': + out = """/>showiscsiip +============================================================================ + iSCSI IP Information +---------------------------------------------------------------------------- + Controller ID Interface Module ID Port ID IP Address Mask +---------------------------------------------------------------------------- + A 0 P1 %s 255.255.255.0 +============================================================================ +""" % FakeConfInfo['Initiator TargetIP'] + + elif cmd == 'addhostport': + MapInfo['INI Port ID'] = HostPort['ID'] + MapInfo['INI Port Name'] = HostPort['Name'] + MapInfo['INI Port Info'] = HostPort['Info'] + out = 'command operates successfully' + + elif cmd == 'delhostport': + MapInfo['INI Port ID'] = None + MapInfo['INI Port Name'] = None + MapInfo['INI Port Info'] = None + out = 'command operates successfully' + + elif cmd == 'showhostport': + if MapInfo['INI Port ID'] is None: + out = 'command operates successfully, but no 
information.' + else: + out = """/>showhostport -host 3 +============================================================================== + Host Port Information +------------------------------------------------------------------------------ +Port ID Port Name Port Information Port Type Host ID \ +Link Status Multipath Type +------------------------------------------------------------------------------ + %s %s %s ISCSITGT %s Unconnected Default +============================================================================== +""" % (MapInfo['INI Port ID'], MapInfo['INI Port Name'], + MapInfo['INI Port Info'], MapInfo['Host ID']) + + elif cmd == 'addiscsiini': + HostPort['ID'] = '1' + HostPort['Name'] = 'iSCSIInitiator001' + HostPort['Info'] = FakeConfInfo['Initiator Name'] + out = 'command operates successfully' + + elif cmd == 'deliscsiini': + HostPort['ID'] = None + HostPort['Name'] = None + HostPort['Info'] = None + out = 'command operates successfully' + + elif cmd == 'showiscsiini': + if HostPort['ID'] is None: + out = 'Error: The parameter is wrong.' + else: + out = """/>showiscsiini -ini iqn.1993-08.org\ +.debian:01:503629a9d3f +======================================================== + Initiator Information +-------------------------------------------------------- + Initiator Name Chap Status +-------------------------------------------------------- + %s Disable +======================================================== +""" % (HostPort['Info']) + + elif cmd == 'showrg': + out = """/>showrg +===================================================================== + RAID Group Information +--------------------------------------------------------------------- + ID Level Status Free Capacity(MB) Disk List Name +--------------------------------------------------------------------- + 0 RAID6 Normal 1024 0,0;0,2;0,4;0,5;0,6;0,7; RAID003 + %s %s %s %s %s %s +===================================================================== +""" % (FakePoolInfo['ID'], FakePoolInfo['Level'], + FakePoolInfo['Status'], FakePoolInfo['Free Capacity'], + FakePoolInfo['Disk List'], FakePoolInfo['Name']) + + elif cmd == 'showrespool': + out = """/>showrespool +============================================================================ + Resource Pool Information +---------------------------------------------------------------------------- + Pool ID Size(MB) Usage(MB) Valid Size(MB) Alarm Threshold(%) +---------------------------------------------------------------------------- + A 5130.0 0.0 5130.0 80 + B 3082.0 0.0 3082.0 80 +============================================================================ +""" + + elif cmd == 'chglun': + out = 'command operates successfully' + + out = out.replace('\n', '\r\n') + return out + + def _get_lun_controller(self, lunid): + pass diff --git a/cinder/tests/test_iscsi.py b/cinder/tests/test_iscsi.py new file mode 100644 index 0000000000..321e805146 --- /dev/null +++ b/cinder/tests/test_iscsi.py @@ -0,0 +1,190 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +import os.path +import shutil +import string +import tempfile + +from cinder.brick.iscsi import iscsi +from cinder import test +from cinder.volume import utils as volume_utils + + +class TargetAdminTestCase(object): + + def setUp(self): + self.cmds = [] + + self.tid = 1 + self.target_name = 'iqn.2011-09.org.foo.bar:blaa' + self.lun = 10 + self.path = '/foo' + self.vol_id = 'blaa' + + self.script_template = None + self.stubs.Set(os.path, 'isfile', lambda _: True) + self.stubs.Set(os, 'unlink', lambda _: '') + self.stubs.Set(iscsi.TgtAdm, '_get_target', self.fake_get_target) + self.stubs.Set(iscsi.LioAdm, '_get_target', self.fake_get_target) + self.stubs.Set(iscsi.LioAdm, '__init__', self.fake_init) + + def fake_init(obj): + return + + def fake_get_target(obj, iqn): + return 1 + + def get_script_params(self): + return {'tid': self.tid, + 'target_name': self.target_name, + 'lun': self.lun, + 'path': self.path} + + def get_script(self): + return self.script_template % self.get_script_params() + + def fake_execute(self, *cmd, **kwargs): + self.cmds.append(string.join(cmd)) + return "", None + + def clear_cmds(self): + self.cmds = [] + + def verify_cmds(self, cmds): + self.assertEqual(len(cmds), len(self.cmds)) + for a, b in zip(cmds, self.cmds): + self.assertEqual(a, b) + + def verify(self): + script = self.get_script() + cmds = [] + for line in script.split('\n'): + if not line.strip(): + continue + cmds.append(line) + self.verify_cmds(cmds) + + def run_commands(self): + tgtadm = iscsi.get_target_admin() + tgtadm.set_execute(self.fake_execute) + tgtadm.create_iscsi_target(self.target_name, self.tid, + self.lun, self.path) + tgtadm.show_target(self.tid, iqn=self.target_name) + tgtadm.remove_iscsi_target(self.tid, self.lun, self.vol_id) + + def test_target_admin(self): + self.clear_cmds() + self.run_commands() + self.verify() + + +class TgtAdmTestCase(test.TestCase, TargetAdminTestCase): + + def setUp(self): + super(TgtAdmTestCase, self).setUp() + TargetAdminTestCase.setUp(self) + self.persist_tempdir = tempfile.mkdtemp() + self.flags(iscsi_helper='tgtadm') + self.flags(volumes_dir=self.persist_tempdir) + self.script_template = "\n".join([ + 'tgt-admin --update iqn.2011-09.org.foo.bar:blaa', + 'tgt-admin --force ' + '--delete iqn.2010-10.org.openstack:volume-blaa']) + + def tearDown(self): + try: + shutil.rmtree(self.persist_tempdir) + except OSError: + pass + super(TgtAdmTestCase, self).tearDown() + + +class IetAdmTestCase(test.TestCase, TargetAdminTestCase): + + def setUp(self): + super(IetAdmTestCase, self).setUp() + TargetAdminTestCase.setUp(self) + self.flags(iscsi_helper='ietadm') + self.script_template = "\n".join([ + 'ietadm --op new --tid=%(tid)s --params Name=%(target_name)s', + 'ietadm --op new --tid=%(tid)s --lun=%(lun)s ' + '--params Path=%(path)s,Type=fileio', + 'ietadm --op show --tid=%(tid)s', + 'ietadm --op delete --tid=%(tid)s --lun=%(lun)s', + 'ietadm --op delete --tid=%(tid)s']) + + +class IetAdmBlockIOTestCase(test.TestCase, TargetAdminTestCase): + + def setUp(self): + super(IetAdmBlockIOTestCase, self).setUp() + TargetAdminTestCase.setUp(self) + self.flags(iscsi_helper='ietadm') + self.flags(iscsi_iotype='blockio') + self.script_template = "\n".join([ + 'ietadm --op new --tid=%(tid)s --params Name=%(target_name)s', + 'ietadm --op new --tid=%(tid)s --lun=%(lun)s ' + '--params Path=%(path)s,Type=blockio', + 'ietadm --op show --tid=%(tid)s', + 'ietadm --op delete 
--tid=%(tid)s --lun=%(lun)s', + 'ietadm --op delete --tid=%(tid)s']) + + +class IetAdmFileIOTestCase(test.TestCase, TargetAdminTestCase): + + def setUp(self): + super(IetAdmFileIOTestCase, self).setUp() + TargetAdminTestCase.setUp(self) + self.flags(iscsi_helper='ietadm') + self.flags(iscsi_iotype='fileio') + self.script_template = "\n".join([ + 'ietadm --op new --tid=%(tid)s --params Name=%(target_name)s', + 'ietadm --op new --tid=%(tid)s --lun=%(lun)s ' + '--params Path=%(path)s,Type=fileio', + 'ietadm --op show --tid=%(tid)s', + 'ietadm --op delete --tid=%(tid)s --lun=%(lun)s', + 'ietadm --op delete --tid=%(tid)s']) + + +class IetAdmAutoIOTestCase(test.TestCase, TargetAdminTestCase): + + def setUp(self): + super(IetAdmAutoIOTestCase, self).setUp() + TargetAdminTestCase.setUp(self) + self.stubs.Set(volume_utils, 'is_block', lambda _: True) + self.flags(iscsi_helper='ietadm') + self.flags(iscsi_iotype='auto') + self.script_template = "\n".join([ + 'ietadm --op new --tid=%(tid)s --params Name=%(target_name)s', + 'ietadm --op new --tid=%(tid)s --lun=%(lun)s ' + '--params Path=%(path)s,Type=blockio', + 'ietadm --op show --tid=%(tid)s', + 'ietadm --op delete --tid=%(tid)s --lun=%(lun)s', + 'ietadm --op delete --tid=%(tid)s']) + + +class LioAdmTestCase(test.TestCase, TargetAdminTestCase): + + def setUp(self): + super(LioAdmTestCase, self).setUp() + TargetAdminTestCase.setUp(self) + self.persist_tempdir = tempfile.mkdtemp() + self.flags(iscsi_helper='lioadm') + self.script_template = "\n".join([ + 'rtstool create ' + '/foo iqn.2011-09.org.foo.bar:blaa test_id test_pass', + 'rtstool delete iqn.2010-10.org.openstack:volume-blaa']) diff --git a/cinder/tests/test_migrations.conf b/cinder/tests/test_migrations.conf new file mode 100644 index 0000000000..774f149940 --- /dev/null +++ b/cinder/tests/test_migrations.conf @@ -0,0 +1,9 @@ +[DEFAULT] +# Set up any number of migration data stores you want, one +# The "name" used in the test is the config variable key. +#sqlite=sqlite:///test_migrations.db +sqlite=sqlite:// +#mysql=mysql://root:@localhost/test_migrations +#postgresql=postgresql://user:pass@localhost/test_migrations +[walk_style] +snake_walk=yes diff --git a/cinder/tests/test_migrations.py b/cinder/tests/test_migrations.py new file mode 100644 index 0000000000..47b1050c51 --- /dev/null +++ b/cinder/tests/test_migrations.py @@ -0,0 +1,629 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010-2011 OpenStack, LLC +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Tests for database migrations. This test case reads the configuration +file test_migrations.conf for database connection settings +to use in the tests. For each connection found in the config file, +the test case runs a series of test cases to ensure that migrations work +properly both upgrading and downgrading, and that no data loss occurs +if possible. 
+""" + +import commands +import ConfigParser +import os +import urlparse +import uuid + +from migrate.versioning import repository +import sqlalchemy + +import cinder.db.migration as migration +import cinder.db.sqlalchemy.migrate_repo +from cinder.db.sqlalchemy.migration import versioning_api as migration_api +from cinder.openstack.common import log as logging +from cinder import test + +LOG = logging.getLogger('cinder.tests.test_migrations') + + +def _get_connect_string(backend, + user="openstack_citest", + passwd="openstack_citest", + database="openstack_citest"): + """ + Try to get a connection with a very specific set of values, if we get + these then we'll run the tests, otherwise they are skipped + """ + if backend == "postgres": + backend = "postgresql+psycopg2" + + return ("%(backend)s://%(user)s:%(passwd)s@localhost/%(database)s" + % locals()) + + +def _is_mysql_avail(**kwargs): + return _is_backend_avail('mysql', **kwargs) + + +def _is_backend_avail(backend, + user="openstack_citest", + passwd="openstack_citest", + database="openstack_citest"): + try: + if backend == "mysql": + connect_uri = _get_connect_string("mysql", user=user, + passwd=passwd, database=database) + elif backend == "postgres": + connect_uri = _get_connect_string("postgres", user=user, + passwd=passwd, database=database) + engine = sqlalchemy.create_engine(connect_uri) + connection = engine.connect() + except Exception: + # intentionally catch all to handle exceptions even if we don't + # have any backend code loaded. + return False + else: + connection.close() + engine.dispose() + return True + + +def _have_mysql(): + present = os.environ.get('NOVA_TEST_MYSQL_PRESENT') + if present is None: + return _is_backend_avail('mysql') + return present.lower() in ('', 'true') + + +def get_table(engine, name): + """Returns an sqlalchemy table dynamically from db. + + Needed because the models don't work for us in migrations + as models will be far out of sync with the current data.""" + metadata = sqlalchemy.schema.MetaData() + metadata.bind = engine + return sqlalchemy.Table(name, metadata, autoload=True) + + +class TestMigrations(test.TestCase): + """Test sqlalchemy-migrate migrations.""" + + DEFAULT_CONFIG_FILE = os.path.join(os.path.dirname(__file__), + 'test_migrations.conf') + # Test machines can set the CINDER_TEST_MIGRATIONS_CONF variable + # to override the location of the config file for migration testing + CONFIG_FILE_PATH = os.environ.get('CINDER_TEST_MIGRATIONS_CONF', + DEFAULT_CONFIG_FILE) + MIGRATE_FILE = cinder.db.sqlalchemy.migrate_repo.__file__ + REPOSITORY = repository.Repository( + os.path.abspath(os.path.dirname(MIGRATE_FILE))) + + def setUp(self): + super(TestMigrations, self).setUp() + + self.snake_walk = False + self.test_databases = {} + + # Load test databases from the config file. Only do this + # once. No need to re-run this on each test... + LOG.debug('config_path is %s' % TestMigrations.CONFIG_FILE_PATH) + if not self.test_databases: + if os.path.exists(TestMigrations.CONFIG_FILE_PATH): + cp = ConfigParser.RawConfigParser() + try: + cp.read(TestMigrations.CONFIG_FILE_PATH) + defaults = cp.defaults() + for key, value in defaults.items(): + self.test_databases[key] = value + self.snake_walk = cp.getboolean('walk_style', 'snake_walk') + except ConfigParser.ParsingError, e: + self.fail("Failed to read test_migrations.conf config " + "file. 
Got error: %s" % e) + else: + self.fail("Failed to find test_migrations.conf config " + "file.") + + self.engines = {} + for key, value in self.test_databases.items(): + self.engines[key] = sqlalchemy.create_engine(value) + + # We start each test case with a completely blank slate. + self._reset_databases() + + def tearDown(self): + + # We destroy the test data store between each test case, + # and recreate it, which ensures that we have no side-effects + # from the tests + self._reset_databases() + super(TestMigrations, self).tearDown() + + def _reset_databases(self): + def execute_cmd(cmd=None): + status, output = commands.getstatusoutput(cmd) + LOG.debug(output) + self.assertEqual(0, status) + for key, engine in self.engines.items(): + conn_string = self.test_databases[key] + conn_pieces = urlparse.urlparse(conn_string) + engine.dispose() + if conn_string.startswith('sqlite'): + # We can just delete the SQLite database, which is + # the easiest and cleanest solution + db_path = conn_pieces.path.strip('/') + if os.path.exists(db_path): + os.unlink(db_path) + # No need to recreate the SQLite DB. SQLite will + # create it for us if it's not there... + elif conn_string.startswith('mysql'): + # We can execute the MySQL client to destroy and re-create + # the MYSQL database, which is easier and less error-prone + # than using SQLAlchemy to do this via MetaData...trust me. + database = conn_pieces.path.strip('/') + loc_pieces = conn_pieces.netloc.split('@') + host = loc_pieces[1] + auth_pieces = loc_pieces[0].split(':') + user = auth_pieces[0] + password = "" + if len(auth_pieces) > 1: + if auth_pieces[1].strip(): + password = "-p\"%s\"" % auth_pieces[1] + sql = ("drop database if exists %(database)s; " + "create database %(database)s;") % locals() + cmd = ("mysql -u \"%(user)s\" %(password)s -h %(host)s " + "-e \"%(sql)s\"") % locals() + execute_cmd(cmd) + elif conn_string.startswith('postgresql'): + database = conn_pieces.path.strip('/') + loc_pieces = conn_pieces.netloc.split('@') + host = loc_pieces[1] + + auth_pieces = loc_pieces[0].split(':') + user = auth_pieces[0] + password = "" + if len(auth_pieces) > 1: + password = auth_pieces[1].strip() + # note(krtaylor): File creation problems with tests in + # venv using .pgpass authentication, changed to + # PGPASSWORD environment variable which is no longer + # planned to be deprecated + os.environ['PGPASSWORD'] = password + os.environ['PGUSER'] = user + # note(boris-42): We must create and drop database, we can't + # drop database which we have connected to, so for such + # operations there is a special database template1. 
+ sqlcmd = ("psql -w -U %(user)s -h %(host)s -c" + " '%(sql)s' -d template1") + sql = ("drop database if exists %(database)s;") % locals() + droptable = sqlcmd % locals() + execute_cmd(droptable) + sql = ("create database %(database)s;") % locals() + createtable = sqlcmd % locals() + execute_cmd(createtable) + os.unsetenv('PGPASSWORD') + os.unsetenv('PGUSER') + + def test_walk_versions(self): + """ + Walks all version scripts for each tested database, ensuring + that there are no errors in the version scripts for each engine + """ + for key, engine in self.engines.items(): + self._walk_versions(engine, self.snake_walk) + + def test_mysql_connect_fail(self): + """ + Test that we can trigger a mysql connection failure and we fail + gracefully to ensure we don't break people without mysql + """ + if _is_mysql_avail(user="openstack_cifail"): + self.fail("Shouldn't have connected") + + @test.skip_unless(_have_mysql(), "mysql not available") + def test_mysql_innodb(self): + """ + Test that table creation on mysql only builds InnoDB tables + """ + # add this to the global lists to make reset work with it, it's removed + # automaticaly in tearDown so no need to clean it up here. + connect_string = _get_connect_string('mysql') + engine = sqlalchemy.create_engine(connect_string) + self.engines["mysqlcitest"] = engine + self.test_databases["mysqlcitest"] = connect_string + + # build a fully populated mysql database with all the tables + self._reset_databases() + self._walk_versions(engine, False, False) + + uri = _get_connect_string('mysql', database="information_schema") + connection = sqlalchemy.create_engine(uri).connect() + + # sanity check + total = connection.execute("SELECT count(*) " + "from information_schema.TABLES " + "where TABLE_SCHEMA='openstack_citest'") + self.assertTrue(total.scalar() > 0, "No tables found. Wrong schema?") + + noninnodb = connection.execute("SELECT count(*) " + "from information_schema.TABLES " + "where TABLE_SCHEMA='openstack_citest' " + "and ENGINE!='InnoDB' " + "and TABLE_NAME!='migrate_version'") + count = noninnodb.scalar() + self.assertEqual(count, 0, "%d non InnoDB tables created" % count) + + def test_postgresql_connect_fail(self): + """ + Test that we can trigger a postgres connection failure and we fail + gracefully to ensure we don't break people without postgres + """ + if _is_backend_avail('postgres', user="openstack_cifail"): + self.fail("Shouldn't have connected") + + @test.skip_unless(_is_backend_avail('postgres'), + "postgresql not available") + def test_postgresql_opportunistically(self): + # add this to the global lists to make reset work with it, it's removed + # automatically in tearDown so no need to clean it up here. + connect_string = _get_connect_string("postgres") + engine = sqlalchemy.create_engine(connect_string) + self.engines["postgresqlcitest"] = engine + self.test_databases["postgresqlcitest"] = connect_string + + # build a fully populated postgresql database with all the tables + self._reset_databases() + self._walk_versions(engine, False, False) + + def _walk_versions(self, engine=None, snake_walk=False, downgrade=True): + # Determine latest version script from the repo, then + # upgrade from 1 through to the latest, with no data + # in the databases. This just checks that the schema itself + # upgrades successfully. 
+ + # Place the database under version control + migration_api.version_control(engine, + TestMigrations.REPOSITORY, + migration.INIT_VERSION) + self.assertEqual(migration.INIT_VERSION, + migration_api.db_version(engine, + TestMigrations.REPOSITORY)) + + migration_api.upgrade(engine, TestMigrations.REPOSITORY, + migration.INIT_VERSION + 1) + + LOG.debug('latest version is %s' % TestMigrations.REPOSITORY.latest) + + for version in xrange(migration.INIT_VERSION + 2, + TestMigrations.REPOSITORY.latest + 1): + # upgrade -> downgrade -> upgrade + self._migrate_up(engine, version, with_data=True) + if snake_walk: + self._migrate_down(engine, version - 1) + self._migrate_up(engine, version) + + if downgrade: + # Now walk it back down to 0 from the latest, testing + # the downgrade paths. + for version in reversed( + xrange(migration.INIT_VERSION + 1, + TestMigrations.REPOSITORY.latest)): + # downgrade -> upgrade -> downgrade + self._migrate_down(engine, version) + if snake_walk: + self._migrate_up(engine, version + 1) + self._migrate_down(engine, version) + + def _migrate_down(self, engine, version): + migration_api.downgrade(engine, + TestMigrations.REPOSITORY, + version) + self.assertEqual(version, + migration_api.db_version(engine, + TestMigrations.REPOSITORY)) + + def _migrate_up(self, engine, version, with_data=False): + """migrate up to a new version of the db. + + We allow for data insertion and post checks at every + migration version with special _prerun_### and + _check_### functions in the main test. + """ + # NOTE(sdague): try block is here because it's impossible to debug + # where a failed data migration happens otherwise + try: + if with_data: + data = None + prerun = getattr(self, "_prerun_%3.3d" % version, None) + if prerun: + data = prerun(engine) + + migration_api.upgrade(engine, + TestMigrations.REPOSITORY, + version) + self.assertEqual( + version, + migration_api.db_version(engine, + TestMigrations.REPOSITORY)) + + if with_data: + check = getattr(self, "_check_%3.3d" % version, None) + if check: + check(engine, data) + except Exception: + LOG.error("Failed to migrate to version %s on engine %s" % + (version, engine)) + raise + + # migration 004 - change volume types to UUID + def _prerun_004(self, engine): + data = { + 'volumes': [{'id': str(uuid.uuid4()), 'host': 'test1', + 'volume_type_id': 1}, + {'id': str(uuid.uuid4()), 'host': 'test2', + 'volume_type_id': 1}, + {'id': str(uuid.uuid4()), 'host': 'test3', + 'volume_type_id': 3}, + ], + 'volume_types': [{'name': 'vtype1'}, + {'name': 'vtype2'}, + {'name': 'vtype3'}, + ], + 'volume_type_extra_specs': [{'volume_type_id': 1, + 'key': 'v1', + 'value': 'hotep', + }, + {'volume_type_id': 1, + 'key': 'v2', + 'value': 'bending rodrigez', + }, + {'volume_type_id': 2, + 'key': 'v3', + 'value': 'bending rodrigez', + }, + ]} + + volume_types = get_table(engine, 'volume_types') + for vtype in data['volume_types']: + r = volume_types.insert().values(vtype).execute() + vtype['id'] = r.inserted_primary_key[0] + + volume_type_es = get_table(engine, 'volume_type_extra_specs') + for vtes in data['volume_type_extra_specs']: + r = volume_type_es.insert().values(vtes).execute() + vtes['id'] = r.inserted_primary_key[0] + + volumes = get_table(engine, 'volumes') + for vol in data['volumes']: + r = volumes.insert().values(vol).execute() + vol['id'] = r.inserted_primary_key[0] + + return data + + def _check_004(self, engine, data): + volumes = get_table(engine, 'volumes') + v1 = volumes.select(volumes.c.id == + data['volumes'][0]['id'] + 
).execute().first() + v2 = volumes.select(volumes.c.id == + data['volumes'][1]['id'] + ).execute().first() + v3 = volumes.select(volumes.c.id == + data['volumes'][2]['id'] + ).execute().first() + + volume_types = get_table(engine, 'volume_types') + vt1 = volume_types.select(volume_types.c.name == + data['volume_types'][0]['name'] + ).execute().first() + vt2 = volume_types.select(volume_types.c.name == + data['volume_types'][1]['name'] + ).execute().first() + vt3 = volume_types.select(volume_types.c.name == + data['volume_types'][2]['name'] + ).execute().first() + + vtes = get_table(engine, 'volume_type_extra_specs') + vtes1 = vtes.select(vtes.c.key == + data['volume_type_extra_specs'][0]['key'] + ).execute().first() + vtes2 = vtes.select(vtes.c.key == + data['volume_type_extra_specs'][1]['key'] + ).execute().first() + vtes3 = vtes.select(vtes.c.key == + data['volume_type_extra_specs'][2]['key'] + ).execute().first() + + self.assertEqual(v1['volume_type_id'], vt1['id']) + self.assertEqual(v2['volume_type_id'], vt1['id']) + self.assertEqual(v3['volume_type_id'], vt3['id']) + + self.assertEqual(vtes1['volume_type_id'], vt1['id']) + self.assertEqual(vtes2['volume_type_id'], vt1['id']) + self.assertEqual(vtes3['volume_type_id'], vt2['id']) + + def test_migration_005(self): + """Test that adding source_volid column works correctly.""" + for (key, engine) in self.engines.items(): + migration_api.version_control(engine, + TestMigrations.REPOSITORY, + migration.INIT_VERSION) + migration_api.upgrade(engine, TestMigrations.REPOSITORY, 4) + metadata = sqlalchemy.schema.MetaData() + metadata.bind = engine + + migration_api.upgrade(engine, TestMigrations.REPOSITORY, 5) + volumes = sqlalchemy.Table('volumes', + metadata, + autoload=True) + self.assertTrue(isinstance(volumes.c.source_volid.type, + sqlalchemy.types.VARCHAR)) + + def _metadatas(self, upgrade_to, downgrade_to=None): + for (key, engine) in self.engines.items(): + migration_api.version_control(engine, + TestMigrations.REPOSITORY, + migration.INIT_VERSION) + migration_api.upgrade(engine, + TestMigrations.REPOSITORY, + upgrade_to) + + if downgrade_to is not None: + migration_api.downgrade( + engine, TestMigrations.REPOSITORY, downgrade_to) + + metadata = sqlalchemy.schema.MetaData() + metadata.bind = engine + yield metadata + + def metadatas_upgraded_to(self, revision): + return self._metadatas(revision) + + def metadatas_downgraded_from(self, revision): + return self._metadatas(revision, revision - 1) + + def test_upgrade_006_adds_provider_location(self): + for metadata in self.metadatas_upgraded_to(6): + snapshots = sqlalchemy.Table('snapshots', metadata, autoload=True) + self.assertTrue(isinstance(snapshots.c.provider_location.type, + sqlalchemy.types.VARCHAR)) + + def test_downgrade_006_removes_provider_location(self): + for metadata in self.metadatas_downgraded_from(6): + snapshots = sqlalchemy.Table('snapshots', metadata, autoload=True) + + self.assertTrue('provider_location' not in snapshots.c) + + def test_upgrade_007_adds_fk(self): + for metadata in self.metadatas_upgraded_to(7): + snapshots = sqlalchemy.Table('snapshots', metadata, autoload=True) + volumes = sqlalchemy.Table('volumes', metadata, autoload=True) + + fkey, = snapshots.c.volume_id.foreign_keys + + self.assertEquals(volumes.c.id, fkey.column) + + def test_downgrade_007_removes_fk(self): + for metadata in self.metadatas_downgraded_from(7): + snapshots = sqlalchemy.Table('snapshots', metadata, autoload=True) + + self.assertEquals(0, len(snapshots.c.volume_id.foreign_keys)) + 
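+ # NOTE: _migrate_up() finds these hooks by name, so a data check for
+ # a hypothetical future migration 010 would follow the same pattern
+ # as _prerun_004/_check_004 above (sketch only; the 'fakes' table
+ # and its columns are made up for illustration):
+ #
+ # def _prerun_010(self, engine):
+ # fakes = get_table(engine, 'fakes')
+ # data = {'name': 'fake1'}
+ # fakes.insert().values(data).execute()
+ # return data
+ #
+ # def _check_010(self, engine, data):
+ # fakes = get_table(engine, 'fakes')
+ # row = fakes.select(fakes.c.name ==
+ # data['name']).execute().first()
+ # self.assertEqual(row['name'], data['name'])
+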
+ def test_migration_008(self):
+ """Test that adding and removing the backups table works correctly."""
+ for (key, engine) in self.engines.items():
+ migration_api.version_control(engine,
+ TestMigrations.REPOSITORY,
+ migration.INIT_VERSION)
+ migration_api.upgrade(engine, TestMigrations.REPOSITORY, 7)
+ metadata = sqlalchemy.schema.MetaData()
+ metadata.bind = engine
+
+ migration_api.upgrade(engine, TestMigrations.REPOSITORY, 8)
+
+ self.assertTrue(engine.dialect.has_table(engine.connect(),
+ "backups"))
+ backups = sqlalchemy.Table('backups',
+ metadata,
+ autoload=True)
+
+ self.assertTrue(isinstance(backups.c.created_at.type,
+ sqlalchemy.types.DATETIME))
+ self.assertTrue(isinstance(backups.c.updated_at.type,
+ sqlalchemy.types.DATETIME))
+ self.assertTrue(isinstance(backups.c.deleted_at.type,
+ sqlalchemy.types.DATETIME))
+ self.assertTrue(isinstance(backups.c.deleted.type,
+ sqlalchemy.types.BOOLEAN))
+ self.assertTrue(isinstance(backups.c.id.type,
+ sqlalchemy.types.VARCHAR))
+ self.assertTrue(isinstance(backups.c.volume_id.type,
+ sqlalchemy.types.VARCHAR))
+ self.assertTrue(isinstance(backups.c.user_id.type,
+ sqlalchemy.types.VARCHAR))
+ self.assertTrue(isinstance(backups.c.project_id.type,
+ sqlalchemy.types.VARCHAR))
+ self.assertTrue(isinstance(backups.c.host.type,
+ sqlalchemy.types.VARCHAR))
+ self.assertTrue(isinstance(backups.c.availability_zone.type,
+ sqlalchemy.types.VARCHAR))
+ self.assertTrue(isinstance(backups.c.display_name.type,
+ sqlalchemy.types.VARCHAR))
+ self.assertTrue(isinstance(backups.c.display_description.type,
+ sqlalchemy.types.VARCHAR))
+ self.assertTrue(isinstance(backups.c.container.type,
+ sqlalchemy.types.VARCHAR))
+ self.assertTrue(isinstance(backups.c.status.type,
+ sqlalchemy.types.VARCHAR))
+ self.assertTrue(isinstance(backups.c.fail_reason.type,
+ sqlalchemy.types.VARCHAR))
+ self.assertTrue(isinstance(backups.c.service_metadata.type,
+ sqlalchemy.types.VARCHAR))
+ self.assertTrue(isinstance(backups.c.service.type,
+ sqlalchemy.types.VARCHAR))
+ self.assertTrue(isinstance(backups.c.size.type,
+ sqlalchemy.types.INTEGER))
+ self.assertTrue(isinstance(backups.c.object_count.type,
+ sqlalchemy.types.INTEGER))
+
+ migration_api.downgrade(engine, TestMigrations.REPOSITORY, 7)
+
+ self.assertFalse(engine.dialect.has_table(engine.connect(),
+ "backups"))
+
+ def test_migration_009(self):
+ """Test adding snapshot_metadata table works correctly."""
+ for (key, engine) in self.engines.items():
+ migration_api.version_control(engine,
+ TestMigrations.REPOSITORY,
+ migration.INIT_VERSION)
+ migration_api.upgrade(engine, TestMigrations.REPOSITORY, 8)
+ metadata = sqlalchemy.schema.MetaData()
+ metadata.bind = engine
+
+ migration_api.upgrade(engine, TestMigrations.REPOSITORY, 9)
+
+ self.assertTrue(engine.dialect.has_table(engine.connect(),
+ "snapshot_metadata"))
+ snapshot_metadata = sqlalchemy.Table('snapshot_metadata',
+ metadata,
+ autoload=True)
+
+ self.assertTrue(isinstance(snapshot_metadata.c.created_at.type,
+ sqlalchemy.types.DATETIME))
+ self.assertTrue(isinstance(snapshot_metadata.c.updated_at.type,
+ sqlalchemy.types.DATETIME))
+ self.assertTrue(isinstance(snapshot_metadata.c.deleted_at.type,
+ sqlalchemy.types.DATETIME))
+ self.assertTrue(isinstance(snapshot_metadata.c.deleted.type,
+ sqlalchemy.types.BOOLEAN))
+ self.assertTrue(isinstance(snapshot_metadata.c.id.type,
+ sqlalchemy.types.INTEGER))
+ 
self.assertTrue(isinstance(snapshot_metadata.c.snapshot_id.type, + sqlalchemy.types.VARCHAR)) + self.assertTrue(isinstance(snapshot_metadata.c.key.type, + sqlalchemy.types.VARCHAR)) + self.assertTrue(isinstance(snapshot_metadata.c.value.type, + sqlalchemy.types.VARCHAR)) + + migration_api.downgrade(engine, TestMigrations.REPOSITORY, 8) + + self.assertFalse(engine.dialect.has_table(engine.connect(), + "snapshot_metadata")) diff --git a/cinder/tests/test_misc.py b/cinder/tests/test_misc.py new file mode 100644 index 0000000000..60ad26b7ed --- /dev/null +++ b/cinder/tests/test_misc.py @@ -0,0 +1,61 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 OpenStack LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import glob +import os + + +from cinder import exception +from cinder import test + + +class ExceptionTestCase(test.TestCase): + @staticmethod + def _raise_exc(exc): + raise exc() + + def test_exceptions_raise(self): + # NOTE(dprince): disable format errors since we are not passing kwargs + self.flags(fatal_exception_format_errors=False) + for name in dir(exception): + exc = getattr(exception, name) + if isinstance(exc, type): + self.assertRaises(exc, self._raise_exc, exc) + + +class ProjectTestCase(test.TestCase): + def test_all_migrations_have_downgrade(self): + topdir = os.path.normpath(os.path.dirname(__file__) + '/../../') + py_glob = os.path.join(topdir, "cinder", "db", "sqlalchemy", + "migrate_repo", "versions", "*.py") + missing_downgrade = [] + for path in glob.iglob(py_glob): + has_upgrade = False + has_downgrade = False + with open(path, "r") as f: + for line in f: + if 'def upgrade(' in line: + has_upgrade = True + if 'def downgrade(' in line: + has_downgrade = True + + if has_upgrade and not has_downgrade: + fname = os.path.basename(path) + missing_downgrade.append(fname) + + helpful_msg = (_("The following migrations are missing a downgrade:" + "\n\t%s") % '\n\t'.join(sorted(missing_downgrade))) + self.assert_(not missing_downgrade, helpful_msg) diff --git a/cinder/tests/test_netapp.py b/cinder/tests/test_netapp.py new file mode 100644 index 0000000000..c5e5738814 --- /dev/null +++ b/cinder/tests/test_netapp.py @@ -0,0 +1,2361 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2012 NetApp, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+""" +Tests for NetApp volume driver + +""" + +import BaseHTTPServer +import httplib +import logging as generic_logging +import shutil +import StringIO +import tempfile + +from lxml import etree + +from cinder.exception import VolumeBackendAPIException +from cinder.openstack.common import log as logging +from cinder import test +from cinder.volume import configuration as conf +from cinder.volume.drivers.netapp import iscsi +from cinder.volume.drivers.netapp.iscsi import netapp_opts + + +LOG = logging.getLogger("cinder.volume.driver") + +#NOTE(rushiagr): A bug in Suds package +# (https://fedorahosted.org/suds/ticket/359) causes nasty errors +# with tests while using debug-level logging. Unfortunately, +# the maintainers of the package stopped tending to any patch +# requests almost two years back. So setting the logging level to +# INFO here seems the only plausible workaround. +generic_logging.getLogger('suds.mx.core').setLevel(generic_logging.INFO) + +WSDL_HEADER = """ +""" + +WSDL_TYPES = """ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +""" + +WSDL_TRAILER = """ + + +""" + +RESPONSE_PREFIX = """ +""" + +RESPONSE_SUFFIX = """""" + +APIS = ['ApiProxy', 'DatasetListInfoIterStart', 'DatasetListInfoIterNext', + 'DatasetListInfoIterEnd', 'DatasetEditBegin', 'DatasetEditCommit', + 'DatasetProvisionMember', 'DatasetRemoveMember', 'DfmAbout', + 'DpJobProgressEventListIterStart', 'DpJobProgressEventListIterNext', + 'DpJobProgressEventListIterEnd', 'DatasetMemberListInfoIterStart', + 'DatasetMemberListInfoIterNext', 'DatasetMemberListInfoIterEnd', + 'HostListInfoIterStart', 'HostListInfoIterNext', 'HostListInfoIterEnd', + 'LunListInfoIterStart', 'LunListInfoIterNext', 'LunListInfoIterEnd', + 'StorageServiceDatasetProvision'] + +iter_count = 0 +iter_table = {} + + +def create_configuration(): + configuration = conf.Configuration(None) + configuration.append_config_values(netapp_opts) + return configuration + + +class FakeHTTPRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler): + """HTTP handler that doesn't spam the log.""" + + def log_message(self, format, *args): + pass + + +class FakeDfmServerHandler(FakeHTTPRequestHandler): + """HTTP handler that fakes enough stuff to allow the driver to run.""" + + def do_GET(s): + """Respond to a GET request.""" + if '/dfm.wsdl' != s.path: + s.send_response(404) + s.end_headers + return + s.send_response(200) + s.send_header("Content-Type", "application/wsdl+xml") + s.end_headers() + out = s.wfile + out.write(WSDL_HEADER) + out.write(WSDL_TYPES) + for api in APIS: + out.write('' % api) + out.write('' % api) + out.write('') + 
out.write('' % api) + out.write('' % api) + out.write('') + out.write('') + for api in APIS: + out.write('' % api) + out.write('' % api) + out.write('' % api) + out.write('') + out.write('') + out.write('') + out.write('') + for api in APIS: + out.write('' % api) + out.write('' % api) + out.write('') + out.write('') + out.write('') + out.write('') + out.write(WSDL_TRAILER) + + def do_POST(s): + """Respond to a POST request.""" + if '/apis/soap/v1' != s.path: + s.send_response(404) + s.end_headers + return + request_xml = s.rfile.read(int(s.headers['Content-Length'])) + ntap_ns = 'http://www.netapp.com/management/v1' + nsmap = {'env': 'http://schemas.xmlsoap.org/soap/envelope/', + 'na': ntap_ns} + root = etree.fromstring(request_xml) + + body = root.xpath('/env:Envelope/env:Body', namespaces=nsmap)[0] + request = body.getchildren()[0] + tag = request.tag + if not tag.startswith('{' + ntap_ns + '}'): + s.send_response(500) + s.end_headers + return + api = tag[(2 + len(ntap_ns)):] + global iter_count + global iter_table + if 'DatasetListInfoIterStart' == api: + iter_name = 'dataset_%s' % iter_count + iter_count = iter_count + 1 + iter_table[iter_name] = 0 + body = """ + 1 + %s + """ % iter_name + elif 'DatasetListInfoIterNext' == api: + tags = body.xpath('na:DatasetListInfoIterNext/na:Tag', + namespaces=nsmap) + iter_name = tags[0].text + if iter_table[iter_name]: + body = """ + + 0 + """ + else: + iter_table[iter_name] = 1 + body = """ + + + 0 + + + OpenStackProject + testproj + + + OpenStackVolType + + + + OpenStack_testproj + + + 1 + """ + elif 'DatasetListInfoIterEnd' == api: + body = """""" + elif 'DatasetEditBegin' == api: + body = """ + 0 + """ + elif 'DatasetEditCommit' == api: + body = """ + false + + + 0 + + + """ + elif 'DatasetProvisionMember' == api: + body = """""" + elif 'DatasetRemoveMember' == api: + body = """""" + elif 'DfmAbout' == api: + body = """""" + elif 'DpJobProgressEventListIterStart' == api: + iter_name = 'dpjobprogress_%s' % iter_count + iter_count = iter_count + 1 + iter_table[iter_name] = 0 + body = """ + 2 + %s + """ % iter_name + elif 'DpJobProgressEventListIterNext' == api: + tags = body.xpath('na:DpJobProgressEventListIterNext/na:Tag', + namespaces=nsmap) + iter_name = tags[0].text + if iter_table[iter_name]: + body = """""" + else: + iter_table[iter_name] = 1 + name = ('filer:/OpenStack_testproj/volume-00000001/' + 'volume-00000001') + body = """ + + + normal + lun-create + + 0 + %s + + + + normal + job-end + + + 2 + """ % name + elif 'DpJobProgressEventListIterEnd' == api: + body = """""" + elif 'DatasetMemberListInfoIterStart' == api: + iter_name = 'datasetmember_%s' % iter_count + iter_count = iter_count + 1 + iter_table[iter_name] = 0 + body = """ + 1 + %s + """ % iter_name + elif 'DatasetMemberListInfoIterNext' == api: + tags = body.xpath('na:DatasetMemberListInfoIterNext/na:Tag', + namespaces=nsmap) + iter_name = tags[0].text + if iter_table[iter_name]: + body = """ + + 0 + """ + else: + iter_table[iter_name] = 1 + name = ('filer:/OpenStack_testproj/volume-00000001/' + 'volume-00000001') + body = """ + + + 0 + %s + + + 1 + """ % name + elif 'DatasetMemberListInfoIterEnd' == api: + body = """""" + elif 'HostListInfoIterStart' == api: + body = """ + 1 + host + """ + elif 'HostListInfoIterNext' == api: + body = """ + + + 1.2.3.4 + 0 + filer + + + 1 + """ + elif 'HostListInfoIterEnd' == api: + body = """""" + elif 'LunListInfoIterStart' == api: + body = """ + 1 + lun + """ + elif 'LunListInfoIterNext' == api: + path = 
'OpenStack_testproj/volume-00000001/volume-00000001' + body = """ + + + 0 + %s + volume-00000001 + + + 1 + """ % path + elif 'LunListInfoIterEnd' == api: + body = """""" + elif 'ApiProxy' == api: + names = body.xpath('na:ApiProxy/na:Request/na:Name', + namespaces=nsmap) + proxy = names[0].text + if 'clone-list-status' == proxy: + op_elem = body.xpath('na:ApiProxy/na:Request/na:Args/' + 'clone-id/clone-id-info/clone-op-id', + namespaces=nsmap) + proxy_body = """ + + completed + + """ + if '0' == op_elem[0].text: + proxy_body = '' + elif 'clone-start' == proxy: + proxy_body = """ + + 1 + xxx + + """ + elif 'igroup-list-info' == proxy: + igroup = 'openstack-iqn.1993-08.org.debian:01:23456789' + initiator = 'iqn.1993-08.org.debian:01:23456789' + proxy_body = """ + + %s + iscsi + linux + + + %s + + + + """ % (igroup, initiator) + elif 'igroup-create' == proxy: + proxy_body = '' + elif 'igroup-add' == proxy: + proxy_body = '' + elif 'lun-map-list-info' == proxy: + proxy_body = '' + elif 'lun-map' == proxy: + proxy_body = '0' + elif 'lun-unmap' == proxy: + proxy_body = '' + elif 'iscsi-portal-list-info' == proxy: + proxy_body = """ + + 1.2.3.4 + 3260 + 1000 + + """ + elif 'iscsi-node-get-name' == proxy: + target = 'iqn.1992-08.com.netapp:sn.111111111' + proxy_body = '%s' % target + else: + # Unknown proxy API + s.send_response(500) + s.end_headers + return + api = api + ':' + proxy + proxy_header = '' + proxy_trailer = """passed + """ + body = proxy_header + proxy_body + proxy_trailer + else: + # Unknown API + s.send_response(500) + s.end_headers + return + s.send_response(200) + s.send_header("Content-Type", "text/xml; charset=utf-8") + s.end_headers() + s.wfile.write(RESPONSE_PREFIX) + s.wfile.write(body) + s.wfile.write(RESPONSE_SUFFIX) + + +class FakeHttplibSocket(object): + """A fake socket implementation for httplib.HTTPResponse""" + def __init__(self, value): + self._rbuffer = StringIO.StringIO(value) + self._wbuffer = StringIO.StringIO('') + oldclose = self._wbuffer.close + + def newclose(): + self.result = self._wbuffer.getvalue() + oldclose() + self._wbuffer.close = newclose + + def makefile(self, mode, _other): + """Returns the socket's internal buffer""" + if mode == 'r' or mode == 'rb': + return self._rbuffer + if mode == 'w' or mode == 'wb': + return self._wbuffer + + +class FakeHTTPConnection(object): + """A fake httplib.HTTPConnection for netapp tests + + Requests made via this connection actually get translated and routed into + the fake Dfm handler above, we then turn the response into + the httplib.HTTPResponse that the caller expects. 
+ """ + def __init__(self, host, timeout=None): + self.host = host + + def request(self, method, path, data=None, headers=None): + if not headers: + headers = {} + req_str = '%s %s HTTP/1.1\r\n' % (method, path) + for key, value in headers.iteritems(): + req_str += "%s: %s\r\n" % (key, value) + if data: + req_str += '\r\n%s' % data + + # NOTE(vish): normally the http transport normailizes from unicode + sock = FakeHttplibSocket(req_str.decode("latin-1").encode("utf-8")) + # NOTE(vish): stop the server from trying to look up address from + # the fake socket + FakeDfmServerHandler.address_string = lambda x: '127.0.0.1' + self.app = FakeDfmServerHandler(sock, '127.0.0.1:8088', None) + + self.sock = FakeHttplibSocket(sock.result) + self.http_response = httplib.HTTPResponse(self.sock) + + def set_debuglevel(self, level): + pass + + def getresponse(self): + self.http_response.begin() + return self.http_response + + def getresponsebody(self): + return self.sock.result + + +class NetAppDriverTestCase(test.TestCase): + """Test case for NetAppISCSIDriver""" + STORAGE_SERVICE = 'Openstack Service' + STORAGE_SERVICE_PREFIX = 'Openstack Service-' + PROJECT_ID = 'testproj' + VOLUME_NAME = 'volume-00000001' + VOLUME_TYPE = '' + VOLUME_SIZE = 2147483648L # 2 GB + INITIATOR = 'iqn.1993-08.org.debian:01:23456789' + + def setUp(self): + super(NetAppDriverTestCase, self).setUp() + self.tempdir = tempfile.mkdtemp() + self.flags(lock_path=self.tempdir) + driver = iscsi.NetAppISCSIDriver(configuration=create_configuration()) + self.stubs.Set(httplib, 'HTTPConnection', FakeHTTPConnection) + driver._create_client(wsdl_url='http://localhost:8088/dfm.wsdl', + login='root', password='password', + hostname='localhost', port=8088, cache=False) + driver._set_storage_service(self.STORAGE_SERVICE) + driver._set_storage_service_prefix(self.STORAGE_SERVICE_PREFIX) + driver._set_vfiler('') + self.driver = driver + + def tearDown(self): + shutil.rmtree(self.tempdir) + super(NetAppDriverTestCase, self).tearDown() + + def test_connect(self): + self.driver.check_for_setup_error() + + def test_create_destroy(self): + self.driver._discover_luns() + self.driver._provision(self.VOLUME_NAME, None, self.PROJECT_ID, + self.VOLUME_TYPE, self.VOLUME_SIZE) + self.driver._remove_destroy(self.VOLUME_NAME, self.PROJECT_ID) + + def test_destroy_uncreated_volume(self): + self.driver._remove_destroy('fake-nonexistent-volume', self.PROJECT_ID) + + def test_map_unmap(self): + self.driver._discover_luns() + self.driver._provision(self.VOLUME_NAME, None, self.PROJECT_ID, + self.VOLUME_TYPE, self.VOLUME_SIZE) + volume = {'name': self.VOLUME_NAME, 'project_id': self.PROJECT_ID, + 'id': 0, 'provider_auth': None} + updates = self.driver._get_export(volume) + self.assertTrue(updates['provider_location']) + volume['provider_location'] = updates['provider_location'] + connector = {'initiator': self.INITIATOR} + connection_info = self.driver.initialize_connection(volume, connector) + self.assertEqual(connection_info['driver_volume_type'], 'iscsi') + properties = connection_info['data'] + self.driver.terminate_connection(volume, connector) + self.driver._remove_destroy(self.VOLUME_NAME, self.PROJECT_ID) + + def test_clone(self): + self.driver._discover_luns() + self.driver._clone_lun(0, '/vol/vol/qtree/src', '/vol/vol/qtree/dst', + False) + + def test_clone_fail(self): + self.driver._discover_luns() + self.driver._is_clone_done(0, '0', 'xxx') + + def test_cloned_volume_size_fail(self): + volume_clone_fail = {'name': 'fail', 'size': '2'} + volume_src = 
{'name': 'source_vol', 'size': '1'} + try: + self.driver.create_cloned_volume(volume_clone_fail, + volume_src) + raise AssertionError() + except VolumeBackendAPIException: + pass + + +WSDL_HEADER_CMODE = """ + +""" + +WSDL_TYPES_CMODE = """ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + """ + +WSDL_TRAILER_CMODE = """ + + + + +""" + +RESPONSE_PREFIX_CMODE = """ + +""" + +RESPONSE_SUFFIX_CMODE = """""" + +CMODE_APIS = ['ProvisionLun', 'DestroyLun', 'CloneLun', 'MapLun', 'UnmapLun', + 'ListLuns', 'GetLunTargetDetails'] + + +class FakeCMODEServerHandler(FakeHTTPRequestHandler): + """HTTP handler that fakes enough stuff to allow the driver to run""" + + def do_GET(s): + """Respond to a GET request.""" + if '/ntap_cloud.wsdl' != s.path: + s.send_response(404) + s.end_headers + return + s.send_response(200) + s.send_header("Content-Type", "application/wsdl+xml") + s.end_headers() + out = s.wfile + out.write(WSDL_HEADER_CMODE) + out.write(WSDL_TYPES_CMODE) + for api in CMODE_APIS: + out.write('' % api) + out.write('' % api) + out.write('') + out.write('' % api) + out.write('' % api) + out.write('') + out.write('') + for api in CMODE_APIS: + out.write('' % api) + out.write('' % api) + out.write('' % api) + out.write('') + out.write('') + out.write('') + out.write('') + for api in CMODE_APIS: + out.write('' % api) + out.write('') + out.write('') + out.write('') + out.write('') + out.write('') + out.write(WSDL_TRAILER_CMODE) + + def do_POST(s): + """Respond to a POST request.""" + if '/ws/ntapcloud' != s.path: + s.send_response(404) + s.end_headers + return + request_xml = s.rfile.read(int(s.headers['Content-Length'])) + ntap_ns = 'http://cloud.netapp.com/' + nsmap = {'soapenv': 'http://schemas.xmlsoap.org/soap/envelope/', + 'na': ntap_ns} + root = etree.fromstring(request_xml) + + body = root.xpath('/soapenv:Envelope/soapenv:Body', + namespaces=nsmap)[0] + request = body.getchildren()[0] + tag = request.tag + if not tag.startswith('{' + ntap_ns + '}'): + s.send_response(500) + s.end_headers + return + api = tag[(2 + len(ntap_ns)):] + if 'ProvisionLun' == api: + body = """ + lun120 + 1d9c006c-a406-42f6-a23f-5ed7a6dc33e3 + OsType + linux + """ + elif 'DestroyLun' == api: + body = """""" + elif 'CloneLun' == api: + body = """ + snapshot12 + 98ea1791d228453899d422b4611642c3 + OsType + linux + """ + elif 'MapLun' == api: + body = """""" + elif 'Unmap' == api: + body = """""" + elif 'ListLuns' == api: + body = """ + + lun1 + 20 + asdjdnsd + + """ + elif 'GetLunTargetDetails' == api: + body = """ + +
1.2.3.4
+ 3260 + 1000 + iqn.1992-08.com.netapp:sn.123456789 + 0 +
+
""" + else: + # Unknown API + s.send_response(500) + s.end_headers + return + s.send_response(200) + s.send_header("Content-Type", "text/xml; charset=utf-8") + s.end_headers() + s.wfile.write(RESPONSE_PREFIX_CMODE) + s.wfile.write(body) + s.wfile.write(RESPONSE_SUFFIX_CMODE) + + +class FakeCmodeHTTPConnection(object): + """A fake httplib.HTTPConnection for netapp tests + + Requests made via this connection actually get translated and routed into + the fake Dfm handler above, we then turn the response into + the httplib.HTTPResponse that the caller expects. + """ + def __init__(self, host, timeout=None): + self.host = host + + def request(self, method, path, data=None, headers=None): + if not headers: + headers = {} + req_str = '%s %s HTTP/1.1\r\n' % (method, path) + for key, value in headers.iteritems(): + req_str += "%s: %s\r\n" % (key, value) + if data: + req_str += '\r\n%s' % data + + # NOTE(vish): normally the http transport normailizes from unicode + sock = FakeHttplibSocket(req_str.decode("latin-1").encode("utf-8")) + # NOTE(vish): stop the server from trying to look up address from + # the fake socket + FakeCMODEServerHandler.address_string = lambda x: '127.0.0.1' + self.app = FakeCMODEServerHandler(sock, '127.0.0.1:8080', None) + + self.sock = FakeHttplibSocket(sock.result) + self.http_response = httplib.HTTPResponse(self.sock) + + def set_debuglevel(self, level): + pass + + def getresponse(self): + self.http_response.begin() + return self.http_response + + def getresponsebody(self): + return self.sock.result + + +class NetAppCmodeISCSIDriverTestCase(test.TestCase): + """Test case for NetAppISCSIDriver""" + volume = {'name': 'lun1', 'size': 2, 'volume_name': 'lun1', + 'os_type': 'linux', 'provider_location': 'lun1', + 'id': 'lun1', 'provider_auth': None, 'project_id': 'project', + 'display_name': None, 'display_description': 'lun1', + 'volume_type_id': None} + snapshot = {'name': 'snapshot1', 'size': 2, 'volume_name': 'lun1', + 'volume_size': 2, 'project_id': 'project', + 'display_name': None, 'display_description': 'lun1', + 'volume_type_id': None} + snapshot_fail = {'name': 'snapshot2', 'size': 2, 'volume_name': 'lun1', + 'volume_size': 1, 'project_id': 'project'} + volume_sec = {'name': 'vol_snapshot', 'size': 2, 'volume_name': 'lun1', + 'os_type': 'linux', 'provider_location': 'lun1', + 'id': 'lun1', 'provider_auth': None, 'project_id': 'project', + 'display_name': None, 'display_description': 'lun1', + 'volume_type_id': None} + volume_clone_fail = {'name': 'cl_fail', 'size': 1, 'volume_name': 'fail', + 'os_type': 'linux', 'provider_location': 'cl_fail', + 'id': 'lun1', 'provider_auth': None, + 'project_id': 'project', 'display_name': None, + 'display_description': 'lun1', + 'volume_type_id': None} + connector = {'initiator': 'iqn.1993-08.org.debian:01:10'} + + def setUp(self): + super(NetAppCmodeISCSIDriverTestCase, self).setUp() + self._custom_setup() + + def _custom_setup(self): + driver = iscsi.NetAppCmodeISCSIDriver( + configuration=create_configuration()) + self.stubs.Set(httplib, 'HTTPConnection', FakeCmodeHTTPConnection) + driver._create_client(wsdl_url='http://localhost:8080/ntap_cloud.wsdl', + login='root', password='password', + hostname='localhost', port=8080, cache=False) + self.driver = driver + + def test_connect(self): + self.driver.check_for_setup_error() + + def test_create_destroy(self): + self.driver.create_volume(self.volume) + self.driver.delete_volume(self.volume) + + def test_create_vol_snapshot_destroy(self): + self.driver.create_volume(self.volume) + 
self.driver.create_snapshot(self.snapshot) + self.driver.create_volume_from_snapshot(self.volume_sec, self.snapshot) + self.driver.delete_snapshot(self.snapshot) + self.driver.delete_volume(self.volume) + + def test_map_unmap(self): + self.driver.create_volume(self.volume) + updates = self.driver.create_export(None, self.volume) + self.assertTrue(updates['provider_location']) + self.volume['provider_location'] = updates['provider_location'] + + connection_info = self.driver.initialize_connection(self.volume, + self.connector) + self.assertEqual(connection_info['driver_volume_type'], 'iscsi') + properties = connection_info['data'] + if not properties: + raise AssertionError('Target portal is none') + self.driver.terminate_connection(self.volume, self.connector) + self.driver.delete_volume(self.volume) + + def test_fail_vol_from_snapshot_creation(self): + self.driver.create_volume(self.volume) + try: + self.driver.create_volume_from_snapshot(self.volume, + self.snapshot_fail) + raise AssertionError() + except VolumeBackendAPIException: + pass + finally: + self.driver.delete_volume(self.volume) + + def test_cloned_volume_destroy(self): + self.driver.create_volume(self.volume) + self.driver.create_cloned_volume(self.snapshot, self.volume) + self.driver.delete_volume(self.snapshot) + self.driver.delete_volume(self.volume) + + def test_fail_cloned_volume_creation(self): + self.driver.create_volume(self.volume) + try: + self.driver.create_cloned_volume(self.volume_clone_fail, + self.volume) + raise AssertionError() + except VolumeBackendAPIException: + pass + finally: + self.driver.delete_volume(self.volume) + + +RESPONSE_PREFIX_DIRECT_CMODE = """ +""" + +RESPONSE_PREFIX_DIRECT_7MODE = """ +""" + +RESPONSE_PREFIX_DIRECT = """ +""" + +RESPONSE_SUFFIX_DIRECT = """""" + + +class FakeDirectCMODEServerHandler(FakeHTTPRequestHandler): + """HTTP handler that fakes enough stuff to allow the driver to run""" + + def do_GET(s): + """Respond to a GET request.""" + if '/servlets/netapp.servlets.admin.XMLrequest_filer' != s.path: + s.send_response(404) + s.end_headers + return + s.send_response(200) + s.send_header("Content-Type", "text/xml; charset=utf-8") + s.end_headers() + out = s.wfile + out.write('' + '') + + def do_POST(s): + """Respond to a POST request.""" + if '/servlets/netapp.servlets.admin.XMLrequest_filer' != s.path: + s.send_response(404) + s.end_headers + return + request_xml = s.rfile.read(int(s.headers['Content-Length'])) + root = etree.fromstring(request_xml) + body = [x for x in root.iterchildren()] + request = body[0] + tag = request.tag + api = etree.QName(tag).localname or tag + if 'lun-get-iter' == api: + tag = \ + FakeDirectCMODEServerHandler._get_child_by_name(request, 'tag') + if tag is None: + body = """ + + indeterminate + 512 + 1354536362 + + false + true + + falselinux + + true/vol/navneet/lun2 + 0 + false2FfGI$APyN68 + none20971520 + 0false + 0 + cec1f3d7-3d41-11e2-9cf4-123478563412 + navneetben_vserver + + <lun-get-iter-key-td> + <key-0>ben_vserver</key-0> + <key-1>/vol/navneet/lun2</key-1> + <key-2>navneet</key-2> + <key-3></key-3> + <key-4>lun2</key-4> + </lun-get-iter-key-td> + 1""" + else: + body = """ + + indeterminate + 512 + 1354536362 + + false + true + + falselinux + + true/vol/navneet/lun3 + 0 + false2FfGI$APyN68 + + none20971520 + 0false + 0 + cec1f3d7-3d41-11e2-9cf4-123478563412 + navneetben_vserver + + 1""" + elif 'volume-get-iter' == api: + tag = \ + FakeDirectCMODEServerHandler._get_child_by_name(request, 'tag') + if tag is None: + body = """ + + iscsi + 
Openstack + + + 214748364 + + true + + falseonline + + + nfsvol + openstack + + + 247483648 + + true + + falseonline + + + <volume-get-iter-key-td> + <key-0>openstack</key-0> + <key-1>nfsvol</key-1> + </volume-get-iter-key-td> + 2""" + else: + body = """ + + iscsi + Openstack + + + 4147483648 + + true + + falseonline + + + nfsvol + openstack + + + 8147483648 + + true + + falseonline + + + 2""" + elif 'lun-create-by-size' == api: + body = """ + 22020096""" + elif 'lun-destroy' == api: + body = """""" + elif 'igroup-get-iter' == api: + init_found = True + query = FakeDirectCMODEServerHandler._get_child_by_name(request, + 'query') + if query is not None: + igroup_info = FakeDirectCMODEServerHandler._get_child_by_name( + query, 'initiator-group-info') + if igroup_info is not None: + inits = FakeDirectCMODEServerHandler._get_child_by_name( + igroup_info, 'initiators') + if inits is not None: + init_info = \ + FakeDirectCMODEServerHandler._get_child_by_name( + inits, 'initiator-info') + init_name = \ + FakeDirectCMODEServerHandler._get_child_content( + init_info, + 'initiator-name') + if init_name == 'iqn.1993-08.org.debian:01:10': + init_found = True + else: + init_found = False + if init_found: + tag = \ + FakeDirectCMODEServerHandler._get_child_by_name( + request, 'tag') + if tag is None: + body = """ + + openstack-01f5297b-00f7-4170-bf30-69b1314b2118 + + windows + iscsi + + + iqn.1993-08.org.debian:01:10 + + openstack + + <igroup-get-iter-key-td> + <key-0>openstack</key-0> + <key-1> + openstack-01f5297b-00f7-4170-bf30-69b1314b2118< + /key-1> + </igroup-get-iter-key-td> + 1""" + else: + body = """ + + openstack-01f5297b-00f7-4170-bf30-69b1314b2118 + + linux + iscsi + + + iqn.1993-08.org.debian:01:10 + + openstack + 1""" + else: + body = """ + 0 + """ + elif 'lun-map-get-iter' == api: + tag = \ + FakeDirectCMODEServerHandler._get_child_by_name(request, 'tag') + if tag is None: + body = """ + + openstack-44c5e7e1-3306-4800-9623-259e57d56a83 + + 948ae304-06e9-11e2 + 0 + 5587e563-06e9-11e2-9cf4-123478563412 + /vol/openvol/lun1 + openstack + + + <lun-map-get-iter-key-td> + <key-0>openstack</key-0> + <key-1>openstack-01f5297b-00f7-4170-bf30-69b1314b2118< + /key-1> + </lun-map-get-iter-key-td> + + 1 + """ + else: + body = """ + + openstack-44c5e7e1-3306-4800-9623-259e57d56a83 + + 948ae304-06e9-11e2 + 0 + 5587e563-06e9-11e2-9cf4-123478563412 + /vol/openvol/lun1 + openstack + 1 + """ + elif 'lun-map' == api: + body = """1 + + """ + elif 'iscsi-service-get-iter' == api: + body = """ + + openstack + true + iqn.1992-08.com.netapp:sn.fa9:vs.105 + openstack + 1""" + elif 'iscsi-interface-get-iter' == api: + body = """ + + fas3170rre-cmode-01 + e1b-1165 + + iscsi_data_if + 10.63.165.216 + 3260true + + 5 + iscsi_data_if + 1038 + openstack + + 1""" + elif 'igroup-create' == api: + body = """""" + elif 'igroup-add' == api: + body = """""" + elif 'clone-create' == api: + body = """""" + elif 'lun-unmap' == api: + body = """""" + elif 'system-get-ontapi-version' == api: + body = """ + 1 + 19 + """ + else: + # Unknown API + s.send_response(500) + s.end_headers + return + s.send_response(200) + s.send_header("Content-Type", "text/xml; charset=utf-8") + s.end_headers() + s.wfile.write(RESPONSE_PREFIX_DIRECT_CMODE) + s.wfile.write(RESPONSE_PREFIX_DIRECT) + s.wfile.write(body) + s.wfile.write(RESPONSE_SUFFIX_DIRECT) + + @staticmethod + def _get_child_by_name(self, name): + for child in self.iterchildren(): + if child.tag == name or etree.QName(child.tag).localname == name: + return child + return None + + 
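+    # NOTE: _get_child_by_name above and _get_child_content below match
+    # children by the local part of the tag, so lookups succeed whether or
+    # not the request XML is namespace-qualified. Illustrative lxml
+    # behaviour (the namespace here is a made-up example):
+    #
+    #   etree.QName('{urn:example}lun-create-by-size').localname
+    #   # -> 'lun-create-by-size'
+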
@staticmethod + def _get_child_content(self, name): + """Get the content of the child""" + for child in self.iterchildren(): + if child.tag == name or etree.QName(child.tag).localname == name: + return child.text + return None + + +class FakeDirectCmodeHTTPConnection(object): + """A fake httplib.HTTPConnection for netapp tests + + Requests made via this connection actually get translated and routed into + the fake direct handler above, we then turn the response into + the httplib.HTTPResponse that the caller expects. + """ + def __init__(self, host, timeout=None): + self.host = host + + def request(self, method, path, data=None, headers=None): + if not headers: + headers = {} + req_str = '%s %s HTTP/1.1\r\n' % (method, path) + for key, value in headers.iteritems(): + req_str += "%s: %s\r\n" % (key, value) + if data: + req_str += '\r\n%s' % data + + # NOTE(vish): normally the http transport normailizes from unicode + sock = FakeHttplibSocket(req_str.decode("latin-1").encode("utf-8")) + # NOTE(vish): stop the server from trying to look up address from + # the fake socket + FakeDirectCMODEServerHandler.address_string = lambda x: '127.0.0.1' + self.app = FakeDirectCMODEServerHandler(sock, '127.0.0.1:80', None) + + self.sock = FakeHttplibSocket(sock.result) + self.http_response = httplib.HTTPResponse(self.sock) + + def set_debuglevel(self, level): + pass + + def getresponse(self): + self.http_response.begin() + return self.http_response + + def getresponsebody(self): + return self.sock.result + + +class NetAppDirectCmodeISCSIDriverTestCase(NetAppCmodeISCSIDriverTestCase): + """Test case for NetAppISCSIDriver""" + + vol_fail = {'name': 'lun_fail', 'size': 10000, 'volume_name': 'lun1', + 'os_type': 'linux', 'provider_location': 'lun1', + 'id': 'lun1', 'provider_auth': None, 'project_id': 'project', + 'display_name': None, 'display_description': 'lun1', + 'volume_type_id': None} + + def setUp(self): + super(NetAppDirectCmodeISCSIDriverTestCase, self).setUp() + + def _custom_setup(self): + driver = iscsi.NetAppDirectCmodeISCSIDriver( + configuration=create_configuration()) + self.stubs.Set(httplib, 'HTTPConnection', + FakeDirectCmodeHTTPConnection) + driver._create_client(transport_type='http', + login='admin', password='pass', + hostname='127.0.0.1', + port='80') + driver.vserver = 'openstack' + driver.client.set_api_version(1, 15) + self.driver = driver + + def test_map_by_creating_igroup(self): + self.driver.create_volume(self.volume) + updates = self.driver.create_export(None, self.volume) + self.assertTrue(updates['provider_location']) + self.volume['provider_location'] = updates['provider_location'] + connector_new = {'initiator': 'iqn.1993-08.org.debian:01:1001'} + connection_info = self.driver.initialize_connection(self.volume, + connector_new) + self.assertEqual(connection_info['driver_volume_type'], 'iscsi') + properties = connection_info['data'] + if not properties: + raise AssertionError('Target portal is none') + + def test_fail_create_vol(self): + self.assertRaises(VolumeBackendAPIException, + self.driver.create_volume, self.vol_fail) + + +class FakeDirect7MODEServerHandler(FakeHTTPRequestHandler): + """HTTP handler that fakes enough stuff to allow the driver to run""" + + def do_GET(s): + """Respond to a GET request.""" + if '/servlets/netapp.servlets.admin.XMLrequest_filer' != s.path: + s.send_response(404) + s.end_headers + return + s.send_response(200) + s.send_header("Content-Type", "text/xml; charset=utf-8") + s.end_headers() + out = s.wfile + out.write('' + '') + + def 
do_POST(s): + """Respond to a POST request.""" + if '/servlets/netapp.servlets.admin.XMLrequest_filer' != s.path: + s.send_response(404) + s.end_headers + return + request_xml = s.rfile.read(int(s.headers['Content-Length'])) + root = etree.fromstring(request_xml) + body = [x for x in root.iterchildren()] + request = body[0] + tag = request.tag + api = etree.QName(tag).localname or tag + if 'lun-list-info' == api: + body = """ + false + false + + + /vol/vol1/clone1 + 20971520 + true + false + false + false + none + linux + e867d844-c2c0-11e0-9282-00a09825b3b5 + P3lgP4eTyaNl + 512 + true + 0 + indeterminate + + + /vol/vol1/lun1 + 20971520 + true + false + false + false + none + linux + 8e1e9284-c288-11e0-9282-00a09825b3b5 + P3lgP4eTc3lp + 512 + true + 0 + indeterminate + + + """ + elif 'volume-list-info' == api: + body = """ + + + vol0 + 019c8f7a-9243-11e0-9281-00a09825b3b5 + flex + 32_bit + online + 576914493440 + 13820354560 + 563094110208 + 2 + 20 + 140848264 + 0 + 0 + 0 + 0 + 20907162 + 7010 + 518 + 31142 + 31142 + 0 + false + aggr0 + + + disabled + idle + idle for 70:36:44 + regular + sun-sat@0 + Mon Aug 8 09:34:15 EST 2011 + + Mon Aug 8 09:34:15 EST 2011 + + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + + + + false + + volume + true + 14 + raid_dp,sis + block + true + false + false + false + false + unmirrored + 3 + 1 + + + /aggr0/plex0 + true + false + + + + + vol1 + 2d50ecf4-c288-11e0-9282-00a09825b3b5 + flex + 32_bit + online + 42949672960 + 44089344 + 42905583616 + 0 + 20 + 10485760 + 8192 + 8192 + 0 + 0 + 1556480 + 110 + 504 + 31142 + 31142 + 0 + false + aggr1 + + + disabled + idle + idle for 89:19:59 + regular + sun-sat@0 + Sun Aug 7 14:51:00 EST 2011 + + Sun Aug 7 14:51:00 EST 2011 + + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + + + + false + + volume + true + 7 + raid4,sis + block + true + false + false + false + false + unmirrored + 2 + 1 + + + /aggr1/plex0 + true + false + + + + + """ + elif 'volume-options-list-info' == api: + body = """ + + + snapmirrored + off + + + root + false + + + ha_policy + cfo + + + striping + not_striped + + + compression + off + + + """ + elif 'lun-create-by-size' == api: + body = """ + 22020096""" + elif 'lun-destroy' == api: + body = """""" + elif 'igroup-list-info' == api: + body = """ + + + openstack-8bc96490 + iscsi + b8e1d274-c378-11e0 + linux + 0 + false + + false + false + true + + + + iqn.1993-08.org.debian:01:10 + + + + + iscsi_group + iscsi + ccb8cbe4-c36f + linux + 0 + false + + false + false + true + + + + iqn.1993-08.org.debian:01:10ca + + + + + """ + elif 'lun-map-list-info' == api: + body = """ + + """ + elif 'lun-map' == api: + body = """1 + + """ + elif 'iscsi-node-get-name' == api: + body = """ + iqn.1992-08.com.netapp:sn.135093938 + """ + elif 'iscsi-portal-list-info' == api: + body = """ + + + 10.61.176.156 + 3260 + 1000 + e0a + + + """ + elif 'igroup-create' == api: + body = """""" + elif 'igroup-add' == api: + body = """""" + elif 'clone-start' == api: + body = """ + + + 2d50ecf4-c288-11e0-9282-00a09825b3b5 + 11 + + + """ + elif 'clone-list-status' == api: + body = """ + + + completed + + + """ + elif 'lun-unmap' == api: + body = """""" + elif 'system-get-ontapi-version' == api: + body = """ + 1 + 8 + """ + elif 'lun-set-space-reservation-info' == api: + body = """""" + else: + # Unknown API + s.send_response(500) + s.end_headers + return + s.send_response(200) + s.send_header("Content-Type", "text/xml; charset=utf-8") + s.end_headers() + s.wfile.write(RESPONSE_PREFIX_DIRECT_7MODE) + s.wfile.write(RESPONSE_PREFIX_DIRECT) + 
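+        # Every recognized API is answered with the same framing: a
+        # response prefix, the canned body selected above, then the closing
+        # suffix, so the client always reads one complete XML document.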
s.wfile.write(body) + s.wfile.write(RESPONSE_SUFFIX_DIRECT) + + +class FakeDirect7modeHTTPConnection(object): + """A fake httplib.HTTPConnection for netapp tests + + Requests made via this connection actually get translated and routed into + the fake direct handler above, we then turn the response into + the httplib.HTTPResponse that the caller expects. + """ + def __init__(self, host, timeout=None): + self.host = host + + def request(self, method, path, data=None, headers=None): + if not headers: + headers = {} + req_str = '%s %s HTTP/1.1\r\n' % (method, path) + for key, value in headers.iteritems(): + req_str += "%s: %s\r\n" % (key, value) + if data: + req_str += '\r\n%s' % data + + # NOTE(vish): normally the http transport normailizes from unicode + sock = FakeHttplibSocket(req_str.decode("latin-1").encode("utf-8")) + # NOTE(vish): stop the server from trying to look up address from + # the fake socket + FakeDirect7MODEServerHandler.address_string = lambda x: '127.0.0.1' + self.app = FakeDirect7MODEServerHandler(sock, '127.0.0.1:80', None) + + self.sock = FakeHttplibSocket(sock.result) + self.http_response = httplib.HTTPResponse(self.sock) + + def set_debuglevel(self, level): + pass + + def getresponse(self): + self.http_response.begin() + return self.http_response + + def getresponsebody(self): + return self.sock.result + + +class NetAppDirect7modeISCSIDriverTestCase_NV( + NetAppDirectCmodeISCSIDriverTestCase): + """Test case for NetAppISCSIDriver + No vfiler + """ + def setUp(self): + super(NetAppDirect7modeISCSIDriverTestCase_NV, self).setUp() + + def _custom_setup(self): + driver = iscsi.NetAppDirect7modeISCSIDriver( + configuration=create_configuration()) + self.stubs.Set(httplib, + 'HTTPConnection', FakeDirect7modeHTTPConnection) + driver._create_client(transport_type='http', + login='admin', password='pass', + hostname='127.0.0.1', + port='80') + driver.vfiler = None + driver.volume_list = None + self.driver = driver + + def test_create_on_select_vol(self): + self.driver.volume_list = ['vol0', 'vol1'] + self.driver.create_volume(self.volume) + self.driver.delete_volume(self.volume) + self.driver.volume_list = [] + + def test_create_fail_on_select_vol(self): + self.driver.volume_list = ['vol2', 'vol3'] + success = False + try: + self.driver.create_volume(self.volume) + except VolumeBackendAPIException: + success = True + pass + finally: + self.driver.volume_list = [] + if not success: + raise AssertionError('Failed creating on selected volumes') + + +class NetAppDirect7modeISCSIDriverTestCase_WV( + NetAppDirect7modeISCSIDriverTestCase_NV): + """Test case for NetAppISCSIDriver + With vfiler + """ + def setUp(self): + super(NetAppDirect7modeISCSIDriverTestCase_WV, self).setUp() + + def _custom_setup(self): + driver = iscsi.NetAppDirect7modeISCSIDriver( + configuration=create_configuration()) + self.stubs.Set(httplib, 'HTTPConnection', + FakeDirect7modeHTTPConnection) + driver._create_client(transport_type='http', + login='admin', password='pass', + hostname='127.0.0.1', + port='80') + driver.vfiler = 'vfiler' + driver.client.set_api_version(1, 7) + driver.volume_list = None + self.driver = driver diff --git a/cinder/tests/test_netapp_nfs.py b/cinder/tests/test_netapp_nfs.py new file mode 100644 index 0000000000..b7476d5d62 --- /dev/null +++ b/cinder/tests/test_netapp_nfs.py @@ -0,0 +1,695 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2012 NetApp, Inc. +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +"""Unit tests for the NetApp-specific NFS driver module (netapp_nfs).""" + +from cinder import context +from cinder import exception +from cinder import test + +from cinder.volume import configuration as conf +from cinder.volume.drivers.netapp import api +from cinder.volume.drivers.netapp import nfs as netapp_nfs +from cinder.volume.drivers import nfs +from lxml import etree +from mox import IgnoreArg +from mox import IsA +from mox import MockObject + +import mox +import suds +import types + + +def create_configuration(): + configuration = mox.MockObject(conf.Configuration) + configuration.append_config_values(mox.IgnoreArg()) + return configuration + + +class FakeVolume(object): + def __init__(self, size=0): + self.size = size + self.id = hash(self) + self.name = None + + def __getitem__(self, key): + return self.__dict__[key] + + +class FakeSnapshot(object): + def __init__(self, volume_size=0): + self.volume_name = None + self.name = None + self.volume_id = None + self.volume_size = volume_size + self.user_id = None + self.status = None + + def __getitem__(self, key): + return self.__dict__[key] + + +class FakeResponce(object): + def __init__(self, status): + """ + :param status: Either 'failed' or 'passed' + """ + self.Status = status + + if status == 'failed': + self.Reason = 'Sample error' + + +class NetappNfsDriverTestCase(test.TestCase): + """Test case for NetApp specific NFS clone driver.""" + + def setUp(self): + self._mox = mox.Mox() + self._driver = netapp_nfs.NetAppNFSDriver( + configuration=create_configuration()) + + def tearDown(self): + self._mox.UnsetStubs() + + def test_check_for_setup_error(self): + mox = self._mox + drv = self._driver + required_flags = ['netapp_wsdl_url', + 'netapp_login', + 'netapp_password', + 'netapp_server_hostname', + 'netapp_server_port'] + + # set required flags + for flag in required_flags: + setattr(drv.configuration, flag, None) + + # check exception raises when flags are not set + self.assertRaises(exception.CinderException, + drv.check_for_setup_error) + + # set required flags + for flag in required_flags: + setattr(drv.configuration, flag, 'val') + + mox.StubOutWithMock(nfs.NfsDriver, 'check_for_setup_error') + nfs.NfsDriver.check_for_setup_error() + mox.ReplayAll() + + drv.check_for_setup_error() + + mox.VerifyAll() + + # restore initial FLAGS + for flag in required_flags: + delattr(drv.configuration, flag) + + def test_do_setup(self): + mox = self._mox + drv = self._driver + + mox.StubOutWithMock(drv, 'check_for_setup_error') + mox.StubOutWithMock(drv, '_get_client') + + drv.check_for_setup_error() + drv._get_client() + + mox.ReplayAll() + + drv.do_setup(IsA(context.RequestContext)) + + mox.VerifyAll() + + def test_create_snapshot(self): + """Test snapshot can be created and deleted.""" + mox = self._mox + drv = self._driver + + mox.StubOutWithMock(drv, '_clone_volume') + drv._clone_volume(IgnoreArg(), IgnoreArg(), IgnoreArg()) + mox.ReplayAll() + + drv.create_snapshot(FakeSnapshot()) + + 
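+
+        # NOTE: create_snapshot is expected to delegate to the recorded
+        # _clone_volume call; VerifyAll() below fails the test if that
+        # call never happened.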
mox.VerifyAll() + + def test_create_volume_from_snapshot(self): + """Tests volume creation from snapshot.""" + drv = self._driver + mox = self._mox + volume = FakeVolume(1) + snapshot = FakeSnapshot(2) + + self.assertRaises(exception.CinderException, + drv.create_volume_from_snapshot, + volume, + snapshot) + + snapshot = FakeSnapshot(1) + + location = '127.0.0.1:/nfs' + expected_result = {'provider_location': location} + mox.StubOutWithMock(drv, '_clone_volume') + mox.StubOutWithMock(drv, '_get_volume_location') + drv._clone_volume(IgnoreArg(), IgnoreArg(), IgnoreArg()) + drv._get_volume_location(IgnoreArg()).AndReturn(location) + + mox.ReplayAll() + + loc = drv.create_volume_from_snapshot(volume, snapshot) + + self.assertEquals(loc, expected_result) + + mox.VerifyAll() + + def _prepare_delete_snapshot_mock(self, snapshot_exists): + drv = self._driver + mox = self._mox + + mox.StubOutWithMock(drv, '_get_provider_location') + mox.StubOutWithMock(drv, '_volume_not_present') + + if snapshot_exists: + mox.StubOutWithMock(drv, '_execute') + mox.StubOutWithMock(drv, '_get_volume_path') + + drv._get_provider_location(IgnoreArg()) + drv._volume_not_present(IgnoreArg(), + IgnoreArg()).AndReturn(not snapshot_exists) + + if snapshot_exists: + drv._get_volume_path(IgnoreArg(), IgnoreArg()) + drv._execute('rm', None, run_as_root=True) + + mox.ReplayAll() + + return mox + + def test_delete_existing_snapshot(self): + drv = self._driver + mox = self._prepare_delete_snapshot_mock(True) + + drv.delete_snapshot(FakeSnapshot()) + + mox.VerifyAll() + + def test_delete_missing_snapshot(self): + drv = self._driver + mox = self._prepare_delete_snapshot_mock(False) + + drv.delete_snapshot(FakeSnapshot()) + + mox.VerifyAll() + + def _prepare_clone_mock(self, status): + drv = self._driver + mox = self._mox + + volume = FakeVolume() + setattr(volume, 'provider_location', '127.0.0.1:/nfs') + + drv._client = MockObject(suds.client.Client) + drv._client.factory = MockObject(suds.client.Factory) + drv._client.service = MockObject(suds.client.ServiceSelector) + + # ApiProxy() method is generated by ServiceSelector at runtime from the + # XML, so mocking is impossible. 
+ setattr(drv._client.service, + 'ApiProxy', + types.MethodType(lambda *args, **kwargs: FakeResponce(status), + suds.client.ServiceSelector)) + mox.StubOutWithMock(drv, '_get_host_id') + mox.StubOutWithMock(drv, '_get_full_export_path') + + drv._get_host_id(IgnoreArg()).AndReturn('10') + drv._get_full_export_path(IgnoreArg(), IgnoreArg()).AndReturn('/nfs') + + return mox + + def test_successfull_clone_volume(self): + drv = self._driver + mox = self._prepare_clone_mock('passed') + # set required flags + setattr(drv.configuration, 'synchronous_snapshot_create', False) + mox.ReplayAll() + + volume_name = 'volume_name' + clone_name = 'clone_name' + volume_id = volume_name + str(hash(volume_name)) + + drv._clone_volume(volume_name, clone_name, volume_id) + + mox.VerifyAll() + + def test_failed_clone_volume(self): + drv = self._driver + mox = self._prepare_clone_mock('failed') + + mox.ReplayAll() + + volume_name = 'volume_name' + clone_name = 'clone_name' + volume_id = volume_name + str(hash(volume_name)) + + self.assertRaises(exception.CinderException, + drv._clone_volume, + volume_name, clone_name, volume_id) + + mox.VerifyAll() + + def test_cloned_volume_size_fail(self): + volume_clone_fail = FakeVolume(1) + volume_src = FakeVolume(2) + try: + self._driver.create_cloned_volume(volume_clone_fail, + volume_src) + raise AssertionError() + except exception.CinderException: + pass + + +class NetappCmodeNfsDriverTestCase(test.TestCase): + """Test case for NetApp C Mode specific NFS clone driver""" + + def setUp(self): + self._mox = mox.Mox() + self._custom_setup() + + def _custom_setup(self): + self._driver = netapp_nfs.NetAppCmodeNfsDriver( + configuration=create_configuration()) + + def tearDown(self): + self._mox.UnsetStubs() + + def test_check_for_setup_error(self): + mox = self._mox + drv = self._driver + required_flags = [ + 'netapp_wsdl_url', + 'netapp_login', + 'netapp_password', + 'netapp_server_hostname', + 'netapp_server_port'] + + # set required flags + for flag in required_flags: + setattr(drv.configuration, flag, None) + # check exception raises when flags are not set + self.assertRaises(exception.CinderException, + drv.check_for_setup_error) + + # set required flags + for flag in required_flags: + setattr(drv.configuration, flag, 'val') + + mox.ReplayAll() + + drv.check_for_setup_error() + + mox.VerifyAll() + + # restore initial FLAGS + for flag in required_flags: + delattr(drv.configuration, flag) + + def test_do_setup(self): + mox = self._mox + drv = self._driver + + mox.StubOutWithMock(drv, 'check_for_setup_error') + mox.StubOutWithMock(drv, '_get_client') + + drv.check_for_setup_error() + drv._get_client() + + mox.ReplayAll() + + drv.do_setup(IsA(context.RequestContext)) + + mox.VerifyAll() + + def test_create_snapshot(self): + """Test snapshot can be created and deleted""" + mox = self._mox + drv = self._driver + + mox.StubOutWithMock(drv, '_clone_volume') + drv._clone_volume(IgnoreArg(), IgnoreArg(), IgnoreArg()) + mox.ReplayAll() + + drv.create_snapshot(FakeSnapshot()) + + mox.VerifyAll() + + def test_create_volume_from_snapshot(self): + """Tests volume creation from snapshot""" + drv = self._driver + mox = self._mox + volume = FakeVolume(1) + snapshot = FakeSnapshot(2) + + self.assertRaises(exception.CinderException, + drv.create_volume_from_snapshot, + volume, + snapshot) + + snapshot = FakeSnapshot(1) + + location = '127.0.0.1:/nfs' + expected_result = {'provider_location': location} + mox.StubOutWithMock(drv, '_clone_volume') + mox.StubOutWithMock(drv, 
'_get_volume_location') + drv._clone_volume(IgnoreArg(), IgnoreArg(), IgnoreArg()) + drv._get_volume_location(IgnoreArg()).AndReturn(location) + + mox.ReplayAll() + + loc = drv.create_volume_from_snapshot(volume, snapshot) + + self.assertEquals(loc, expected_result) + + mox.VerifyAll() + + def _prepare_delete_snapshot_mock(self, snapshot_exists): + drv = self._driver + mox = self._mox + + mox.StubOutWithMock(drv, '_get_provider_location') + mox.StubOutWithMock(drv, '_volume_not_present') + + if snapshot_exists: + mox.StubOutWithMock(drv, '_execute') + mox.StubOutWithMock(drv, '_get_volume_path') + + drv._get_provider_location(IgnoreArg()) + drv._volume_not_present(IgnoreArg(), IgnoreArg())\ + .AndReturn(not snapshot_exists) + + if snapshot_exists: + drv._get_volume_path(IgnoreArg(), IgnoreArg()) + drv._execute('rm', None, run_as_root=True) + + mox.ReplayAll() + + return mox + + def test_delete_existing_snapshot(self): + drv = self._driver + mox = self._prepare_delete_snapshot_mock(True) + + drv.delete_snapshot(FakeSnapshot()) + + mox.VerifyAll() + + def test_delete_missing_snapshot(self): + drv = self._driver + mox = self._prepare_delete_snapshot_mock(False) + + drv.delete_snapshot(FakeSnapshot()) + + mox.VerifyAll() + + def _prepare_clone_mock(self, status): + drv = self._driver + mox = self._mox + + volume = FakeVolume() + setattr(volume, 'provider_location', '127.0.0.1:/nfs') + + drv._client = MockObject(suds.client.Client) + drv._client.factory = MockObject(suds.client.Factory) + drv._client.service = MockObject(suds.client.ServiceSelector) + # CloneNasFile method is generated by ServiceSelector at runtime from + # the + # XML, so mocking is impossible. + setattr(drv._client.service, + 'CloneNasFile', + types.MethodType(lambda *args, **kwargs: FakeResponce(status), + suds.client.ServiceSelector)) + mox.StubOutWithMock(drv, '_get_host_ip') + mox.StubOutWithMock(drv, '_get_export_path') + + drv._get_host_ip(IgnoreArg()).AndReturn('127.0.0.1') + drv._get_export_path(IgnoreArg()).AndReturn('/nfs') + return mox + + def test_clone_volume(self): + drv = self._driver + mox = self._prepare_clone_mock('passed') + + mox.ReplayAll() + + volume_name = 'volume_name' + clone_name = 'clone_name' + volume_id = volume_name + str(hash(volume_name)) + + drv._clone_volume(volume_name, clone_name, volume_id) + + mox.VerifyAll() + + def test_cloned_volume_size_fail(self): + volume_clone_fail = FakeVolume(1) + volume_src = FakeVolume(2) + try: + self._driver.create_cloned_volume(volume_clone_fail, + volume_src) + raise AssertionError() + except exception.CinderException: + pass + + +class NetappDirectCmodeNfsDriverTestCase(NetappCmodeNfsDriverTestCase): + """Test direct NetApp C Mode driver""" + def _custom_setup(self): + self._driver = netapp_nfs.NetAppDirectCmodeNfsDriver( + configuration=create_configuration()) + + def test_check_for_setup_error(self): + mox = self._mox + drv = self._driver + required_flags = [ + 'netapp_transport_type', + 'netapp_login', + 'netapp_password', + 'netapp_server_hostname', + 'netapp_server_port'] + + # set required flags + for flag in required_flags: + setattr(drv.configuration, flag, None) + # check exception raises when flags are not set + self.assertRaises(exception.CinderException, + drv.check_for_setup_error) + + # set required flags + for flag in required_flags: + setattr(drv.configuration, flag, 'val') + + mox.ReplayAll() + + drv.check_for_setup_error() + + mox.VerifyAll() + + # restore initial FLAGS + for flag in required_flags: + delattr(drv.configuration, flag) + + 
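+    # test_do_setup below, like every case in this module, walks mox
+    # through its record/replay/verify cycle. A minimal sketch of that
+    # pattern (obj and 'method' are placeholders, not driver APIs):
+    #
+    #   m = mox.Mox()
+    #   m.StubOutWithMock(obj, 'method')      # record: expect this call...
+    #   obj.method('arg').AndReturn('value')  # ...with a canned result
+    #   m.ReplayAll()                         # switch to replay mode
+    #   obj.method('arg')                     # made by the code under test
+    #   m.VerifyAll()                         # fail unless all calls happened
+    #   m.UnsetStubs()
+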
def test_do_setup(self): + mox = self._mox + drv = self._driver + + mox.StubOutWithMock(drv, 'check_for_setup_error') + mox.StubOutWithMock(drv, '_get_client') + mox.StubOutWithMock(drv, '_do_custom_setup') + + drv.check_for_setup_error() + drv._get_client() + drv._do_custom_setup(IgnoreArg()) + + mox.ReplayAll() + + drv.do_setup(IsA(context.RequestContext)) + + mox.VerifyAll() + + def _prepare_clone_mock(self, status): + drv = self._driver + mox = self._mox + + volume = FakeVolume() + setattr(volume, 'provider_location', '127.0.0.1:/nfs') + + mox.StubOutWithMock(drv, '_get_host_ip') + mox.StubOutWithMock(drv, '_get_export_path') + mox.StubOutWithMock(drv, '_get_if_info_by_ip') + mox.StubOutWithMock(drv, '_get_vol_by_junc_vserver') + mox.StubOutWithMock(drv, '_clone_file') + + drv._get_host_ip(IgnoreArg()).AndReturn('127.0.0.1') + drv._get_export_path(IgnoreArg()).AndReturn('/nfs') + drv._get_if_info_by_ip('127.0.0.1').AndReturn( + self._prepare_info_by_ip_response()) + drv._get_vol_by_junc_vserver('openstack', '/nfs').AndReturn('nfsvol') + drv._clone_file('nfsvol', 'volume_name', 'clone_name', + 'openstack') + return mox + + def _prepare_info_by_ip_response(self): + res = """ + +
127.0.0.1
+ up + fas3170rre-cmode-01 + e1b-1165 + + nfs + + none + + disabled + data + fas3170rre-cmode-01 + e1b-1165 + nfs_data1 + false + true + 255.255.255.0 + 24 + up + data + c10.63.165.0/24 + disabled + openstack +
""" + response_el = etree.XML(res) + return api.NaElement(response_el).get_children() + + def test_clone_volume(self): + drv = self._driver + mox = self._prepare_clone_mock('pass') + + mox.ReplayAll() + + volume_name = 'volume_name' + clone_name = 'clone_name' + volume_id = volume_name + str(hash(volume_name)) + + drv._clone_volume(volume_name, clone_name, volume_id) + + mox.VerifyAll() + + +class NetappDirect7modeNfsDriverTestCase(NetappDirectCmodeNfsDriverTestCase): + """Test direct NetApp C Mode driver""" + def _custom_setup(self): + self._driver = netapp_nfs.NetAppDirect7modeNfsDriver( + configuration=create_configuration()) + + def test_check_for_setup_error(self): + mox = self._mox + drv = self._driver + required_flags = [ + 'netapp_transport_type', + 'netapp_login', + 'netapp_password', + 'netapp_server_hostname', + 'netapp_server_port'] + + # set required flags + for flag in required_flags: + setattr(drv.configuration, flag, None) + # check exception raises when flags are not set + self.assertRaises(exception.CinderException, + drv.check_for_setup_error) + + # set required flags + for flag in required_flags: + setattr(drv.configuration, flag, 'val') + + mox.ReplayAll() + + drv.check_for_setup_error() + + mox.VerifyAll() + + # restore initial FLAGS + for flag in required_flags: + delattr(drv.configuration, flag) + + def test_do_setup(self): + mox = self._mox + drv = self._driver + + mox.StubOutWithMock(drv, 'check_for_setup_error') + mox.StubOutWithMock(drv, '_get_client') + mox.StubOutWithMock(drv, '_do_custom_setup') + + drv.check_for_setup_error() + drv._get_client() + drv._do_custom_setup(IgnoreArg()) + + mox.ReplayAll() + + drv.do_setup(IsA(context.RequestContext)) + + mox.VerifyAll() + + def _prepare_clone_mock(self, status): + drv = self._driver + mox = self._mox + + volume = FakeVolume() + setattr(volume, 'provider_location', '127.0.0.1:/nfs') + + mox.StubOutWithMock(drv, '_get_export_path') + mox.StubOutWithMock(drv, '_get_actual_path_for_export') + mox.StubOutWithMock(drv, '_start_clone') + mox.StubOutWithMock(drv, '_wait_for_clone_finish') + if status == 'fail': + mox.StubOutWithMock(drv, '_clear_clone') + + drv._get_export_path(IgnoreArg()).AndReturn('/nfs') + drv._get_actual_path_for_export(IgnoreArg()).AndReturn('/vol/vol1/nfs') + drv._start_clone(IgnoreArg(), IgnoreArg()).AndReturn(('1', '2')) + if status == 'fail': + drv._wait_for_clone_finish('1', '2').AndRaise( + api.NaApiError('error', 'error')) + drv._clear_clone('1') + else: + drv._wait_for_clone_finish('1', '2') + return mox + + def test_clone_volume_clear(self): + drv = self._driver + mox = self._prepare_clone_mock('fail') + + mox.ReplayAll() + + volume_name = 'volume_name' + clone_name = 'clone_name' + volume_id = volume_name + str(hash(volume_name)) + try: + drv._clone_volume(volume_name, clone_name, volume_id) + except Exception as e: + if isinstance(e, api.NaApiError): + pass + else: + raise e + + mox.VerifyAll() diff --git a/cinder/tests/test_nexenta.py b/cinder/tests/test_nexenta.py new file mode 100644 index 0000000000..aadefc1ffb --- /dev/null +++ b/cinder/tests/test_nexenta.py @@ -0,0 +1,302 @@ +#!/usr/bin/env python +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2011 Nexenta Systems, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Unit tests for OpenStack Cinder volume driver +""" + +import base64 +import urllib2 + +import cinder.flags +from cinder import test +from cinder.volume.drivers import nexenta +from cinder.volume.drivers.nexenta import jsonrpc +from cinder.volume.drivers.nexenta import volume + +FLAGS = cinder.flags.FLAGS + + +class TestNexentaDriver(test.TestCase): + TEST_VOLUME_NAME = 'volume1' + TEST_VOLUME_NAME2 = 'volume2' + TEST_SNAPSHOT_NAME = 'snapshot1' + TEST_VOLUME_REF = { + 'name': TEST_VOLUME_NAME, + 'size': 1, + } + TEST_VOLUME_REF2 = { + 'name': TEST_VOLUME_NAME2, + 'size': 1, + } + TEST_SNAPSHOT_REF = { + 'name': TEST_SNAPSHOT_NAME, + 'volume_name': TEST_VOLUME_NAME, + } + + def __init__(self, method): + super(TestNexentaDriver, self).__init__(method) + + def setUp(self): + super(TestNexentaDriver, self).setUp() + self.flags( + nexenta_host='1.1.1.1', + nexenta_volume='cinder', + nexenta_target_prefix='iqn:', + nexenta_target_group_prefix='cinder/', + nexenta_blocksize='8K', + nexenta_sparse=True, + ) + self.nms_mock = self.mox.CreateMockAnything() + for mod in ['volume', 'zvol', 'iscsitarget', + 'stmf', 'scsidisk', 'snapshot']: + setattr(self.nms_mock, mod, self.mox.CreateMockAnything()) + self.stubs.Set(jsonrpc, 'NexentaJSONProxy', + lambda *_, **__: self.nms_mock) + self.drv = volume.NexentaDriver() + self.drv.do_setup({}) + + def test_setup_error(self): + self.nms_mock.volume.object_exists('cinder').AndReturn(True) + self.mox.ReplayAll() + self.drv.check_for_setup_error() + + def test_setup_error_fail(self): + self.nms_mock.volume.object_exists('cinder').AndReturn(False) + self.mox.ReplayAll() + self.assertRaises(LookupError, self.drv.check_for_setup_error) + + def test_local_path(self): + self.assertRaises(NotImplementedError, self.drv.local_path, '') + + def test_create_volume(self): + self.nms_mock.zvol.create('cinder/volume1', '1G', '8K', True) + self.mox.ReplayAll() + self.drv.create_volume(self.TEST_VOLUME_REF) + + def test_delete_volume(self): + self.nms_mock.zvol.destroy('cinder/volume1', '') + self.mox.ReplayAll() + self.drv.delete_volume(self.TEST_VOLUME_REF) + + def test_create_snapshot(self): + self.nms_mock.zvol.create_snapshot('cinder/volume1', 'snapshot1', '') + self.mox.ReplayAll() + self.drv.create_snapshot(self.TEST_SNAPSHOT_REF) + + def test_create_volume_from_snapshot(self): + self.nms_mock.zvol.clone('cinder/volume1@snapshot1', 'cinder/volume2') + self.mox.ReplayAll() + self.drv.create_volume_from_snapshot(self.TEST_VOLUME_REF2, + self.TEST_SNAPSHOT_REF) + + def test_delete_snapshot(self): + self.nms_mock.snapshot.destroy('cinder/volume1@snapshot1', '') + self.mox.ReplayAll() + self.drv.delete_snapshot(self.TEST_SNAPSHOT_REF) + + _CREATE_EXPORT_METHODS = [ + ('iscsitarget', 'create_target', ({'target_name': 'iqn:volume1'},), + u'Unable to create iscsi target\n' + u' iSCSI target iqn.1986-03.com.sun:02:cinder-volume1 already' + u' configured\n' + u' itadm create-target failed with error 17\n', ), + ('stmf', 'create_targetgroup', ('cinder/volume1',), + u'Unable to create targetgroup: stmfadm: cinder/volume1:' + u' already exists\n', ), + ('stmf', 
'add_targetgroup_member', ('cinder/volume1', 'iqn:volume1'), + u'Unable to add member to targetgroup: stmfadm:' + u' iqn.1986-03.com.sun:02:cinder-volume1: already exists\n', ), + ('scsidisk', 'create_lu', ('cinder/volume1', {}), + u"Unable to create lu with zvol 'cinder/volume1':\n" + u" sbdadm: filename /dev/zvol/rdsk/cinder/volume1: in use\n", ), + ('scsidisk', 'add_lun_mapping_entry', ('cinder/volume1', { + 'target_group': 'cinder/volume1', 'lun': '0'}), + u"Unable to add view to zvol 'cinder/volume1' (LUNs in use: ):\n" + u" stmfadm: view entry exists\n", ), + ] + + def _stub_export_method(self, module, method, args, error, fail=False): + m = getattr(self.nms_mock, module) + m = getattr(m, method) + mock = m(*args) + if fail: + mock.AndRaise(nexenta.NexentaException(error)) + + def _stub_all_export_methods(self, fail=False): + for params in self._CREATE_EXPORT_METHODS: + self._stub_export_method(*params, fail=fail) + + def test_create_export(self): + self._stub_all_export_methods() + self.mox.ReplayAll() + retval = self.drv.create_export({}, self.TEST_VOLUME_REF) + self.assertEquals( + retval, + {'provider_location': + '%s:%s,1 %s%s 0' % (FLAGS.nexenta_host, + FLAGS.nexenta_iscsi_target_portal_port, + FLAGS.nexenta_target_prefix, + self.TEST_VOLUME_NAME)}) + + def __get_test(i): + def _test_create_export_fail(self): + for params in self._CREATE_EXPORT_METHODS[:i]: + self._stub_export_method(*params) + self._stub_export_method(*self._CREATE_EXPORT_METHODS[i], + fail=True) + self.mox.ReplayAll() + self.assertRaises(nexenta.NexentaException, + self.drv.create_export, + {}, + self.TEST_VOLUME_REF) + return _test_create_export_fail + + for i in range(len(_CREATE_EXPORT_METHODS)): + locals()['test_create_export_fail_%d' % i] = __get_test(i) + + def test_ensure_export(self): + self._stub_all_export_methods(fail=True) + self.mox.ReplayAll() + self.drv.ensure_export({}, self.TEST_VOLUME_REF) + + def test_remove_export(self): + self.nms_mock.scsidisk.delete_lu('cinder/volume1') + self.nms_mock.stmf.destroy_targetgroup('cinder/volume1') + self.nms_mock.iscsitarget.delete_target('iqn:volume1') + self.mox.ReplayAll() + self.drv.remove_export({}, self.TEST_VOLUME_REF) + + def test_remove_export_fail_0(self): + self.nms_mock.scsidisk.delete_lu('cinder/volume1') + self.nms_mock.stmf.destroy_targetgroup( + 'cinder/volume1').AndRaise(nexenta.NexentaException()) + self.nms_mock.iscsitarget.delete_target('iqn:volume1') + self.mox.ReplayAll() + self.drv.remove_export({}, self.TEST_VOLUME_REF) + + def test_remove_export_fail_1(self): + self.nms_mock.scsidisk.delete_lu('cinder/volume1') + self.nms_mock.stmf.destroy_targetgroup('cinder/volume1') + self.nms_mock.iscsitarget.delete_target( + 'iqn:volume1').AndRaise(nexenta.NexentaException()) + self.mox.ReplayAll() + self.drv.remove_export({}, self.TEST_VOLUME_REF) + + def test_get_volume_stats(self): + stats = {'size': '5368709120G', + 'used': '5368709120G', + 'available': '5368709120G', + 'health': 'ONLINE'} + self.nms_mock.volume.get_child_props( + FLAGS.nexenta_volume, + 'health|size|used|available').AndReturn(stats) + self.mox.ReplayAll() + stats = self.drv.get_volume_stats(True) + self.assertEquals(stats['storage_protocol'], 'iSCSI') + self.assertEquals(stats['volume_backend_name'], 'NexentaDriver') + self.assertEquals(stats['total_capacity_gb'], 5368709120.0) + self.assertEquals(stats['free_capacity_gb'], 5368709120.0) + self.assertEquals(stats['reserved_percentage'], 0) + self.assertEquals(stats['QoS_support'], False) + + +class 
TestNexentaJSONRPC(test.TestCase): + URL = 'http://example.com/' + URL_S = 'https://example.com/' + USER = 'user' + PASSWORD = 'password' + HEADERS = {'Authorization': 'Basic %s' % ( + base64.b64encode(':'.join((USER, PASSWORD))),), + 'Content-Type': 'application/json'} + REQUEST = 'the request' + + def setUp(self): + super(TestNexentaJSONRPC, self).setUp() + self.proxy = jsonrpc.NexentaJSONProxy( + self.URL, self.USER, self.PASSWORD, auto=True) + self.mox.StubOutWithMock(urllib2, 'Request', True) + self.mox.StubOutWithMock(urllib2, 'urlopen') + self.resp_mock = self.mox.CreateMockAnything() + self.resp_info_mock = self.mox.CreateMockAnything() + self.resp_mock.info().AndReturn(self.resp_info_mock) + urllib2.urlopen(self.REQUEST).AndReturn(self.resp_mock) + + def test_call(self): + urllib2.Request( + self.URL, + '{"object": null, "params": ["arg1", "arg2"], "method": null}', + self.HEADERS).AndReturn(self.REQUEST) + self.resp_info_mock.status = '' + self.resp_mock.read().AndReturn( + '{"error": null, "result": "the result"}') + self.mox.ReplayAll() + result = self.proxy('arg1', 'arg2') + self.assertEquals("the result", result) + + def test_call_deep(self): + urllib2.Request( + self.URL, + '{"object": "obj1.subobj", "params": ["arg1", "arg2"],' + ' "method": "meth"}', + self.HEADERS).AndReturn(self.REQUEST) + self.resp_info_mock.status = '' + self.resp_mock.read().AndReturn( + '{"error": null, "result": "the result"}') + self.mox.ReplayAll() + result = self.proxy.obj1.subobj.meth('arg1', 'arg2') + self.assertEquals("the result", result) + + def test_call_auto(self): + urllib2.Request( + self.URL, + '{"object": null, "params": ["arg1", "arg2"], "method": null}', + self.HEADERS).AndReturn(self.REQUEST) + urllib2.Request( + self.URL_S, + '{"object": null, "params": ["arg1", "arg2"], "method": null}', + self.HEADERS).AndReturn(self.REQUEST) + self.resp_info_mock.status = 'EOF in headers' + self.resp_mock.read().AndReturn( + '{"error": null, "result": "the result"}') + urllib2.urlopen(self.REQUEST).AndReturn(self.resp_mock) + self.mox.ReplayAll() + result = self.proxy('arg1', 'arg2') + self.assertEquals("the result", result) + + def test_call_error(self): + urllib2.Request( + self.URL, + '{"object": null, "params": ["arg1", "arg2"], "method": null}', + self.HEADERS).AndReturn(self.REQUEST) + self.resp_info_mock.status = '' + self.resp_mock.read().AndReturn( + '{"error": {"message": "the error"}, "result": "the result"}') + self.mox.ReplayAll() + self.assertRaises(jsonrpc.NexentaJSONException, + self.proxy, 'arg1', 'arg2') + + def test_call_fail(self): + urllib2.Request( + self.URL, + '{"object": null, "params": ["arg1", "arg2"], "method": null}', + self.HEADERS).AndReturn(self.REQUEST) + self.resp_info_mock.status = 'EOF in headers' + self.proxy.auto = False + self.mox.ReplayAll() + self.assertRaises(jsonrpc.NexentaJSONException, + self.proxy, 'arg1', 'arg2') diff --git a/cinder/tests/test_nfs.py b/cinder/tests/test_nfs.py new file mode 100644 index 0000000000..2427554912 --- /dev/null +++ b/cinder/tests/test_nfs.py @@ -0,0 +1,654 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2012 NetApp, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +"""Unit tests for the NFS driver module.""" + +import __builtin__ +import errno +import os + +from oslo.config import cfg + +import mox as mox_lib +from mox import IgnoreArg +from mox import IsA +from mox import stubout + +from cinder import context +from cinder import exception +from cinder.exception import ProcessExecutionError +from cinder import test + +from cinder.volume import configuration as conf +from cinder.volume.drivers import nfs + + +class DumbVolume(object): + fields = {} + + def __setitem__(self, key, value): + self.fields[key] = value + + def __getitem__(self, item): + return self.fields[item] + + +class RemoteFsDriverTestCase(test.TestCase): + TEST_EXPORT = '1.2.3.4/export1' + TEST_FILE_NAME = 'test.txt' + + def setUp(self): + self._driver = nfs.RemoteFsDriver() + self._mox = mox_lib.Mox() + pass + + def tearDown(self): + self._mox.UnsetStubs() + + def test_create_sparsed_file(self): + (mox, drv) = self._mox, self._driver + + mox.StubOutWithMock(drv, '_execute') + drv._execute('truncate', '-s', '1G', '/path', run_as_root=True).\ + AndReturn("") + + mox.ReplayAll() + + drv._create_sparsed_file('/path', 1) + + mox.VerifyAll() + + def test_create_regular_file(self): + (mox, drv) = self._mox, self._driver + + mox.StubOutWithMock(drv, '_execute') + drv._execute('dd', 'if=/dev/zero', 'of=/path', 'bs=1M', 'count=1024', + run_as_root=True) + + mox.ReplayAll() + + drv._create_regular_file('/path', 1) + + mox.VerifyAll() + + def test_set_rw_permissions_for_all(self): + (mox, drv) = self._mox, self._driver + + mox.StubOutWithMock(drv, '_execute') + drv._execute('chmod', 'ugo+rw', '/path', run_as_root=True) + + mox.ReplayAll() + + drv._set_rw_permissions_for_all('/path') + + mox.VerifyAll() + + def test_get_hash_str(self): + """_get_hash_str should calculation correct value.""" + drv = self._driver + + self.assertEqual('4d664fd43b6ff86d80a4ea969c07b3b9', + drv._get_hash_str(self.TEST_EXPORT)) + + +class NfsDriverTestCase(test.TestCase): + """Test case for NFS driver.""" + + TEST_NFS_EXPORT1 = 'nfs-host1:/export' + TEST_NFS_EXPORT2 = 'nfs-host2:/export' + TEST_SIZE_IN_GB = 1 + TEST_MNT_POINT = '/mnt/nfs' + TEST_MNT_POINT_BASE = '/mnt/test' + TEST_LOCAL_PATH = '/mnt/nfs/volume-123' + TEST_FILE_NAME = 'test.txt' + TEST_SHARES_CONFIG_FILE = '/etc/cinder/test-shares.conf' + ONE_GB_IN_BYTES = 1024 * 1024 * 1024 + + def setUp(self): + self._mox = mox_lib.Mox() + self.stubs = stubout.StubOutForTesting() + self.configuration = mox_lib.MockObject(conf.Configuration) + self.configuration.append_config_values(mox_lib.IgnoreArg()) + self.configuration.nfs_shares_config = None + self.configuration.nfs_mount_options = None + self.configuration.nfs_mount_point_base = '$state_path/mnt' + self.configuration.nfs_disk_util = 'df' + self.configuration.nfs_sparsed_volumes = True + self._driver = nfs.NfsDriver(configuration=self.configuration) + + def tearDown(self): + self._mox.UnsetStubs() + self.stubs.UnsetAll() + + def stub_out_not_replaying(self, obj, attr_name): + attr_to_replace = getattr(obj, attr_name) + stub = mox_lib.MockObject(attr_to_replace) + self.stubs.Set(obj, attr_name, stub) + + 
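+    # The literal hex strings asserted below are hashes of the share
+    # strings: _get_hash_str is presumed to be an md5 hexdigest of the
+    # export, and local_path to join the mount base, that hash, and the
+    # volume name, e.g.:
+    #
+    #   import hashlib
+    #   h = hashlib.md5('nfs-host1:/export').hexdigest()
+    #   '/'.join(['/mnt/test', h, 'volume-123'])
+    #   # -> '/mnt/test/2f4f60214cf43c595666dd815f0360a4/volume-123'
+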
def test_local_path(self): + """local_path common use case.""" + self.configuration.nfs_mount_point_base = self.TEST_MNT_POINT_BASE + drv = self._driver + + volume = DumbVolume() + volume['provider_location'] = self.TEST_NFS_EXPORT1 + volume['name'] = 'volume-123' + + self.assertEqual( + '/mnt/test/2f4f60214cf43c595666dd815f0360a4/volume-123', + drv.local_path(volume)) + + def test_mount_nfs_should_mount_correctly(self): + """_mount_nfs common case usage.""" + mox = self._mox + drv = self._driver + + mox.StubOutWithMock(drv, '_execute') + drv._execute('mkdir', '-p', self.TEST_MNT_POINT) + drv._execute('mount', '-t', 'nfs', self.TEST_NFS_EXPORT1, + self.TEST_MNT_POINT, run_as_root=True) + + mox.ReplayAll() + + drv._mount_nfs(self.TEST_NFS_EXPORT1, self.TEST_MNT_POINT) + + mox.VerifyAll() + + def test_mount_nfs_should_suppress_already_mounted_error(self): + """_mount_nfs should suppress already mounted error if ensure=True + """ + mox = self._mox + drv = self._driver + + mox.StubOutWithMock(drv, '_execute') + drv._execute('mkdir', '-p', self.TEST_MNT_POINT) + drv._execute('mount', '-t', 'nfs', self.TEST_NFS_EXPORT1, + self.TEST_MNT_POINT, run_as_root=True).\ + AndRaise(ProcessExecutionError( + stderr='is busy or already mounted')) + + mox.ReplayAll() + + drv._mount_nfs(self.TEST_NFS_EXPORT1, self.TEST_MNT_POINT, ensure=True) + + mox.VerifyAll() + + def test_mount_nfs_should_reraise_already_mounted_error(self): + """_mount_nfs should not suppress already mounted error if ensure=False + """ + mox = self._mox + drv = self._driver + + mox.StubOutWithMock(drv, '_execute') + drv._execute('mkdir', '-p', self.TEST_MNT_POINT) + drv._execute( + 'mount', + '-t', + 'nfs', + self.TEST_NFS_EXPORT1, self.TEST_MNT_POINT, run_as_root=True).\ + AndRaise(ProcessExecutionError(stderr='is busy or ' + 'already mounted')) + + mox.ReplayAll() + + self.assertRaises(ProcessExecutionError, drv._mount_nfs, + self.TEST_NFS_EXPORT1, self.TEST_MNT_POINT, + ensure=False) + + mox.VerifyAll() + + def test_mount_nfs_should_create_mountpoint_if_not_yet(self): + """_mount_nfs should create mountpoint if it doesn't exist.""" + mox = self._mox + drv = self._driver + + mox.StubOutWithMock(drv, '_execute') + drv._execute('mkdir', '-p', self.TEST_MNT_POINT) + drv._execute(*([IgnoreArg()] * 5), run_as_root=IgnoreArg()) + + mox.ReplayAll() + + drv._mount_nfs(self.TEST_NFS_EXPORT1, self.TEST_MNT_POINT) + + mox.VerifyAll() + + def test_mount_nfs_should_not_create_mountpoint_if_already(self): + """_mount_nfs should not create mountpoint if it already exists.""" + mox = self._mox + drv = self._driver + + mox.StubOutWithMock(drv, '_execute') + drv._execute('mkdir', '-p', self.TEST_MNT_POINT) + drv._execute(*([IgnoreArg()] * 5), run_as_root=IgnoreArg()) + + mox.ReplayAll() + + drv._mount_nfs(self.TEST_NFS_EXPORT1, self.TEST_MNT_POINT) + + mox.VerifyAll() + + def test_get_hash_str(self): + """_get_hash_str should calculation correct value.""" + drv = self._driver + + self.assertEqual('2f4f60214cf43c595666dd815f0360a4', + drv._get_hash_str(self.TEST_NFS_EXPORT1)) + + def test_get_mount_point_for_share(self): + """_get_mount_point_for_share should calculate correct value.""" + drv = self._driver + + self.configuration.nfs_mount_point_base = self.TEST_MNT_POINT_BASE + + self.assertEqual('/mnt/test/2f4f60214cf43c595666dd815f0360a4', + drv._get_mount_point_for_share(self.TEST_NFS_EXPORT1)) + + def test_get_available_capacity_with_df(self): + """_get_available_capacity should calculate correct value.""" + mox = self._mox + drv = self._driver + + 
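+        # Canned `df -P -B 1` output: a header line plus one data line in
+        # which the second column (total size) and fourth column (available)
+        # are the byte counts _get_available_capacity is expected to parse.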
df_total_size = 2620544 + df_avail = 1490560 + df_head = 'Filesystem 1K-blocks Used Available Use% Mounted on\n' + df_data = 'nfs-host:/export %d 996864 %d 41%% /mnt' % (df_total_size, + df_avail) + df_output = df_head + df_data + + self.configuration.nfs_disk_util = 'df' + + mox.StubOutWithMock(drv, '_get_mount_point_for_share') + drv._get_mount_point_for_share(self.TEST_NFS_EXPORT1).\ + AndReturn(self.TEST_MNT_POINT) + + mox.StubOutWithMock(drv, '_execute') + drv._execute('df', '-P', '-B', '1', self.TEST_MNT_POINT, + run_as_root=True).AndReturn((df_output, None)) + + mox.ReplayAll() + + self.assertEquals((df_avail, df_total_size), + drv._get_available_capacity(self.TEST_NFS_EXPORT1)) + + mox.VerifyAll() + + def test_get_available_capacity_with_du(self): + """_get_available_capacity should calculate correct value.""" + mox = self._mox + drv = self._driver + self.configuration.nfs_disk_util = 'du' + + df_total_size = 2620544 + df_used_size = 996864 + df_avail_size = 1490560 + df_title = 'Filesystem 1-blocks Used Available Use% Mounted on\n' + df_mnt_data = 'nfs-host:/export %d %d %d 41%% /mnt' % (df_total_size, + df_used_size, + df_avail_size) + df_output = df_title + df_mnt_data + + du_used = 490560 + du_output = '%d /mnt' % du_used + + mox.StubOutWithMock(drv, '_get_mount_point_for_share') + drv._get_mount_point_for_share(self.TEST_NFS_EXPORT1).\ + AndReturn(self.TEST_MNT_POINT) + + mox.StubOutWithMock(drv, '_execute') + drv._execute('df', '-P', '-B', '1', self.TEST_MNT_POINT, + run_as_root=True).\ + AndReturn((df_output, None)) + drv._execute('du', '-sb', '--apparent-size', + '--exclude', '*snapshot*', + self.TEST_MNT_POINT, + run_as_root=True).AndReturn((du_output, None)) + + mox.ReplayAll() + + self.assertEquals((df_total_size - du_used, df_total_size), + drv._get_available_capacity(self.TEST_NFS_EXPORT1)) + + mox.VerifyAll() + + def test_load_shares_config(self): + mox = self._mox + drv = self._driver + + self.configuration.nfs_shares_config = self.TEST_SHARES_CONFIG_FILE + + mox.StubOutWithMock(__builtin__, 'open') + config_data = [] + config_data.append(self.TEST_NFS_EXPORT1) + config_data.append('#' + self.TEST_NFS_EXPORT2) + config_data.append('') + __builtin__.open(self.TEST_SHARES_CONFIG_FILE).AndReturn(config_data) + mox.ReplayAll() + + shares = drv._load_shares_config() + + self.assertEqual([self.TEST_NFS_EXPORT1], shares) + + mox.VerifyAll() + + def test_ensure_share_mounted(self): + """_ensure_share_mounted simple use case.""" + mox = self._mox + drv = self._driver + + mox.StubOutWithMock(drv, '_get_mount_point_for_share') + drv._get_mount_point_for_share(self.TEST_NFS_EXPORT1).\ + AndReturn(self.TEST_MNT_POINT) + + mox.StubOutWithMock(drv, '_mount_nfs') + drv._mount_nfs(self.TEST_NFS_EXPORT1, self.TEST_MNT_POINT, ensure=True) + + mox.ReplayAll() + + drv._ensure_share_mounted(self.TEST_NFS_EXPORT1) + + mox.VerifyAll() + + def test_ensure_shares_mounted_should_save_mounting_successfully(self): + """_ensure_shares_mounted should save share if mounted with success.""" + mox = self._mox + drv = self._driver + + mox.StubOutWithMock(drv, '_load_shares_config') + drv._load_shares_config().AndReturn([self.TEST_NFS_EXPORT1]) + mox.StubOutWithMock(drv, '_ensure_share_mounted') + drv._ensure_share_mounted(self.TEST_NFS_EXPORT1) + + mox.ReplayAll() + + drv._ensure_shares_mounted() + + self.assertEqual(1, len(drv._mounted_shares)) + self.assertEqual(self.TEST_NFS_EXPORT1, drv._mounted_shares[0]) + + mox.VerifyAll() + + def 
test_ensure_shares_mounted_should_not_save_mounting_with_error(self): + """_ensure_shares_mounted should not save share if mounting failed.""" + mox = self._mox + drv = self._driver + + mox.StubOutWithMock(drv, '_load_shares_config') + drv._load_shares_config().AndReturn([self.TEST_NFS_EXPORT1]) + mox.StubOutWithMock(drv, '_ensure_share_mounted') + drv._ensure_share_mounted(self.TEST_NFS_EXPORT1).AndRaise(Exception()) + + mox.ReplayAll() + + drv._ensure_shares_mounted() + + self.assertEqual(0, len(drv._mounted_shares)) + + mox.VerifyAll() + + def test_setup_should_throw_error_if_shares_config_not_configured(self): + """do_setup should throw error if shares config is not configured.""" + drv = self._driver + self.configuration.nfs_shares_config = self.TEST_SHARES_CONFIG_FILE + + self.assertRaises(exception.NfsException, + drv.do_setup, IsA(context.RequestContext)) + + def test_setup_should_throw_exception_if_nfs_client_is_not_installed(self): + """do_setup should throw error if the nfs client is not installed.""" + mox = self._mox + drv = self._driver + self.configuration.nfs_shares_config = self.TEST_SHARES_CONFIG_FILE + + mox.StubOutWithMock(os.path, 'exists') + os.path.exists(self.TEST_SHARES_CONFIG_FILE).AndReturn(True) + mox.StubOutWithMock(drv, '_execute') + drv._execute('mount.nfs', check_exit_code=False).\ + AndRaise(OSError(errno.ENOENT, 'No such file or directory')) + + mox.ReplayAll() + + self.assertRaises(exception.NfsException, + drv.do_setup, IsA(context.RequestContext)) + + mox.VerifyAll() + + def test_find_share_should_throw_error_if_there_is_no_mounted_shares(self): + """_find_share should throw error if there are no mounted shares.""" + drv = self._driver + + drv._mounted_shares = [] + + self.assertRaises(exception.NotFound, drv._find_share, + self.TEST_SIZE_IN_GB) + + def test_find_share(self): + """_find_share simple use case.""" + mox = self._mox + drv = self._driver + + drv._mounted_shares = [self.TEST_NFS_EXPORT1, self.TEST_NFS_EXPORT2] + + mox.StubOutWithMock(drv, '_get_available_capacity') + drv._get_available_capacity(self.TEST_NFS_EXPORT1).\ + AndReturn((2 * self.ONE_GB_IN_BYTES, 5 * self.ONE_GB_IN_BYTES)) + drv._get_available_capacity(self.TEST_NFS_EXPORT2).\ + AndReturn((3 * self.ONE_GB_IN_BYTES, 10 * self.ONE_GB_IN_BYTES)) + + mox.ReplayAll() + + self.assertEqual(self.TEST_NFS_EXPORT2, + drv._find_share(self.TEST_SIZE_IN_GB)) + + mox.VerifyAll() + + def test_find_share_should_throw_error_if_there_is_no_enough_place(self): + """_find_share should throw error if no share can host the volume.""" + mox = self._mox + drv = self._driver + + drv._mounted_shares = [self.TEST_NFS_EXPORT1, self.TEST_NFS_EXPORT2] + + mox.StubOutWithMock(drv, '_get_available_capacity') + drv._get_available_capacity(self.TEST_NFS_EXPORT1).\ + AndReturn((0, 5 * self.ONE_GB_IN_BYTES)) + drv._get_available_capacity(self.TEST_NFS_EXPORT2).\ + AndReturn((0, 10 * self.ONE_GB_IN_BYTES)) + + mox.ReplayAll() + + self.assertRaises(exception.NfsNoSuitableShareFound, drv._find_share, + self.TEST_SIZE_IN_GB) + + mox.VerifyAll() + + def _simple_volume(self): + volume = DumbVolume() + volume['provider_location'] = '127.0.0.1:/mnt' + volume['name'] = 'volume_name' + volume['size'] = 10 + + return volume + + def test_create_sparsed_volume(self): + mox = self._mox + drv = self._driver + volume = self._simple_volume() + + setattr(cfg.CONF, 'nfs_sparsed_volumes', True) + + mox.StubOutWithMock(drv, '_create_sparsed_file') + mox.StubOutWithMock(drv, '_set_rw_permissions_for_all') + + 
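# With nfs_sparsed_volumes set, _do_create_volume is expected to + # back the volume with a sparse file and then relax its permissions, + # which the two stubbed calls below record. + 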
drv._create_sparsed_file(IgnoreArg(), IgnoreArg()) + drv._set_rw_permissions_for_all(IgnoreArg()) + + mox.ReplayAll() + + drv._do_create_volume(volume) + + mox.VerifyAll() + + delattr(cfg.CONF, 'nfs_sparsed_volumes') + + def test_create_nonsparsed_volume(self): + mox = self._mox + drv = self._driver + self.configuration.nfs_sparsed_volumes = False + volume = self._simple_volume() + + setattr(cfg.CONF, 'nfs_sparsed_volumes', False) + + mox.StubOutWithMock(drv, '_create_regular_file') + mox.StubOutWithMock(drv, '_set_rw_permissions_for_all') + + drv._create_regular_file(IgnoreArg(), IgnoreArg()) + drv._set_rw_permissions_for_all(IgnoreArg()) + + mox.ReplayAll() + + drv._do_create_volume(volume) + + mox.VerifyAll() + + delattr(cfg.CONF, 'nfs_sparsed_volumes') + + def test_create_volume_should_ensure_nfs_mounted(self): + """create_volume ensures shares provided in config are mounted.""" + mox = self._mox + drv = self._driver + + self.stub_out_not_replaying(nfs, 'LOG') + self.stub_out_not_replaying(drv, '_find_share') + self.stub_out_not_replaying(drv, '_do_create_volume') + + mox.StubOutWithMock(drv, '_ensure_shares_mounted') + drv._ensure_shares_mounted() + + mox.ReplayAll() + + volume = DumbVolume() + volume['size'] = self.TEST_SIZE_IN_GB + drv.create_volume(volume) + + mox.VerifyAll() + + def test_create_volume_should_return_provider_location(self): + """create_volume should return provider_location with the found share.""" + mox = self._mox + drv = self._driver + + self.stub_out_not_replaying(nfs, 'LOG') + self.stub_out_not_replaying(drv, '_ensure_shares_mounted') + self.stub_out_not_replaying(drv, '_do_create_volume') + + mox.StubOutWithMock(drv, '_find_share') + drv._find_share(self.TEST_SIZE_IN_GB).AndReturn(self.TEST_NFS_EXPORT1) + + mox.ReplayAll() + + volume = DumbVolume() + volume['size'] = self.TEST_SIZE_IN_GB + result = drv.create_volume(volume) + self.assertEqual(self.TEST_NFS_EXPORT1, result['provider_location']) + + mox.VerifyAll() + + def test_delete_volume(self): + """delete_volume simple test case.""" + mox = self._mox + drv = self._driver + + self.stub_out_not_replaying(drv, '_ensure_share_mounted') + + volume = DumbVolume() + volume['name'] = 'volume-123' + volume['provider_location'] = self.TEST_NFS_EXPORT1 + + mox.StubOutWithMock(drv, 'local_path') + drv.local_path(volume).AndReturn(self.TEST_LOCAL_PATH) + + mox.StubOutWithMock(drv, '_execute') + drv._execute('rm', '-f', self.TEST_LOCAL_PATH, run_as_root=True) + + mox.ReplayAll() + + drv.delete_volume(volume) + + mox.VerifyAll() + + def test_delete_should_ensure_share_mounted(self): + """delete_volume should ensure that the corresponding share is mounted.""" + mox = self._mox + drv = self._driver + + self.stub_out_not_replaying(drv, '_execute') + + volume = DumbVolume() + volume['name'] = 'volume-123' + volume['provider_location'] = self.TEST_NFS_EXPORT1 + + mox.StubOutWithMock(drv, '_ensure_share_mounted') + drv._ensure_share_mounted(self.TEST_NFS_EXPORT1) + + mox.ReplayAll() + + drv.delete_volume(volume) + + mox.VerifyAll() + + def test_delete_should_not_delete_if_provider_location_not_provided(self): + """delete_volume should not delete if provider_location is missing.""" + mox = self._mox + drv = self._driver + + self.stub_out_not_replaying(drv, '_ensure_share_mounted') + + volume = DumbVolume() + volume['name'] = 'volume-123' + volume['provider_location'] = None + + mox.StubOutWithMock(drv, '_execute') + + mox.ReplayAll() + + drv.delete_volume(volume) + + mox.VerifyAll() + + def test_get_volume_stats(self): + """get_volume_stats 
must fill in the correct values.""" + mox = self._mox + drv = self._driver + + drv._mounted_shares = [self.TEST_NFS_EXPORT1, self.TEST_NFS_EXPORT2] + + mox.StubOutWithMock(drv, '_ensure_shares_mounted') + mox.StubOutWithMock(drv, '_get_available_capacity') + + drv._ensure_shares_mounted() + + drv._get_available_capacity(self.TEST_NFS_EXPORT1).\ + AndReturn((2 * self.ONE_GB_IN_BYTES, 10 * self.ONE_GB_IN_BYTES)) + drv._get_available_capacity(self.TEST_NFS_EXPORT2).\ + AndReturn((3 * self.ONE_GB_IN_BYTES, 20 * self.ONE_GB_IN_BYTES)) + + mox.ReplayAll() + + drv.get_volume_stats() + self.assertEqual(drv._stats['total_capacity_gb'], 30.0) + self.assertEqual(drv._stats['free_capacity_gb'], 5.0) + + mox.VerifyAll() diff --git a/cinder/tests/test_policy.py b/cinder/tests/test_policy.py new file mode 100644 index 0000000000..ed221a4ae4 --- /dev/null +++ b/cinder/tests/test_policy.py @@ -0,0 +1,232 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 Piston Cloud Computing, Inc. +# All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Test of Policy Engine For Cinder.""" + +import os.path +import StringIO +import urllib2 + +from cinder import context +from cinder import exception +from cinder import flags +import cinder.openstack.common.policy +from cinder.openstack.common import policy as common_policy +from cinder import policy +from cinder import test +from cinder import utils + +FLAGS = flags.FLAGS + + +class PolicyFileTestCase(test.TestCase): + def setUp(self): + super(PolicyFileTestCase, self).setUp() + # since is_admin is defined by policy, create context before reset + self.context = context.RequestContext('fake', 'fake') + policy.reset() + self.target = {} + + def tearDown(self): + super(PolicyFileTestCase, self).tearDown() + policy.reset() + + def test_modified_policy_reloads(self): + with utils.tempdir() as tmpdir: + tmpfilename = os.path.join(tmpdir, 'policy') + self.flags(policy_file=tmpfilename) + + action = "example:test" + with open(tmpfilename, "w") as policyfile: + policyfile.write("""{"example:test": []}""") + policy.enforce(self.context, action, self.target) + with open(tmpfilename, "w") as policyfile: + policyfile.write("""{"example:test": ["false:false"]}""") + # NOTE(vish): reset stored policy cache so we don't have to + # sleep(1) + policy._POLICY_CACHE = {} + self.assertRaises(exception.PolicyNotAuthorized, policy.enforce, + self.context, action, self.target) + + +class PolicyTestCase(test.TestCase): + def setUp(self): + super(PolicyTestCase, self).setUp() + policy.reset() + # NOTE(vish): preload rules to circumvent reloading from file + policy.init() + rules = { + "true": [], + "example:allowed": [], + "example:denied": [["false:false"]], + "example:get_http": [["http:http://www.example.com"]], + "example:my_file": [["role:compute_admin"], + ["project_id:%(project_id)s"]], + "example:early_and_fail": [["false:false", "rule:true"]], + "example:early_or_success": [["rule:true"], ["false:false"]], + "example:lowercase_admin": [["role:admin"], 
["role:sysadmin"]], + "example:uppercase_admin": [["role:ADMIN"], ["role:sysadmin"]], + } + # NOTE(vish): then overload underlying brain + common_policy.set_brain(common_policy.HttpBrain(rules)) + self.context = context.RequestContext('fake', 'fake', roles=['member']) + self.target = {} + + def tearDown(self): + policy.reset() + super(PolicyTestCase, self).tearDown() + + def test_enforce_nonexistent_action_throws(self): + action = "example:noexist" + self.assertRaises(exception.PolicyNotAuthorized, policy.enforce, + self.context, action, self.target) + + def test_enforce_bad_action_throws(self): + action = "example:denied" + self.assertRaises(exception.PolicyNotAuthorized, policy.enforce, + self.context, action, self.target) + + def test_enforce_good_action(self): + action = "example:allowed" + policy.enforce(self.context, action, self.target) + + def test_enforce_http_true(self): + + def fakeurlopen(url, post_data): + return StringIO.StringIO("True") + self.stubs.Set(urllib2, 'urlopen', fakeurlopen) + action = "example:get_http" + target = {} + result = policy.enforce(self.context, action, target) + self.assertEqual(result, None) + + def test_enforce_http_false(self): + + def fakeurlopen(url, post_data): + return StringIO.StringIO("False") + self.stubs.Set(urllib2, 'urlopen', fakeurlopen) + action = "example:get_http" + target = {} + self.assertRaises(exception.PolicyNotAuthorized, policy.enforce, + self.context, action, target) + + def test_templatized_enforcement(self): + target_mine = {'project_id': 'fake'} + target_not_mine = {'project_id': 'another'} + action = "example:my_file" + policy.enforce(self.context, action, target_mine) + self.assertRaises(exception.PolicyNotAuthorized, policy.enforce, + self.context, action, target_not_mine) + + def test_early_AND_enforcement(self): + action = "example:early_and_fail" + self.assertRaises(exception.PolicyNotAuthorized, policy.enforce, + self.context, action, self.target) + + def test_early_OR_enforcement(self): + action = "example:early_or_success" + policy.enforce(self.context, action, self.target) + + def test_ignore_case_role_check(self): + lowercase_action = "example:lowercase_admin" + uppercase_action = "example:uppercase_admin" + # NOTE(dprince) we mix case in the Admin role here to ensure + # case is ignored + admin_context = context.RequestContext('admin', + 'fake', + roles=['AdMiN']) + policy.enforce(admin_context, lowercase_action, self.target) + policy.enforce(admin_context, uppercase_action, self.target) + + +class DefaultPolicyTestCase(test.TestCase): + + def setUp(self): + super(DefaultPolicyTestCase, self).setUp() + policy.reset() + policy.init() + + self.rules = { + "default": [], + "example:exist": [["false:false"]] + } + + self._set_brain('default') + + self.context = context.RequestContext('fake', 'fake') + + def _set_brain(self, default_rule): + brain = cinder.openstack.common.policy.HttpBrain(self.rules, + default_rule) + cinder.openstack.common.policy.set_brain(brain) + + def tearDown(self): + super(DefaultPolicyTestCase, self).tearDown() + policy.reset() + + def test_policy_called(self): + self.assertRaises(exception.PolicyNotAuthorized, policy.enforce, + self.context, "example:exist", {}) + + def test_not_found_policy_calls_default(self): + policy.enforce(self.context, "example:noexist", {}) + + def test_default_not_found(self): + self._set_brain("default_noexist") + self.assertRaises(exception.PolicyNotAuthorized, policy.enforce, + self.context, "example:noexist", {}) + + +class 
ContextIsAdminPolicyTestCase(test.TestCase): + + def setUp(self): + super(ContextIsAdminPolicyTestCase, self).setUp() + policy.reset() + policy.init() + + def test_default_admin_role_is_admin(self): + ctx = context.RequestContext('fake', 'fake', roles=['johnny-admin']) + self.assertFalse(ctx.is_admin) + ctx = context.RequestContext('fake', 'fake', roles=['admin']) + self.assertTrue(ctx.is_admin) + + def test_custom_admin_role_is_admin(self): + # define explicit rules for context_is_admin + rules = { + 'context_is_admin': [["role:administrator"], ["role:johnny-admin"]] + } + brain = common_policy.Brain(rules, FLAGS.policy_default_rule) + common_policy.set_brain(brain) + ctx = context.RequestContext('fake', 'fake', roles=['johnny-admin']) + self.assertTrue(ctx.is_admin) + ctx = context.RequestContext('fake', 'fake', roles=['administrator']) + self.assertTrue(ctx.is_admin) + # default rule no longer applies + ctx = context.RequestContext('fake', 'fake', roles=['admin']) + self.assertFalse(ctx.is_admin) + + def test_context_is_admin_undefined(self): + rules = { + "admin_or_owner": [["role:admin"], ["project_id:%(project_id)s"]], + "default": [["rule:admin_or_owner"]], + } + brain = common_policy.Brain(rules, FLAGS.policy_default_rule) + common_policy.set_brain(brain) + ctx = context.RequestContext('fake', 'fake') + self.assertFalse(ctx.is_admin) + ctx = context.RequestContext('fake', 'fake', roles=['admin']) + self.assertTrue(ctx.is_admin) diff --git a/cinder/tests/test_quota.py b/cinder/tests/test_quota.py new file mode 100644 index 0000000000..c31ed52c25 --- /dev/null +++ b/cinder/tests/test_quota.py @@ -0,0 +1,1369 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import datetime + +from cinder import context +from cinder import db +from cinder.db.sqlalchemy import api as sqa_api +from cinder.db.sqlalchemy import models as sqa_models +from cinder import exception +from cinder import flags +from cinder.openstack.common import rpc +from cinder.openstack.common import timeutils +from cinder import quota +from cinder import test +import cinder.tests.image.fake +from cinder import volume + + +FLAGS = flags.FLAGS + + +class QuotaIntegrationTestCase(test.TestCase): + + def setUp(self): + super(QuotaIntegrationTestCase, self).setUp() + self.flags(quota_volumes=2, + quota_snapshots=2, + quota_gigabytes=20) + + # Apparently needed by the RPC tests... 
+ #self.network = self.start_service('network') + + self.user_id = 'admin' + self.project_id = 'admin' + self.context = context.RequestContext(self.user_id, + self.project_id, + is_admin=True) + orig_rpc_call = rpc.call + + def rpc_call_wrapper(context, topic, msg, timeout=None): + return orig_rpc_call(context, topic, msg) + + self.stubs.Set(rpc, 'call', rpc_call_wrapper) + + def tearDown(self): + super(QuotaIntegrationTestCase, self).tearDown() + cinder.tests.image.fake.FakeImageService_reset() + + def _create_volume(self, size=10): + """Create a test volume.""" + vol = {} + vol['user_id'] = self.user_id + vol['project_id'] = self.project_id + vol['size'] = size + vol['status'] = 'available' + return db.volume_create(self.context, vol) + + def _create_snapshot(self, volume): + snapshot = {} + snapshot['user_id'] = self.user_id + snapshot['project_id'] = self.project_id + snapshot['volume_id'] = volume['id'] + snapshot['volume_size'] = volume['size'] + snapshot['status'] = 'available' + return db.snapshot_create(self.context, snapshot) + + def test_too_many_volumes(self): + volume_ids = [] + for i in range(FLAGS.quota_volumes): + vol_ref = self._create_volume() + volume_ids.append(vol_ref['id']) + self.assertRaises(exception.QuotaError, + volume.API().create, + self.context, 10, '', '', None) + for volume_id in volume_ids: + db.volume_destroy(self.context, volume_id) + + def test_too_many_gigabytes(self): + volume_ids = [] + vol_ref = self._create_volume(size=20) + volume_ids.append(vol_ref['id']) + self.assertRaises(exception.QuotaError, + volume.API().create, + self.context, 10, '', '', None) + for volume_id in volume_ids: + db.volume_destroy(self.context, volume_id) + + def test_too_many_combined_gigabytes(self): + vol_ref = self._create_volume(size=10) + snap_ref = self._create_snapshot(vol_ref) + self.assertRaises(exception.QuotaError, + volume.API().create_snapshot, + self.context, vol_ref, '', '') + usages = db.quota_usage_get_all_by_project(self.context, + self.project_id) + self.assertEqual(usages['gigabytes']['in_use'], 20) + db.snapshot_destroy(self.context, snap_ref['id']) + db.volume_destroy(self.context, vol_ref['id']) + + def test_no_snapshot_gb_quota_flag(self): + self.flags(quota_volumes=2, + quota_snapshots=2, + quota_gigabytes=20, + no_snapshot_gb_quota=True) + vol_ref = self._create_volume(size=10) + snap_ref = self._create_snapshot(vol_ref) + snap_ref2 = volume.API().create_snapshot(self.context, + vol_ref, '', '') + + # Make sure no reservation was created for snapshot gigabytes. + reservations = db.reservation_get_all_by_project(self.context, + self.project_id) + self.assertEqual(reservations.get('gigabytes'), None) + + # Make sure the snapshot volume_size isn't included in usage. 
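+ # Only the two 10 GB volumes count toward usage here (20 GB in + # total), with the snapshot's 10 GB excluded by the flag.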
+ vol_type = db.volume_type_create(self.context, + dict(name=FLAGS.default_volume_type)) + vol_ref2 = volume.API().create(self.context, 10, '', '') + usages = db.quota_usage_get_all_by_project(self.context, + self.project_id) + self.assertEqual(usages['gigabytes']['in_use'], 20) + + db.snapshot_destroy(self.context, snap_ref['id']) + db.snapshot_destroy(self.context, snap_ref2['id']) + db.volume_destroy(self.context, vol_ref['id']) + db.volume_destroy(self.context, vol_ref2['id']) + db.volume_type_destroy(self.context, vol_type['id']) + + +class FakeContext(object): + def __init__(self, project_id, quota_class): + self.is_admin = False + self.user_id = 'fake_user' + self.project_id = project_id + self.quota_class = quota_class + + def elevated(self): + elevated = self.__class__(self.project_id, self.quota_class) + elevated.is_admin = True + return elevated + + +class FakeDriver(object): + def __init__(self, by_project=None, by_class=None, reservations=None): + self.called = [] + self.by_project = by_project or {} + self.by_class = by_class or {} + self.reservations = reservations or [] + + def get_by_project(self, context, project_id, resource): + self.called.append(('get_by_project', context, project_id, resource)) + try: + return self.by_project[project_id][resource] + except KeyError: + raise exception.ProjectQuotaNotFound(project_id=project_id) + + def get_by_class(self, context, quota_class, resource): + self.called.append(('get_by_class', context, quota_class, resource)) + try: + return self.by_class[quota_class][resource] + except KeyError: + raise exception.QuotaClassNotFound(class_name=quota_class) + + def get_defaults(self, context, resources): + self.called.append(('get_defaults', context, resources)) + return resources + + def get_class_quotas(self, context, resources, quota_class, + defaults=True): + self.called.append(('get_class_quotas', context, resources, + quota_class, defaults)) + return resources + + def get_project_quotas(self, context, resources, project_id, + quota_class=None, defaults=True, usages=True): + self.called.append(('get_project_quotas', context, resources, + project_id, quota_class, defaults, usages)) + return resources + + def limit_check(self, context, resources, values, project_id=None): + self.called.append(('limit_check', context, resources, + values, project_id)) + + def reserve(self, context, resources, deltas, expire=None, + project_id=None): + self.called.append(('reserve', context, resources, deltas, + expire, project_id)) + return self.reservations + + def commit(self, context, reservations, project_id=None): + self.called.append(('commit', context, reservations, project_id)) + + def rollback(self, context, reservations, project_id=None): + self.called.append(('rollback', context, reservations, project_id)) + + def destroy_all_by_project(self, context, project_id): + self.called.append(('destroy_all_by_project', context, project_id)) + + def expire(self, context): + self.called.append(('expire', context)) + + +class BaseResourceTestCase(test.TestCase): + def test_no_flag(self): + resource = quota.BaseResource('test_resource') + + self.assertEqual(resource.name, 'test_resource') + self.assertEqual(resource.flag, None) + self.assertEqual(resource.default, -1) + + def test_with_flag(self): + # We know this flag exists, so use it... 
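+ # A resource built with a flag name adopts that flag's value as its + # default limit (-1 meaning unlimited), as the cases below verify.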
+ self.flags(quota_volumes=10) + resource = quota.BaseResource('test_resource', 'quota_volumes') + + self.assertEqual(resource.name, 'test_resource') + self.assertEqual(resource.flag, 'quota_volumes') + self.assertEqual(resource.default, 10) + + def test_with_flag_no_quota(self): + self.flags(quota_volumes=-1) + resource = quota.BaseResource('test_resource', 'quota_volumes') + + self.assertEqual(resource.name, 'test_resource') + self.assertEqual(resource.flag, 'quota_volumes') + self.assertEqual(resource.default, -1) + + def test_quota_no_project_no_class(self): + self.flags(quota_volumes=10) + resource = quota.BaseResource('test_resource', 'quota_volumes') + driver = FakeDriver() + context = FakeContext(None, None) + quota_value = resource.quota(driver, context) + + self.assertEqual(quota_value, 10) + + def test_quota_with_project_no_class(self): + self.flags(quota_volumes=10) + resource = quota.BaseResource('test_resource', 'quota_volumes') + driver = FakeDriver( + by_project=dict( + test_project=dict(test_resource=15), )) + context = FakeContext('test_project', None) + quota_value = resource.quota(driver, context) + + self.assertEqual(quota_value, 15) + + def test_quota_no_project_with_class(self): + self.flags(quota_volumes=10) + resource = quota.BaseResource('test_resource', 'quota_volumes') + driver = FakeDriver( + by_class=dict( + test_class=dict(test_resource=20), )) + context = FakeContext(None, 'test_class') + quota_value = resource.quota(driver, context) + + self.assertEqual(quota_value, 20) + + def test_quota_with_project_with_class(self): + self.flags(quota_volumes=10) + resource = quota.BaseResource('test_resource', 'quota_volumes') + driver = FakeDriver(by_project=dict( + test_project=dict(test_resource=15), ), + by_class=dict(test_class=dict(test_resource=20), )) + context = FakeContext('test_project', 'test_class') + quota_value = resource.quota(driver, context) + + self.assertEqual(quota_value, 15) + + def test_quota_override_project_with_class(self): + self.flags(quota_volumes=10) + resource = quota.BaseResource('test_resource', 'quota_volumes') + driver = FakeDriver(by_project=dict( + test_project=dict(test_resource=15), + override_project=dict(test_resource=20), )) + context = FakeContext('test_project', 'test_class') + quota_value = resource.quota(driver, context, + project_id='override_project') + + self.assertEqual(quota_value, 20) + + def test_quota_with_project_override_class(self): + self.flags(quota_volumes=10) + resource = quota.BaseResource('test_resource', 'quota_volumes') + driver = FakeDriver(by_class=dict( + test_class=dict(test_resource=15), + override_class=dict(test_resource=20), )) + context = FakeContext('test_project', 'test_class') + quota_value = resource.quota(driver, context, + quota_class='override_class') + + self.assertEqual(quota_value, 20) + + +class QuotaEngineTestCase(test.TestCase): + def test_init(self): + quota_obj = quota.QuotaEngine() + + self.assertEqual(quota_obj._resources, {}) + self.assertTrue(isinstance(quota_obj._driver, quota.DbQuotaDriver)) + + def test_init_override_string(self): + quota_obj = quota.QuotaEngine( + quota_driver_class='cinder.tests.test_quota.FakeDriver') + + self.assertEqual(quota_obj._resources, {}) + self.assertTrue(isinstance(quota_obj._driver, FakeDriver)) + + def test_init_override_obj(self): + quota_obj = quota.QuotaEngine(quota_driver_class=FakeDriver) + + self.assertEqual(quota_obj._resources, {}) + self.assertEqual(quota_obj._driver, FakeDriver) + + def test_register_resource(self): + quota_obj = 
quota.QuotaEngine() + resource = quota.AbsoluteResource('test_resource') + quota_obj.register_resource(resource) + + self.assertEqual(quota_obj._resources, dict(test_resource=resource)) + + def test_register_resources(self): + quota_obj = quota.QuotaEngine() + resources = [ + quota.AbsoluteResource('test_resource1'), + quota.AbsoluteResource('test_resource2'), + quota.AbsoluteResource('test_resource3'), ] + quota_obj.register_resources(resources) + + self.assertEqual(quota_obj._resources, + dict(test_resource1=resources[0], + test_resource2=resources[1], + test_resource3=resources[2], )) + + def test_sync_predeclared(self): + quota_obj = quota.QuotaEngine() + + def spam(*args, **kwargs): + pass + + resource = quota.ReservableResource('test_resource', spam) + quota_obj.register_resource(resource) + + self.assertEqual(resource.sync, spam) + + def test_sync_multi(self): + quota_obj = quota.QuotaEngine() + + def spam(*args, **kwargs): + pass + + resources = [ + quota.ReservableResource('test_resource1', spam), + quota.ReservableResource('test_resource2', spam), + quota.ReservableResource('test_resource3', spam), + quota.ReservableResource('test_resource4', spam), ] + quota_obj.register_resources(resources[:2]) + + self.assertEqual(resources[0].sync, spam) + self.assertEqual(resources[1].sync, spam) + self.assertEqual(resources[2].sync, spam) + self.assertEqual(resources[3].sync, spam) + + def test_get_by_project(self): + context = FakeContext('test_project', 'test_class') + driver = FakeDriver( + by_project=dict( + test_project=dict(test_resource=42))) + quota_obj = quota.QuotaEngine(quota_driver_class=driver) + result = quota_obj.get_by_project(context, 'test_project', + 'test_resource') + + self.assertEqual(driver.called, + [('get_by_project', + context, + 'test_project', + 'test_resource'), ]) + self.assertEqual(result, 42) + + def test_get_by_class(self): + context = FakeContext('test_project', 'test_class') + driver = FakeDriver( + by_class=dict( + test_class=dict(test_resource=42))) + quota_obj = quota.QuotaEngine(quota_driver_class=driver) + result = quota_obj.get_by_class(context, 'test_class', 'test_resource') + + self.assertEqual(driver.called, [('get_by_class', + context, + 'test_class', + 'test_resource'), ]) + self.assertEqual(result, 42) + + def _make_quota_obj(self, driver): + quota_obj = quota.QuotaEngine(quota_driver_class=driver) + resources = [ + quota.AbsoluteResource('test_resource4'), + quota.AbsoluteResource('test_resource3'), + quota.AbsoluteResource('test_resource2'), + quota.AbsoluteResource('test_resource1'), ] + quota_obj.register_resources(resources) + + return quota_obj + + def test_get_defaults(self): + context = FakeContext(None, None) + driver = FakeDriver() + quota_obj = self._make_quota_obj(driver) + result = quota_obj.get_defaults(context) + + self.assertEqual(driver.called, [('get_defaults', + context, + quota_obj._resources), ]) + self.assertEqual(result, quota_obj._resources) + + def test_get_class_quotas(self): + context = FakeContext(None, None) + driver = FakeDriver() + quota_obj = self._make_quota_obj(driver) + result1 = quota_obj.get_class_quotas(context, 'test_class') + result2 = quota_obj.get_class_quotas(context, 'test_class', False) + + self.assertEqual(driver.called, [ + ('get_class_quotas', + context, + quota_obj._resources, + 'test_class', True), + ('get_class_quotas', + context, quota_obj._resources, + 'test_class', False), ]) + self.assertEqual(result1, quota_obj._resources) + self.assertEqual(result2, quota_obj._resources) + + def 
test_get_project_quotas(self): + context = FakeContext(None, None) + driver = FakeDriver() + quota_obj = self._make_quota_obj(driver) + result1 = quota_obj.get_project_quotas(context, 'test_project') + result2 = quota_obj.get_project_quotas(context, 'test_project', + quota_class='test_class', + defaults=False, + usages=False) + + self.assertEqual(driver.called, [ + ('get_project_quotas', + context, + quota_obj._resources, + 'test_project', + None, + True, + True), + ('get_project_quotas', + context, + quota_obj._resources, + 'test_project', + 'test_class', + False, + False), ]) + self.assertEqual(result1, quota_obj._resources) + self.assertEqual(result2, quota_obj._resources) + + def test_count_no_resource(self): + context = FakeContext(None, None) + driver = FakeDriver() + quota_obj = self._make_quota_obj(driver) + self.assertRaises(exception.QuotaResourceUnknown, + quota_obj.count, context, 'test_resource5', + True, foo='bar') + + def test_count_wrong_resource(self): + context = FakeContext(None, None) + driver = FakeDriver() + quota_obj = self._make_quota_obj(driver) + self.assertRaises(exception.QuotaResourceUnknown, + quota_obj.count, context, 'test_resource1', + True, foo='bar') + + def test_count(self): + def fake_count(context, *args, **kwargs): + self.assertEqual(args, (True,)) + self.assertEqual(kwargs, dict(foo='bar')) + return 5 + + context = FakeContext(None, None) + driver = FakeDriver() + quota_obj = self._make_quota_obj(driver) + quota_obj.register_resource(quota.CountableResource('test_resource5', + fake_count)) + result = quota_obj.count(context, 'test_resource5', True, foo='bar') + + self.assertEqual(result, 5) + + def test_limit_check(self): + context = FakeContext(None, None) + driver = FakeDriver() + quota_obj = self._make_quota_obj(driver) + quota_obj.limit_check(context, test_resource1=4, test_resource2=3, + test_resource3=2, test_resource4=1) + + self.assertEqual(driver.called, [ + ('limit_check', + context, + quota_obj._resources, + dict( + test_resource1=4, + test_resource2=3, + test_resource3=2, + test_resource4=1,), + None), ]) + + def test_reserve(self): + context = FakeContext(None, None) + driver = FakeDriver(reservations=['resv-01', + 'resv-02', + 'resv-03', + 'resv-04', ]) + quota_obj = self._make_quota_obj(driver) + result1 = quota_obj.reserve(context, test_resource1=4, + test_resource2=3, test_resource3=2, + test_resource4=1) + result2 = quota_obj.reserve(context, expire=3600, + test_resource1=1, test_resource2=2, + test_resource3=3, test_resource4=4) + result3 = quota_obj.reserve(context, project_id='fake_project', + test_resource1=1, test_resource2=2, + test_resource3=3, test_resource4=4) + + self.assertEqual(driver.called, [ + ('reserve', + context, + quota_obj._resources, + dict( + test_resource1=4, + test_resource2=3, + test_resource3=2, + test_resource4=1, ), + None, + None), + ('reserve', + context, + quota_obj._resources, + dict( + test_resource1=1, + test_resource2=2, + test_resource3=3, + test_resource4=4, ), + 3600, + None), + ('reserve', + context, + quota_obj._resources, + dict( + test_resource1=1, + test_resource2=2, + test_resource3=3, + test_resource4=4, ), + None, + 'fake_project'), ]) + self.assertEqual(result1, ['resv-01', + 'resv-02', + 'resv-03', + 'resv-04', ]) + self.assertEqual(result2, ['resv-01', + 'resv-02', + 'resv-03', + 'resv-04', ]) + self.assertEqual(result3, ['resv-01', + 'resv-02', + 'resv-03', + 'resv-04', ]) + + def test_commit(self): + context = FakeContext(None, None) + driver = FakeDriver() + quota_obj = 
self._make_quota_obj(driver) + quota_obj.commit(context, ['resv-01', 'resv-02', 'resv-03']) + + self.assertEqual(driver.called, + [('commit', + context, + ['resv-01', + 'resv-02', + 'resv-03'], + None), ]) + + def test_rollback(self): + context = FakeContext(None, None) + driver = FakeDriver() + quota_obj = self._make_quota_obj(driver) + quota_obj.rollback(context, ['resv-01', 'resv-02', 'resv-03']) + + self.assertEqual(driver.called, + [('rollback', + context, + ['resv-01', + 'resv-02', + 'resv-03'], + None), ]) + + def test_destroy_all_by_project(self): + context = FakeContext(None, None) + driver = FakeDriver() + quota_obj = self._make_quota_obj(driver) + quota_obj.destroy_all_by_project(context, 'test_project') + + self.assertEqual(driver.called, + [('destroy_all_by_project', + context, + 'test_project'), ]) + + def test_expire(self): + context = FakeContext(None, None) + driver = FakeDriver() + quota_obj = self._make_quota_obj(driver) + quota_obj.expire(context) + + self.assertEqual(driver.called, [('expire', context), ]) + + def test_resources(self): + quota_obj = self._make_quota_obj(None) + + self.assertEqual(quota_obj.resources, + ['test_resource1', 'test_resource2', + 'test_resource3', 'test_resource4']) + + +class DbQuotaDriverTestCase(test.TestCase): + def setUp(self): + super(DbQuotaDriverTestCase, self).setUp() + + self.flags(quota_volumes=10, + quota_snapshots=10, + quota_gigabytes=1000, + reservation_expire=86400, + until_refresh=0, + max_age=0, + ) + + self.driver = quota.DbQuotaDriver() + + self.calls = [] + + timeutils.set_time_override() + + def tearDown(self): + timeutils.clear_time_override() + super(DbQuotaDriverTestCase, self).tearDown() + + def test_get_defaults(self): + # Use our pre-defined resources + result = self.driver.get_defaults(None, quota.QUOTAS._resources) + + self.assertEqual( + result, + dict( + volumes=10, + snapshots=10, + gigabytes=1000, )) + + def _stub_quota_class_get_all_by_name(self): + # Stub out quota_class_get_all_by_name + def fake_qcgabn(context, quota_class): + self.calls.append('quota_class_get_all_by_name') + self.assertEqual(quota_class, 'test_class') + return dict(gigabytes=500, volumes=10, snapshots=10, ) + self.stubs.Set(db, 'quota_class_get_all_by_name', fake_qcgabn) + + def test_get_class_quotas(self): + self._stub_quota_class_get_all_by_name() + result = self.driver.get_class_quotas(None, quota.QUOTAS._resources, + 'test_class') + + self.assertEqual(self.calls, ['quota_class_get_all_by_name']) + self.assertEqual(result, dict(volumes=10, + gigabytes=500, + snapshots=10)) + + def test_get_class_quotas_no_defaults(self): + self._stub_quota_class_get_all_by_name() + result = self.driver.get_class_quotas(None, quota.QUOTAS._resources, + 'test_class', False) + + self.assertEqual(self.calls, ['quota_class_get_all_by_name']) + self.assertEqual(result, dict(volumes=10, + gigabytes=500, + snapshots=10)) + + def _stub_get_by_project(self): + def fake_qgabp(context, project_id): + self.calls.append('quota_get_all_by_project') + self.assertEqual(project_id, 'test_project') + return dict(volumes=10, gigabytes=50, reserved=0, snapshots=10) + + def fake_qugabp(context, project_id): + self.calls.append('quota_usage_get_all_by_project') + self.assertEqual(project_id, 'test_project') + return dict(volumes=dict(in_use=2, reserved=0), + snapshots=dict(in_use=2, reserved=0), + gigabytes=dict(in_use=10, reserved=0), ) + + self.stubs.Set(db, 'quota_get_all_by_project', fake_qgabp) + self.stubs.Set(db, 'quota_usage_get_all_by_project', fake_qugabp) + + 
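# The class quota lookup is stubbed too, so the tests below can show + # a per-project limit (gigabytes=50) taking precedence over the + # class-level value (gigabytes=500). + 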
self._stub_quota_class_get_all_by_name() + + def test_get_project_quotas(self): + self._stub_get_by_project() + result = self.driver.get_project_quotas( + FakeContext('test_project', 'test_class'), + quota.QUOTAS._resources, 'test_project') + + self.assertEqual(self.calls, ['quota_get_all_by_project', + 'quota_usage_get_all_by_project', + 'quota_class_get_all_by_name', ]) + self.assertEqual(result, dict(volumes=dict(limit=10, + in_use=2, + reserved=0, ), + snapshots=dict(limit=10, + in_use=2, + reserved=0, ), + gigabytes=dict(limit=50, + in_use=10, + reserved=0, ), )) + + def test_get_project_quotas_alt_context_no_class(self): + self._stub_get_by_project() + result = self.driver.get_project_quotas( + FakeContext('other_project', 'other_class'), + quota.QUOTAS._resources, 'test_project') + + self.assertEqual(self.calls, ['quota_get_all_by_project', + 'quota_usage_get_all_by_project', ]) + self.assertEqual(result, dict(volumes=dict(limit=10, + in_use=2, + reserved=0, ), + snapshots=dict(limit=10, + in_use=2, + reserved=0, ), + gigabytes=dict(limit=50, + in_use=10, + reserved=0, ), )) + + def test_get_project_quotas_alt_context_with_class(self): + self._stub_get_by_project() + result = self.driver.get_project_quotas( + FakeContext('other_project', 'other_class'), + quota.QUOTAS._resources, 'test_project', quota_class='test_class') + + self.assertEqual(self.calls, ['quota_get_all_by_project', + 'quota_usage_get_all_by_project', + 'quota_class_get_all_by_name', ]) + self.assertEqual(result, dict(volumes=dict(limit=10, + in_use=2, + reserved=0, ), + snapshots=dict(limit=10, + in_use=2, + reserved=0, ), + gigabytes=dict(limit=50, + in_use=10, + reserved=0, ), )) + + def test_get_project_quotas_no_defaults(self): + self._stub_get_by_project() + result = self.driver.get_project_quotas( + FakeContext('test_project', 'test_class'), + quota.QUOTAS._resources, 'test_project', defaults=False) + + self.assertEqual(self.calls, ['quota_get_all_by_project', + 'quota_usage_get_all_by_project', + 'quota_class_get_all_by_name', ]) + self.assertEqual(result, + dict(gigabytes=dict(limit=50, + in_use=10, + reserved=0, ), + snapshots=dict(limit=10, + in_use=2, + reserved=0, ), + volumes=dict(limit=10, + in_use=2, + reserved=0, ), )) + + def test_get_project_quotas_no_usages(self): + self._stub_get_by_project() + result = self.driver.get_project_quotas( + FakeContext('test_project', 'test_class'), + quota.QUOTAS._resources, 'test_project', usages=False) + + self.assertEqual(self.calls, ['quota_get_all_by_project', + 'quota_class_get_all_by_name', ]) + self.assertEqual(result, dict(volumes=dict(limit=10, ), + snapshots=dict(limit=10, ), + gigabytes=dict(limit=50, ), )) + + def _stub_get_project_quotas(self): + def fake_get_project_quotas(context, resources, project_id, + quota_class=None, defaults=True, + usages=True): + self.calls.append('get_project_quotas') + return dict((k, dict(limit=v.default)) + for k, v in resources.items()) + + self.stubs.Set(self.driver, 'get_project_quotas', + fake_get_project_quotas) + + def test_get_quotas_has_sync_unknown(self): + self._stub_get_project_quotas() + self.assertRaises(exception.QuotaResourceUnknown, + self.driver._get_quotas, + None, quota.QUOTAS._resources, + ['unknown'], True) + self.assertEqual(self.calls, []) + + def test_get_quotas_no_sync_unknown(self): + self._stub_get_project_quotas() + self.assertRaises(exception.QuotaResourceUnknown, + self.driver._get_quotas, + None, quota.QUOTAS._resources, + ['unknown'], False) + self.assertEqual(self.calls, []) + + def 
test_get_quotas_has_sync_no_sync_resource(self): + self._stub_get_project_quotas() + self.assertRaises(exception.QuotaResourceUnknown, + self.driver._get_quotas, + None, quota.QUOTAS._resources, + ['metadata_items'], True) + self.assertEqual(self.calls, []) + + def test_get_quotas_no_sync_has_sync_resource(self): + self._stub_get_project_quotas() + self.assertRaises(exception.QuotaResourceUnknown, + self.driver._get_quotas, + None, quota.QUOTAS._resources, + ['volumes'], False) + self.assertEqual(self.calls, []) + + def test_get_quotas_has_sync(self): + self._stub_get_project_quotas() + result = self.driver._get_quotas(FakeContext('test_project', + 'test_class'), + quota.QUOTAS._resources, + ['volumes', 'gigabytes'], + True) + + self.assertEqual(self.calls, ['get_project_quotas']) + self.assertEqual(result, dict(volumes=10, gigabytes=1000, )) + + def _stub_quota_reserve(self): + def fake_quota_reserve(context, resources, quotas, deltas, expire, + until_refresh, max_age, project_id=None): + self.calls.append(('quota_reserve', expire, until_refresh, + max_age)) + return ['resv-1', 'resv-2', 'resv-3'] + self.stubs.Set(db, 'quota_reserve', fake_quota_reserve) + + def test_reserve_bad_expire(self): + self._stub_get_project_quotas() + self._stub_quota_reserve() + self.assertRaises(exception.InvalidReservationExpiration, + self.driver.reserve, + FakeContext('test_project', 'test_class'), + quota.QUOTAS._resources, + dict(volumes=2), expire='invalid') + self.assertEqual(self.calls, []) + + def test_reserve_default_expire(self): + self._stub_get_project_quotas() + self._stub_quota_reserve() + result = self.driver.reserve(FakeContext('test_project', 'test_class'), + quota.QUOTAS._resources, + dict(volumes=2)) + + expire = timeutils.utcnow() + datetime.timedelta(seconds=86400) + self.assertEqual(self.calls, ['get_project_quotas', + ('quota_reserve', expire, 0, 0), ]) + self.assertEqual(result, ['resv-1', 'resv-2', 'resv-3']) + + def test_reserve_int_expire(self): + self._stub_get_project_quotas() + self._stub_quota_reserve() + result = self.driver.reserve(FakeContext('test_project', 'test_class'), + quota.QUOTAS._resources, + dict(volumes=2), expire=3600) + + expire = timeutils.utcnow() + datetime.timedelta(seconds=3600) + self.assertEqual(self.calls, ['get_project_quotas', + ('quota_reserve', expire, 0, 0), ]) + self.assertEqual(result, ['resv-1', 'resv-2', 'resv-3']) + + def test_reserve_timedelta_expire(self): + self._stub_get_project_quotas() + self._stub_quota_reserve() + expire_delta = datetime.timedelta(seconds=60) + result = self.driver.reserve(FakeContext('test_project', 'test_class'), + quota.QUOTAS._resources, + dict(volumes=2), expire=expire_delta) + + expire = timeutils.utcnow() + expire_delta + self.assertEqual(self.calls, ['get_project_quotas', + ('quota_reserve', expire, 0, 0), ]) + self.assertEqual(result, ['resv-1', 'resv-2', 'resv-3']) + + def test_reserve_datetime_expire(self): + self._stub_get_project_quotas() + self._stub_quota_reserve() + expire = timeutils.utcnow() + datetime.timedelta(seconds=120) + result = self.driver.reserve(FakeContext('test_project', 'test_class'), + quota.QUOTAS._resources, + dict(volumes=2), expire=expire) + + self.assertEqual(self.calls, ['get_project_quotas', + ('quota_reserve', expire, 0, 0), ]) + self.assertEqual(result, ['resv-1', 'resv-2', 'resv-3']) + + def test_reserve_until_refresh(self): + self._stub_get_project_quotas() + self._stub_quota_reserve() + self.flags(until_refresh=500) + expire = timeutils.utcnow() + 
datetime.timedelta(seconds=120) + result = self.driver.reserve(FakeContext('test_project', 'test_class'), + quota.QUOTAS._resources, + dict(volumes=2), expire=expire) + + self.assertEqual(self.calls, ['get_project_quotas', + ('quota_reserve', expire, 500, 0), ]) + self.assertEqual(result, ['resv-1', 'resv-2', 'resv-3']) + + def test_reserve_max_age(self): + self._stub_get_project_quotas() + self._stub_quota_reserve() + self.flags(max_age=86400) + expire = timeutils.utcnow() + datetime.timedelta(seconds=120) + result = self.driver.reserve(FakeContext('test_project', 'test_class'), + quota.QUOTAS._resources, + dict(volumes=2), expire=expire) + + self.assertEqual(self.calls, ['get_project_quotas', + ('quota_reserve', expire, 0, 86400), ]) + self.assertEqual(result, ['resv-1', 'resv-2', 'resv-3']) + + def _stub_quota_destroy_all_by_project(self): + def fake_quota_destroy_all_by_project(context, project_id): + self.calls.append(('quota_destroy_all_by_project', project_id)) + return None + self.stubs.Set(sqa_api, 'quota_destroy_all_by_project', + fake_quota_destroy_all_by_project) + + def test_destroy_by_project(self): + self._stub_quota_destroy_all_by_project() + self.driver.destroy_all_by_project(FakeContext('test_project', + 'test_class'), + 'test_project') + self.assertEqual(self.calls, [('quota_destroy_all_by_project', + ('test_project')), ]) + + +class FakeSession(object): + def begin(self): + return self + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, exc_traceback): + return False + + +class FakeUsage(sqa_models.QuotaUsage): + def save(self, *args, **kwargs): + pass + + +class QuotaReserveSqlAlchemyTestCase(test.TestCase): + # cinder.db.sqlalchemy.api.quota_reserve is so complex it needs its + # own test case, and since it's a quota manipulator, this is the + # best place to put it... 
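+ # Each case seeds fake usage records, runs quota_reserve, then checks + # which per-resource sync functions fired and which usage and + # reservation rows would have been written.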
+ + def setUp(self): + super(QuotaReserveSqlAlchemyTestCase, self).setUp() + + self.sync_called = set() + + def make_sync(res_name): + def sync(context, project_id, session): + self.sync_called.add(res_name) + if res_name in self.usages: + if self.usages[res_name].in_use < 0: + return {res_name: 2} + else: + return {res_name: self.usages[res_name].in_use - 1} + return {res_name: 0} + return sync + + self.resources = {} + for res_name in ('volumes', 'gigabytes'): + res = quota.ReservableResource(res_name, make_sync(res_name)) + self.resources[res_name] = res + + self.expire = timeutils.utcnow() + datetime.timedelta(seconds=3600) + + self.usages = {} + self.usages_created = {} + self.reservations_created = {} + + def fake_get_session(): + return FakeSession() + + def fake_get_quota_usages(context, session, project_id): + return self.usages.copy() + + def fake_quota_usage_create(context, project_id, resource, in_use, + reserved, until_refresh, session=None, + save=True): + quota_usage_ref = self._make_quota_usage( + project_id, resource, in_use, reserved, until_refresh, + timeutils.utcnow(), timeutils.utcnow()) + + self.usages_created[resource] = quota_usage_ref + + return quota_usage_ref + + def fake_reservation_create(context, uuid, usage_id, project_id, + resource, delta, expire, session=None): + reservation_ref = self._make_reservation( + uuid, usage_id, project_id, resource, delta, expire, + timeutils.utcnow(), timeutils.utcnow()) + + self.reservations_created[resource] = reservation_ref + + return reservation_ref + + self.stubs.Set(sqa_api, 'get_session', fake_get_session) + self.stubs.Set(sqa_api, '_get_quota_usages', fake_get_quota_usages) + self.stubs.Set(sqa_api, 'quota_usage_create', fake_quota_usage_create) + self.stubs.Set(sqa_api, 'reservation_create', fake_reservation_create) + + timeutils.set_time_override() + + def _make_quota_usage(self, project_id, resource, in_use, reserved, + until_refresh, created_at, updated_at): + quota_usage_ref = FakeUsage() + quota_usage_ref.id = len(self.usages) + len(self.usages_created) + quota_usage_ref.project_id = project_id + quota_usage_ref.resource = resource + quota_usage_ref.in_use = in_use + quota_usage_ref.reserved = reserved + quota_usage_ref.until_refresh = until_refresh + quota_usage_ref.created_at = created_at + quota_usage_ref.updated_at = updated_at + quota_usage_ref.deleted_at = None + quota_usage_ref.deleted = False + + return quota_usage_ref + + def init_usage(self, project_id, resource, in_use, reserved, + until_refresh=None, created_at=None, updated_at=None): + if created_at is None: + created_at = timeutils.utcnow() + if updated_at is None: + updated_at = timeutils.utcnow() + + quota_usage_ref = self._make_quota_usage(project_id, resource, in_use, + reserved, until_refresh, + created_at, updated_at) + + self.usages[resource] = quota_usage_ref + + def compare_usage(self, usage_dict, expected): + for usage in expected: + resource = usage['resource'] + for key, value in usage.items(): + actual = getattr(usage_dict[resource], key) + self.assertEqual(actual, value, + "%s != %s on usage for resource %s" % + (actual, value, resource)) + + def _make_reservation(self, uuid, usage_id, project_id, resource, + delta, expire, created_at, updated_at): + reservation_ref = sqa_models.Reservation() + reservation_ref.id = len(self.reservations_created) + reservation_ref.uuid = uuid + reservation_ref.usage_id = usage_id + reservation_ref.project_id = project_id + reservation_ref.resource = resource + reservation_ref.delta = delta + 
reservation_ref.expire = expire + reservation_ref.created_at = created_at + reservation_ref.updated_at = updated_at + reservation_ref.deleted_at = None + reservation_ref.deleted = False + + return reservation_ref + + def compare_reservation(self, reservations, expected): + reservations = set(reservations) + for resv in expected: + resource = resv['resource'] + resv_obj = self.reservations_created[resource] + + self.assertIn(resv_obj.uuid, reservations) + reservations.discard(resv_obj.uuid) + + for key, value in resv.items(): + actual = getattr(resv_obj, key) + self.assertEqual(actual, value, + "%s != %s on reservation for resource %s" % + (actual, value, resource)) + + self.assertEqual(len(reservations), 0) + + def test_quota_reserve_create_usages(self): + context = FakeContext('test_project', 'test_class') + quotas = dict(volumes=5, + gigabytes=10 * 1024, ) + deltas = dict(volumes=2, + gigabytes=2 * 1024, ) + result = sqa_api.quota_reserve(context, self.resources, quotas, + deltas, self.expire, 0, 0) + + self.assertEqual(self.sync_called, set(['volumes', 'gigabytes'])) + self.compare_usage(self.usages_created, + [dict(resource='volumes', + project_id='test_project', + in_use=0, + reserved=2, + until_refresh=None), + dict(resource='gigabytes', + project_id='test_project', + in_use=0, + reserved=2 * 1024, + until_refresh=None), ]) + self.compare_reservation( + result, + [dict(resource='volumes', + usage_id=self.usages_created['volumes'], + project_id='test_project', + delta=2), + dict(resource='gigabytes', + usage_id=self.usages_created['gigabytes'], + delta=2 * 1024), ]) + + def test_quota_reserve_negative_in_use(self): + self.init_usage('test_project', 'volumes', -1, 0, until_refresh=1) + self.init_usage('test_project', 'gigabytes', -1, 0, until_refresh=1) + context = FakeContext('test_project', 'test_class') + quotas = dict(volumes=5, + gigabytes=10 * 1024, ) + deltas = dict(volumes=2, + gigabytes=2 * 1024, ) + result = sqa_api.quota_reserve(context, self.resources, quotas, + deltas, self.expire, 5, 0) + + self.assertEqual(self.sync_called, set(['volumes', 'gigabytes'])) + self.compare_usage(self.usages, [dict(resource='volumes', + project_id='test_project', + in_use=2, + reserved=2, + until_refresh=5), + dict(resource='gigabytes', + project_id='test_project', + in_use=2, + reserved=2 * 1024, + until_refresh=5), ]) + self.assertEqual(self.usages_created, {}) + self.compare_reservation(result, + [dict(resource='volumes', + usage_id=self.usages['volumes'], + project_id='test_project', + delta=2), + dict(resource='gigabytes', + usage_id=self.usages['gigabytes'], + delta=2 * 1024), ]) + + def test_quota_reserve_until_refresh(self): + self.init_usage('test_project', 'volumes', 3, 0, until_refresh=1) + self.init_usage('test_project', 'gigabytes', 3, 0, until_refresh=1) + context = FakeContext('test_project', 'test_class') + quotas = dict(volumes=5, gigabytes=10 * 1024, ) + deltas = dict(volumes=2, gigabytes=2 * 1024, ) + result = sqa_api.quota_reserve(context, self.resources, quotas, + deltas, self.expire, 5, 0) + + self.assertEqual(self.sync_called, set(['volumes', 'gigabytes'])) + self.compare_usage(self.usages, [dict(resource='volumes', + project_id='test_project', + in_use=2, + reserved=2, + until_refresh=5), + dict(resource='gigabytes', + project_id='test_project', + in_use=2, + reserved=2 * 1024, + until_refresh=5), ]) + self.assertEqual(self.usages_created, {}) + self.compare_reservation(result, + [dict(resource='volumes', + usage_id=self.usages['volumes'], + project_id='test_project', 
+ delta=2), + dict(resource='gigabytes', + usage_id=self.usages['gigabytes'], + delta=2 * 1024), ]) + + def test_quota_reserve_max_age(self): + max_age = 3600 + record_created = (timeutils.utcnow() - + datetime.timedelta(seconds=max_age)) + self.init_usage('test_project', 'volumes', 3, 0, + created_at=record_created, updated_at=record_created) + self.init_usage('test_project', 'gigabytes', 3, 0, + created_at=record_created, updated_at=record_created) + context = FakeContext('test_project', 'test_class') + quotas = dict(volumes=5, gigabytes=10 * 1024, ) + deltas = dict(volumes=2, gigabytes=2 * 1024, ) + result = sqa_api.quota_reserve(context, self.resources, quotas, + deltas, self.expire, 0, max_age) + + self.assertEqual(self.sync_called, set(['volumes', 'gigabytes'])) + self.compare_usage(self.usages, [dict(resource='volumes', + project_id='test_project', + in_use=2, + reserved=2, + until_refresh=None), + dict(resource='gigabytes', + project_id='test_project', + in_use=2, + reserved=2 * 1024, + until_refresh=None), ]) + self.assertEqual(self.usages_created, {}) + self.compare_reservation(result, + [dict(resource='volumes', + usage_id=self.usages['volumes'], + project_id='test_project', + delta=2), + dict(resource='gigabytes', + usage_id=self.usages['gigabytes'], + delta=2 * 1024), ]) + + def test_quota_reserve_no_refresh(self): + self.init_usage('test_project', 'volumes', 3, 0) + self.init_usage('test_project', 'gigabytes', 3, 0) + context = FakeContext('test_project', 'test_class') + quotas = dict(volumes=5, gigabytes=10 * 1024, ) + deltas = dict(volumes=2, gigabytes=2 * 1024, ) + result = sqa_api.quota_reserve(context, self.resources, quotas, + deltas, self.expire, 0, 0) + + self.assertEqual(self.sync_called, set([])) + self.compare_usage(self.usages, [dict(resource='volumes', + project_id='test_project', + in_use=3, + reserved=2, + until_refresh=None), + dict(resource='gigabytes', + project_id='test_project', + in_use=3, + reserved=2 * 1024, + until_refresh=None), ]) + self.assertEqual(self.usages_created, {}) + self.compare_reservation(result, + [dict(resource='volumes', + usage_id=self.usages['volumes'], + project_id='test_project', + delta=2), + dict(resource='gigabytes', + usage_id=self.usages['gigabytes'], + delta=2 * 1024), ]) + + def test_quota_reserve_unders(self): + self.init_usage('test_project', 'volumes', 1, 0) + self.init_usage('test_project', 'gigabytes', 1 * 1024, 0) + context = FakeContext('test_project', 'test_class') + quotas = dict(volumes=5, gigabytes=10 * 1024, ) + deltas = dict(volumes=-2, gigabytes=-2 * 1024, ) + result = sqa_api.quota_reserve(context, self.resources, quotas, + deltas, self.expire, 0, 0) + + self.assertEqual(self.sync_called, set([])) + self.compare_usage(self.usages, [dict(resource='volumes', + project_id='test_project', + in_use=1, + reserved=0, + until_refresh=None), + dict(resource='gigabytes', + project_id='test_project', + in_use=1 * 1024, + reserved=0, + until_refresh=None), ]) + self.assertEqual(self.usages_created, {}) + self.compare_reservation(result, + [dict(resource='volumes', + usage_id=self.usages['volumes'], + project_id='test_project', + delta=-2), + dict(resource='gigabytes', + usage_id=self.usages['gigabytes'], + delta=-2 * 1024), ]) + + def test_quota_reserve_overs(self): + self.init_usage('test_project', 'volumes', 4, 0) + self.init_usage('test_project', 'gigabytes', 10 * 1024, 0) + context = FakeContext('test_project', 'test_class') + quotas = dict(volumes=5, gigabytes=10 * 1024, ) + deltas = dict(volumes=2, gigabytes=2 * 
1024, ) + self.assertRaises(exception.OverQuota, + sqa_api.quota_reserve, + context, self.resources, quotas, + deltas, self.expire, 0, 0) + + self.assertEqual(self.sync_called, set([])) + self.compare_usage(self.usages, [dict(resource='volumes', + project_id='test_project', + in_use=4, + reserved=0, + until_refresh=None), + dict(resource='gigabytes', + project_id='test_project', + in_use=10 * 1024, + reserved=0, + until_refresh=None), ]) + self.assertEqual(self.usages_created, {}) + self.assertEqual(self.reservations_created, {}) + + def test_quota_reserve_reduction(self): + self.init_usage('test_project', 'volumes', 10, 0) + self.init_usage('test_project', 'gigabytes', 20 * 1024, 0) + context = FakeContext('test_project', 'test_class') + quotas = dict(volumes=5, gigabytes=10 * 1024, ) + deltas = dict(volumes=-2, gigabytes=-2 * 1024, ) + result = sqa_api.quota_reserve(context, self.resources, quotas, + deltas, self.expire, 0, 0) + + self.assertEqual(self.sync_called, set([])) + self.compare_usage(self.usages, [dict(resource='volumes', + project_id='test_project', + in_use=10, + reserved=0, + until_refresh=None), + dict(resource='gigabytes', + project_id='test_project', + in_use=20 * 1024, + reserved=0, + until_refresh=None), ]) + self.assertEqual(self.usages_created, {}) + self.compare_reservation(result, + [dict(resource='volumes', + usage_id=self.usages['volumes'], + project_id='test_project', + delta=-2), + dict(resource='gigabytes', + usage_id=self.usages['gigabytes'], + project_id='test_project', + delta=-2 * 1024), ]) diff --git a/cinder/tests/test_rbd.py b/cinder/tests/test_rbd.py new file mode 100644 index 0000000000..54b6ffffa4 --- /dev/null +++ b/cinder/tests/test_rbd.py @@ -0,0 +1,266 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 Josh Durgin +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
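The quota tests above all drive sqa_api.quota_reserve through the same admission rule. A toy predicate summarizing that rule (a sketch only; the real function also refreshes stale usage records, which the until_refresh and max_age cases exercise):

    def would_reserve(quota, in_use, reserved, delta):
        # Releases (negative deltas) are always accepted, even when the
        # project is already over quota (test_quota_reserve_reduction);
        # positive deltas must fit under the hard limit, otherwise
        # quota_reserve raises OverQuota (test_quota_reserve_overs).
        return delta < 0 or in_use + reserved + delta <= quota

The RBD tests that follow pin down how rbd:// image locations are validated before a volume is cloned from an image. A minimal sketch of the contract asserted by test_good_locations and test_bad_locations (parse_location is an illustrative stand-in for the driver's _parse_location; the real driver raises exception.ImageUnacceptable rather than ValueError):

    import urllib

    def parse_location(location):
        prefix = 'rbd://'
        if not location.startswith(prefix):
            raise ValueError('%s does not start with rbd://' % location)
        # '%2F'-style escapes are legal, so unquote each piece
        pieces = map(urllib.unquote, location[len(prefix):].split('/'))
        if any(piece == '' for piece in pieces):
            raise ValueError('%s has empty fields' % location)
        if len(pieces) != 4:
            raise ValueError('expected rbd://fsid/pool/image/snapshot')
        fsid, pool, image, snapshot = pieces
        return fsid, pool, image, snapshot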
+ +import contextlib +import mox +import os +import tempfile + +from cinder import db +from cinder import exception +from cinder.image import image_utils +from cinder.openstack.common import log as logging +from cinder.openstack.common import timeutils +from cinder import test +from cinder.tests.image import fake as fake_image +from cinder.tests.test_volume import DriverTestCase +from cinder.volume import configuration as conf +from cinder.volume.drivers.rbd import RBDDriver +from cinder.volume.drivers.rbd import VERSION as DRIVER_VERSION + +LOG = logging.getLogger(__name__) + + +class FakeImageService: + def download(self, context, image_id, path): + pass + +RADOS_DF_OUT = """ +{ + "total_space" : "958931232", + "total_used" : "123906196", + "total_objects" : "4221", + "total_avail" : "787024012", + "pools" : [ + { + "name" : "volumes", + "categories" : [ + { + "write_bytes" : "226833", + "size_kb" : "17038386", + "read_bytes" : "221865", + "num_objects" : "4186", + "name" : "", + "size_bytes" : "17447306589", + "write_kb" : "20302730", + "num_object_copies" : "8372", + "read_kb" : "30", + "num_objects_unfound" : "0", + "num_object_clones" : "9", + "num_objects_missing_on_primary" : "0", + "num_objects_degraded" : "0" + } + ], + "id" : "4" + } + ] +} +""" + + +class RBDTestCase(test.TestCase): + + def setUp(self): + super(RBDTestCase, self).setUp() + + def fake_execute(*args, **kwargs): + return '', '' + self._mox = mox.Mox() + self.configuration = mox.MockObject(conf.Configuration) + self.configuration.volume_tmp_dir = None + self.configuration.rbd_pool = 'rbd' + self.configuration.rbd_secret_uuid = None + self.configuration.rbd_user = None + self.configuration.append_config_values(mox.IgnoreArg()) + + self.driver = RBDDriver(execute=fake_execute, + configuration=self.configuration) + self._mox.ReplayAll() + + def test_good_locations(self): + locations = ['rbd://fsid/pool/image/snap', + 'rbd://%2F/%2F/%2F/%2F', ] + map(self.driver._parse_location, locations) + + def test_bad_locations(self): + locations = ['rbd://image', + 'http://path/to/somewhere/else', + 'rbd://image/extra', + 'rbd://image/', + 'rbd://fsid/pool/image/', + 'rbd://fsid/pool/image/snap/', + 'rbd://///', ] + for loc in locations: + self.assertRaises(exception.ImageUnacceptable, + self.driver._parse_location, + loc) + self.assertFalse(self.driver._is_cloneable(loc)) + + def test_cloneable(self): + self.stubs.Set(self.driver, '_get_fsid', lambda: 'abc') + location = 'rbd://abc/pool/image/snap' + self.assertTrue(self.driver._is_cloneable(location)) + + def test_uncloneable_different_fsid(self): + self.stubs.Set(self.driver, '_get_fsid', lambda: 'abc') + location = 'rbd://def/pool/image/snap' + self.assertFalse(self.driver._is_cloneable(location)) + + def test_uncloneable_unreadable(self): + def fake_exc(*args): + raise exception.ProcessExecutionError() + self.stubs.Set(self.driver, '_get_fsid', lambda: 'abc') + self.stubs.Set(self.driver, '_execute', fake_exc) + location = 'rbd://abc/pool/image/snap' + self.assertFalse(self.driver._is_cloneable(location)) + + def _copy_image(self): + @contextlib.contextmanager + def fake_temp_file(dir): + class FakeTmp: + def __init__(self, name): + self.name = name + yield FakeTmp('test') + self.stubs.Set(tempfile, 'NamedTemporaryFile', fake_temp_file) + self.stubs.Set(os.path, 'exists', lambda x: True) + self.stubs.Set(image_utils, 'fetch_to_raw', lambda w, x, y, z: None) + self.driver.copy_image_to_volume(None, {'name': 'test', + 'size': 1}, + FakeImageService(), None) + + def 
test_copy_image_no_volume_tmp(self):
+        self.configuration.volume_tmp_dir = None
+        self._copy_image()
+
+    def test_copy_image_volume_tmp(self):
+        self.configuration.volume_tmp_dir = '/var/run/cinder/tmp'
+        self._copy_image()
+
+    def test_update_volume_stats(self):
+        def fake_stats(*args):
+            return RADOS_DF_OUT, ''
+
+        def fake_safe_get(*args):
+            return "RBD"
+
+        self.stubs.Set(self.driver, '_execute', fake_stats)
+        self.stubs.Set(self.driver.configuration, 'safe_get', fake_safe_get)
+        expected = dict(
+            volume_backend_name='RBD',
+            vendor_name='Open Source',
+            driver_version=DRIVER_VERSION,
+            storage_protocol='ceph',
+            total_capacity_gb=914,
+            free_capacity_gb=750,
+            reserved_percentage=0)
+        actual = self.driver.get_volume_stats(True)
+        self.assertDictMatch(expected, actual)
+
+    def test_update_volume_stats_error(self):
+        def fake_exc(*args):
+            raise exception.ProcessExecutionError()
+
+        def fake_safe_get(*args):
+            return "RBD"
+
+        self.stubs.Set(self.driver, '_execute', fake_exc)
+        self.stubs.Set(self.driver.configuration, 'safe_get', fake_safe_get)
+        expected = dict(
+            volume_backend_name='RBD',
+            vendor_name='Open Source',
+            driver_version=DRIVER_VERSION,
+            storage_protocol='ceph',
+            total_capacity_gb='unknown',
+            free_capacity_gb='unknown',
+            reserved_percentage=0)
+        actual = self.driver.get_volume_stats(True)
+        self.assertDictMatch(expected, actual)
+
+
+class ManagedRBDTestCase(DriverTestCase):
+    driver_name = "cinder.volume.drivers.rbd.RBDDriver"
+
+    def setUp(self):
+        super(ManagedRBDTestCase, self).setUp()
+        fake_image.stub_out_image_service(self.stubs)
+
+    def _clone_volume_from_image(self, expected_status,
+                                 clone_works=True):
+        """Try to clone a volume from an image, and check the status
+        afterwards."""
+        def fake_clone_image(volume, image_location):
+            return True
+
+        def fake_clone_error(volume, image_location):
+            raise exception.CinderException()
+
+        self.stubs.Set(self.volume.driver, '_is_cloneable', lambda x: True)
+        if clone_works:
+            self.stubs.Set(self.volume.driver, 'clone_image', fake_clone_image)
+        else:
+            self.stubs.Set(self.volume.driver, 'clone_image', fake_clone_error)
+
+        image_id = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
+        volume_id = 1
+        # creating volume testdata
+        db.volume_create(self.context,
+                         {'id': volume_id,
+                          'updated_at': timeutils.utcnow(),
+                          'display_description': 'Test Desc',
+                          'size': 20,
+                          'status': 'creating',
+                          'instance_uuid': None,
+                          'host': 'dummy'})
+        try:
+            if clone_works:
+                self.volume.create_volume(self.context,
+                                          volume_id,
+                                          image_id=image_id)
+            else:
+                self.assertRaises(exception.CinderException,
+                                  self.volume.create_volume,
+                                  self.context,
+                                  volume_id,
+                                  image_id=image_id)
+
+            volume = db.volume_get(self.context, volume_id)
+            self.assertEqual(volume['status'], expected_status)
+        finally:
+            # cleanup
+            db.volume_destroy(self.context, volume_id)
+
+    def test_clone_image_status_available(self):
+        """Verify that a successful clone leaves the volume available."""
+        self._clone_volume_from_image('available', True)
+
+    def test_clone_image_status_error(self):
+        """Verify that a failed clone leaves the volume in the error state."""
+        self._clone_volume_from_image('error', False)
+
+    def test_clone_success(self):
+        self.stubs.Set(self.volume.driver, '_is_cloneable', lambda x: True)
+        self.stubs.Set(self.volume.driver, 'clone_image', lambda a, b: True)
+        image_id = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
+        self.assertTrue(self.volume.driver.clone_image({}, image_id))
+
+    def test_clone_bad_image_id(self):
+        self.stubs.Set(self.volume.driver,
'_is_cloneable', lambda x: True) + self.assertFalse(self.volume.driver.clone_image({}, None)) + + def test_clone_uncloneable(self): + self.stubs.Set(self.volume.driver, '_is_cloneable', lambda x: False) + self.assertFalse(self.volume.driver.clone_image({}, 'dne')) diff --git a/cinder/tests/test_scality.py b/cinder/tests/test_scality.py new file mode 100644 index 0000000000..f32261cc86 --- /dev/null +++ b/cinder/tests/test_scality.py @@ -0,0 +1,185 @@ +# Copyright (c) 2013 Scality +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Unit tests for the Scality SOFS Volume Driver. +""" + +import errno +import os + +from cinder import exception +from cinder import test +from cinder import utils +from cinder.volume.drivers import scality + + +class ScalityDriverTestCase(test.TestCase): + """Test case for the Scality driver.""" + + TEST_MOUNT = '/tmp/fake_mount' + TEST_CONFIG = '/tmp/fake_config' + TEST_VOLDIR = 'volumes' + + TEST_VOLNAME = 'volume_name' + TEST_VOLSIZE = '0' + TEST_VOLUME = { + 'name': TEST_VOLNAME, + 'size': TEST_VOLSIZE + } + TEST_VOLPATH = os.path.join(TEST_MOUNT, + TEST_VOLDIR, + TEST_VOLNAME) + + TEST_SNAPNAME = 'snapshot_name' + TEST_SNAPSHOT = { + 'name': TEST_SNAPNAME, + 'volume_name': TEST_VOLNAME, + 'volume_size': TEST_VOLSIZE + } + TEST_SNAPPATH = os.path.join(TEST_MOUNT, + TEST_VOLDIR, + TEST_SNAPNAME) + + def _makedirs(self, path): + try: + os.makedirs(path) + except OSError as e: + if e.errno != errno.EEXIST: + raise e + + def _create_fake_config(self): + open(self.TEST_CONFIG, "w+").close() + + def _create_fake_mount(self): + self._makedirs(os.path.join(self.TEST_MOUNT, 'sys')) + self._makedirs(os.path.join(self.TEST_MOUNT, self.TEST_VOLDIR)) + + def _remove_fake_mount(self): + utils.execute('rm', '-rf', self.TEST_MOUNT) + + def _remove_fake_config(self): + try: + os.unlink(self.TEST_CONFIG) + except OSError as e: + if e.errno != errno.ENOENT: + raise e + + def _configure_driver(self): + scality.FLAGS.scality_sofs_config = self.TEST_CONFIG + scality.FLAGS.scality_sofs_mount_point = self.TEST_MOUNT + scality.FLAGS.scality_sofs_volume_dir = self.TEST_VOLDIR + + def _execute_wrapper(self, cmd, *args, **kwargs): + try: + kwargs.pop('run_as_root') + except KeyError: + pass + utils.execute(cmd, *args, **kwargs) + + def _set_access_wrapper(self, is_visible): + + def _access_wrapper(path, flags): + if path == '/sbin/mount.sofs': + return is_visible + else: + return os.access(path, flags) + + self.stubs.Set(os, 'access', _access_wrapper) + + def setUp(self): + super(ScalityDriverTestCase, self).setUp() + + self._remove_fake_mount() + self._driver = scality.ScalityDriver() + self._driver.set_execute(self._execute_wrapper) + + self._create_fake_mount() + self._create_fake_config() + self._configure_driver() + + def tearDown(self): + self._remove_fake_mount() + self._remove_fake_config() + super(ScalityDriverTestCase, self).tearDown() + + def test_setup_no_config(self): + """Missing SOFS configuration shall raise an error.""" + scality.FLAGS.scality_sofs_config = None + 
self.assertRaises(exception.VolumeBackendAPIException, + self._driver.do_setup, None) + + def test_setup_missing_config(self): + """Non-existent SOFS configuration file shall raise an error.""" + scality.FLAGS.scality_sofs_config = 'nonexistent.conf' + self.assertRaises(exception.VolumeBackendAPIException, + self._driver.do_setup, None) + + def test_setup_no_mount_helper(self): + """SOFS must be installed to use the driver.""" + self._set_access_wrapper(False) + self.assertRaises(exception.VolumeBackendAPIException, + self._driver.do_setup, None) + + def test_setup_make_voldir(self): + """The directory for volumes shall be created automatically.""" + self._set_access_wrapper(True) + voldir_path = os.path.join(self.TEST_MOUNT, self.TEST_VOLDIR) + os.rmdir(voldir_path) + self._driver.do_setup(None) + self.assertTrue(os.path.isdir(voldir_path)) + + def test_local_path(self): + """Expected behaviour for local_path.""" + self.assertEqual(self._driver.local_path(self.TEST_VOLUME), + self.TEST_VOLPATH) + + def test_create_volume(self): + """Expected behaviour for create_volume.""" + ret = self._driver.create_volume(self.TEST_VOLUME) + self.assertEqual(ret['provider_location'], + os.path.join(self.TEST_VOLDIR, + self.TEST_VOLNAME)) + self.assertTrue(os.path.isfile(self.TEST_VOLPATH)) + self.assertEqual(os.stat(self.TEST_VOLPATH).st_size, + 100 * 1024 * 1024) + + def test_delete_volume(self): + """Expected behaviour for delete_volume.""" + self._driver.create_volume(self.TEST_VOLUME) + self._driver.delete_volume(self.TEST_VOLUME) + self.assertFalse(os.path.isfile(self.TEST_VOLPATH)) + + def test_create_snapshot(self): + """Expected behaviour for create_snapshot.""" + self._driver.create_volume(self.TEST_VOLUME) + self._driver.create_snapshot(self.TEST_SNAPSHOT) + self.assertTrue(os.path.isfile(self.TEST_SNAPPATH)) + self.assertEqual(os.stat(self.TEST_SNAPPATH).st_size, + 100 * 1024 * 1024) + + def test_delete_snapshot(self): + """Expected behaviour for delete_snapshot.""" + self._driver.create_volume(self.TEST_VOLUME) + self._driver.create_snapshot(self.TEST_SNAPSHOT) + self._driver.delete_snapshot(self.TEST_SNAPSHOT) + self.assertFalse(os.path.isfile(self.TEST_SNAPPATH)) + + def test_initialize_connection(self): + """Expected behaviour for initialize_connection.""" + ret = self._driver.initialize_connection(self.TEST_VOLUME, None) + self.assertEqual(ret['driver_volume_type'], 'scality') + self.assertEqual(ret['data']['sofs_path'], + os.path.join(self.TEST_VOLDIR, + self.TEST_VOLNAME)) diff --git a/cinder/tests/test_service.py b/cinder/tests/test_service.py new file mode 100644 index 0000000000..38bdff33e5 --- /dev/null +++ b/cinder/tests/test_service.py @@ -0,0 +1,224 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
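The service tests below stub the DB layer with mox and pin down how a service heartbeats. Condensed to a sketch (names follow the test's stubs; the real cinder.service.Service.report_state differs in detail):

    def report_state(self):
        ctxt = context.get_admin_context()
        try:
            # bump the heartbeat counter on this service's DB row
            service_ref = db.service_get(ctxt, self.service_id)
            db.service_update(ctxt, self.service_id,
                              {'report_count':
                               service_ref['report_count'] + 1})
            # reaching this point means the DB came back after an outage
            if self.model_disconnected:
                self.model_disconnected = False
        except Exception:
            # first failure flips the flag
            # (see test_report_state_newly_disconnected)
            self.model_disconnected = True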
+ +""" +Unit Tests for remote procedure calls using queue +""" + +import mox +from oslo.config import cfg + +from cinder import context +from cinder import db +from cinder import exception +from cinder import flags +from cinder import manager +from cinder import service +from cinder import test +from cinder import wsgi + +test_service_opts = [ + cfg.StrOpt("fake_manager", + default="cinder.tests.test_service.FakeManager", + help="Manager for testing"), + cfg.StrOpt("test_service_listen", + default=None, + help="Host to bind test service to"), + cfg.IntOpt("test_service_listen_port", + default=0, + help="Port number to bind test service to"), ] + +flags.FLAGS.register_opts(test_service_opts) + + +class FakeManager(manager.Manager): + """Fake manager for tests""" + def __init__(self, host=None, + db_driver=None, service_name=None): + super(FakeManager, self).__init__(host=host, + db_driver=db_driver) + + def test_method(self): + return 'manager' + + +class ExtendedService(service.Service): + def test_method(self): + return 'service' + + +class ServiceManagerTestCase(test.TestCase): + """Test cases for Services""" + + def test_message_gets_to_manager(self): + serv = service.Service('test', + 'test', + 'test', + 'cinder.tests.test_service.FakeManager') + serv.start() + self.assertEqual(serv.test_method(), 'manager') + + def test_override_manager_method(self): + serv = ExtendedService('test', + 'test', + 'test', + 'cinder.tests.test_service.FakeManager') + serv.start() + self.assertEqual(serv.test_method(), 'service') + + +class ServiceFlagsTestCase(test.TestCase): + def test_service_enabled_on_create_based_on_flag(self): + self.flags(enable_new_services=True) + host = 'foo' + binary = 'cinder-fake' + app = service.Service.create(host=host, binary=binary) + app.start() + app.stop() + ref = db.service_get(context.get_admin_context(), app.service_id) + db.service_destroy(context.get_admin_context(), app.service_id) + self.assert_(not ref['disabled']) + + def test_service_disabled_on_create_based_on_flag(self): + self.flags(enable_new_services=False) + host = 'foo' + binary = 'cinder-fake' + app = service.Service.create(host=host, binary=binary) + app.start() + app.stop() + ref = db.service_get(context.get_admin_context(), app.service_id) + db.service_destroy(context.get_admin_context(), app.service_id) + self.assert_(ref['disabled']) + + +class ServiceTestCase(test.TestCase): + """Test cases for Services""" + + def setUp(self): + super(ServiceTestCase, self).setUp() + self.mox.StubOutWithMock(service, 'db') + + def test_create(self): + host = 'foo' + binary = 'cinder-fake' + topic = 'fake' + + # NOTE(vish): Create was moved out of mox replay to make sure that + # the looping calls are created in StartService. 
+ app = service.Service.create(host=host, binary=binary, topic=topic) + + self.assert_(app) + + def test_report_state_newly_disconnected(self): + host = 'foo' + binary = 'bar' + topic = 'test' + service_create = {'host': host, + 'binary': binary, + 'topic': topic, + 'report_count': 0, + 'availability_zone': 'nova'} + service_ref = {'host': host, + 'binary': binary, + 'topic': topic, + 'report_count': 0, + 'availability_zone': 'nova', + 'id': 1} + + service.db.service_get_by_args(mox.IgnoreArg(), + host, + binary).AndRaise(exception.NotFound()) + service.db.service_create(mox.IgnoreArg(), + service_create).AndReturn(service_ref) + service.db.service_get(mox.IgnoreArg(), + mox.IgnoreArg()).AndRaise(Exception()) + + self.mox.ReplayAll() + serv = service.Service(host, + binary, + topic, + 'cinder.tests.test_service.FakeManager') + serv.start() + serv.report_state() + self.assert_(serv.model_disconnected) + + def test_report_state_newly_connected(self): + host = 'foo' + binary = 'bar' + topic = 'test' + service_create = {'host': host, + 'binary': binary, + 'topic': topic, + 'report_count': 0, + 'availability_zone': 'nova'} + service_ref = {'host': host, + 'binary': binary, + 'topic': topic, + 'report_count': 0, + 'availability_zone': 'nova', + 'id': 1} + + service.db.service_get_by_args(mox.IgnoreArg(), + host, + binary).AndRaise(exception.NotFound()) + service.db.service_create(mox.IgnoreArg(), + service_create).AndReturn(service_ref) + service.db.service_get(mox.IgnoreArg(), + service_ref['id']).AndReturn(service_ref) + service.db.service_update(mox.IgnoreArg(), service_ref['id'], + mox.ContainsKeyValue('report_count', 1)) + + self.mox.ReplayAll() + serv = service.Service(host, + binary, + topic, + 'cinder.tests.test_service.FakeManager') + serv.start() + serv.model_disconnected = True + serv.report_state() + + self.assert_(not serv.model_disconnected) + + +class TestWSGIService(test.TestCase): + + def setUp(self): + super(TestWSGIService, self).setUp() + self.stubs.Set(wsgi.Loader, "load_app", mox.MockAnything()) + + def test_service_random_port(self): + test_service = service.WSGIService("test_service") + self.assertEquals(0, test_service.port) + test_service.start() + self.assertNotEqual(0, test_service.port) + test_service.stop() + + +class TestLauncher(test.TestCase): + + def setUp(self): + super(TestLauncher, self).setUp() + self.stubs.Set(wsgi.Loader, "load_app", mox.MockAnything()) + self.service = service.WSGIService("test_service") + + def test_launch_app(self): + self.assertEquals(0, self.service.port) + launcher = service.Launcher() + launcher.launch_server(self.service) + self.assertEquals(0, self.service.port) + launcher.stop() diff --git a/cinder/tests/test_share.py b/cinder/tests/test_share.py new file mode 100644 index 0000000000..341bc5ab92 --- /dev/null +++ b/cinder/tests/test_share.py @@ -0,0 +1,368 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 NetApp +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Tests for Share Code. 
+ +""" + +import datetime +import os + +import mox +import shutil +import tempfile + +from cinder import context +from cinder import db +from cinder import exception +from cinder import flags +from cinder.image import image_utils +from cinder.openstack.common import importutils +from cinder.openstack.common.notifier import api as notifier_api +from cinder.openstack.common.notifier import test_notifier +from cinder.openstack.common import rpc +import cinder.policy +from cinder.share import manager +from cinder import test +from cinder.tests import fake_flags + +FLAGS = flags.FLAGS + + +class FakeShareDriver(object): + def __init__(self, db, **kwargs): + self.db = db + + def allocate_container(self, context, share): + pass + + def allocate_container_from_snapshot(self, context, share, snapshot): + pass + + def create_snapshot(self, context, snapshot): + pass + + def delete_snapshot(self, context, snapshot): + pass + + def deallocate_container(self, context, share): + pass + + def create_share(self, context, share): + return 'fake_location' + + def delete_share(self, context, share): + pass + + def create_export(self, context, share): + pass + + def remove_export(self, context, share): + pass + + def ensure_share(self, context, share): + pass + + def allow_access(self, context, share, access): + pass + + def deny_access(self, context, share, access): + pass + + def check_for_setup_error(self): + pass + + def get_share_stats(self, refresh=False): + return None + + def do_setup(self, context): + pass + + +class ShareTestCase(test.TestCase): + """Test Case for shares.""" + + def setUp(self): + super(ShareTestCase, self).setUp() + self.flags(connection_type='fake', + share_driver='cinder.tests.test_share.FakeShareDriver') + self.share = importutils.import_object(FLAGS.share_manager) + self.context = context.get_admin_context() + + @staticmethod + def _create_share(status="creating", size=0, snapshot_id=None): + """Create a share object.""" + share = {} + share['share_proto'] = "NFS" + share['size'] = size + share['snapshot_id'] = snapshot_id + share['user_id'] = 'fake' + share['project_id'] = 'fake' + share['availability_zone'] = FLAGS.storage_availability_zone + share['status'] = status + share['host'] = FLAGS.host + return db.share_create(context.get_admin_context(), share) + + @staticmethod + def _create_snapshot(status="creating", size=0, share_id=None): + """Create a snapshot object.""" + snapshot = {} + snapshot['share_proto'] = "NFS" + snapshot['size'] = size + snapshot['share_id'] = share_id + snapshot['user_id'] = 'fake' + snapshot['project_id'] = 'fake' + snapshot['status'] = status + return db.share_snapshot_create(context.get_admin_context(), snapshot) + + @staticmethod + def _create_access(state='new', share_id=None): + """Create a access rule object.""" + access = {} + access['access_type'] = 'fake_type' + access['access_to'] = 'fake_IP' + access['share_id'] = share_id + access['state'] = state + return db.share_access_create(context.get_admin_context(), access) + + def test_init_host_ensuring_shares(self): + """Test init_host for ensuring shares and access rules.""" + + share = self._create_share(status='available') + share_id = share['id'] + + another_share = self._create_share(status='error') + + access = self._create_access(share_id=share_id, state='active') + + self.mox.StubOutWithMock(context, 'get_admin_context') + context.get_admin_context().AndReturn(self.context) + + self.mox.StubOutWithMock(db, 'share_get_all_by_host') + db.share_get_all_by_host(self.context, 
mox.IgnoreArg())\ + .AndReturn([share, another_share]) + + driver = self.mox.CreateMockAnything(FakeShareDriver) + driver.do_setup(self.context) + driver.check_for_setup_error() + driver.ensure_share(self.context, share) + driver.allow_access(self.context, share, mox.IgnoreArg()) + driver.get_share_stats(refresh=True) + self.share.driver = driver + + self.mox.ReplayAll() + + self.share.init_host() + + def test_create_share_from_snapshot(self): + """Test share can be created from snapshot.""" + share = self._create_share() + share_id = share['id'] + snapshot = self._create_snapshot(share_id=share_id) + snapshot_id = snapshot['id'] + + self.share.create_share(self.context, share_id, + snapshot_id=snapshot_id) + self.assertEqual(share_id, db.share_get(context.get_admin_context(), + share_id).id) + + shr = db.share_get(self.context, share_id) + self.assertEquals(shr['status'], 'available') + + def test_create_delete_share_snapshot(self): + """Test share's snapshot can be created and deleted.""" + + def _fake_create_snapshot(self, context, snapshot): + snapshot['progress'] = '99%' + return snapshot + + self.stubs.Set(FakeShareDriver, "create_snapshot", + _fake_create_snapshot) + + share = self._create_share() + share_id = share['id'] + snapshot = self._create_snapshot(share_id=share_id) + snapshot_id = snapshot['id'] + + self.share.create_snapshot(self.context, share_id, snapshot_id) + self.assertEqual(share_id, + db.share_snapshot_get(context.get_admin_context(), + snapshot_id).share_id) + + snap = db.share_snapshot_get(self.context, snapshot_id) + self.assertEquals(snap['status'], 'available') + + self.share.delete_snapshot(self.context, snapshot_id) + + self.assertEquals('deleted', db.share_snapshot_get( + context.get_admin_context(read_deleted='yes'), snapshot_id).status) + self.assertRaises(exception.NotFound, + db.share_snapshot_get, + self.context, + snapshot_id) + + def test_create_delete_share_snapshot_error(self): + """Test snapshot can be created and deleted with error.""" + + def _fake_create_delete_snapshot(self, context, snapshot): + raise exception.NotFound() + + self.stubs.Set(FakeShareDriver, "create_snapshot", + _fake_create_delete_snapshot) + self.stubs.Set(FakeShareDriver, "delete_snapshot", + _fake_create_delete_snapshot) + + share = self._create_share() + share_id = share['id'] + snapshot = self._create_snapshot(share_id=share_id) + snapshot_id = snapshot['id'] + + self.assertRaises(exception.NotFound, self.share.create_snapshot, + self.context, share_id, snapshot_id) + + snap = db.share_snapshot_get(self.context, snapshot_id) + self.assertEquals(snap['status'], 'error') + + self.assertRaises(exception.NotFound, self.share.delete_snapshot, + self.context, snapshot_id) + + self.assertEquals('error_deleting', db.share_snapshot_get( + self.context, snapshot_id).status) + + def test_delete_share_if_busy(self): + """Test snapshot could not be deleted if busy.""" + + def _fake_delete_snapshot(self, context, snapshot): + raise exception.SnapshotIsBusy(snapshot_name='fakename') + + self.stubs.Set(FakeShareDriver, "delete_snapshot", + _fake_delete_snapshot) + + snapshot = self._create_snapshot(share_id='fake_id') + snapshot_id = snapshot['id'] + + self.share.delete_snapshot(self.context, snapshot_id) + + snap = db.share_snapshot_get(self.context, snapshot_id) + self.assertEquals(snap['status'], 'available') + + def test_create_delete_share(self): + """Test share can be created and deleted.""" + share = self._create_share() + share_id = share['id'] + 
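+        # NOTE: an access rule is attached up front so that the delete
+        # path below also runs with an access mapping present.
+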
self._create_access(share_id=share_id) + + self.share.create_share(self.context, share_id) + self.assertEqual(share_id, db.share_get(context.get_admin_context(), + share_id).id) + + shr = db.share_get(self.context, share_id) + self.assertEquals(shr['status'], 'available') + + self.share.delete_share(self.context, share_id) + shr = db.share_get(context.get_admin_context(read_deleted='yes'), + share_id) + + self.assertEquals(shr['status'], 'deleted') + self.assertRaises(exception.NotFound, + db.share_get, + self.context, + share_id) + + def test_create_delete_share_error(self): + """Test share can be created and deleted with error.""" + + def _fake_create_export(self, context, share): + raise exception.NotFound() + + def _fake_deallocate_container(self, context, share): + raise exception.NotFound() + + self.stubs.Set(FakeShareDriver, "create_export", _fake_create_export) + self.stubs.Set(FakeShareDriver, "deallocate_container", + _fake_deallocate_container) + + share = self._create_share() + share_id = share['id'] + self.assertRaises(exception.NotFound, + self.share.create_share, + self.context, + share_id) + + shr = db.share_get(self.context, share_id) + self.assertEquals(shr['status'], 'error') + self.assertRaises(exception.NotFound, + self.share.delete_share, + self.context, + share_id) + + shr = db.share_get(self.context, share_id) + self.assertEquals(shr['status'], 'error_deleting') + + def test_allow_deny_access(self): + """Test access rules to share can be created and deleted.""" + + share = self._create_share() + share_id = share['id'] + access = self._create_access(share_id=share_id) + access_id = access['id'] + self.share.allow_access(self.context, access_id) + self.assertEqual('active', db.share_access_get(self.context, + access_id).state) + + self.share.deny_access(self.context, access_id) + acs = db.share_access_get( + context.get_admin_context(read_deleted='yes'), + access_id) + self.assertEquals(acs['state'], 'deleted') + self.assertRaises(exception.NotFound, + db.share_access_get, + self.context, + access_id) + + def test_allow_deny_access_error(self): + """Test access rules to share can be created and deleted with error.""" + + def _fake_allow_access(self, context, share, access): + raise exception.NotFound() + + def _fake_deny_access(self, context, share, access): + raise exception.NotFound() + + self.stubs.Set(FakeShareDriver, "allow_access", _fake_allow_access) + self.stubs.Set(FakeShareDriver, "deny_access", _fake_deny_access) + + share = self._create_share() + share_id = share['id'] + access = self._create_access(share_id=share_id) + access_id = access['id'] + + self.assertRaises(exception.NotFound, + self.share.allow_access, + self.context, + access_id) + + acs = db.share_access_get(self.context, access_id) + self.assertEquals(acs['state'], 'error') + + self.assertRaises(exception.NotFound, + self.share.deny_access, + self.context, + access_id) + + acs = db.share_access_get(self.context, access_id) + self.assertEquals(acs['state'], 'error') diff --git a/cinder/tests/test_share_api.py b/cinder/tests/test_share_api.py new file mode 100644 index 0000000000..d11e07414f --- /dev/null +++ b/cinder/tests/test_share_api.py @@ -0,0 +1,513 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# Copyright 2012 NetApp +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +"""Unit tests for the Share API module.""" + +import datetime +import mox +import random +import suds + +from cinder import context +from cinder import db as db_driver +from cinder import exception +from cinder.openstack.common import timeutils +from cinder.scheduler import rpcapi as scheduler_rpcapi +from cinder import share +from cinder.share import api as share_api +from cinder.share import rpcapi as share_rpcapi +from cinder import test +from cinder.tests.db import fakes as db_fakes + + +def fake_share(id, **kwargs): + share = { + 'id': id, + 'size': 1, + 'user_id': 'fakeuser', + 'project_id': 'fakeproject', + 'snapshot_id': None, + 'availability_zone': 'fakeaz', + 'status': 'fakestatus', + 'display_name': 'fakename', + 'display_description': 'fakedesc', + 'share_proto': 'nfs', + 'export_location': 'fake_location', + 'host': 'fakehost', + 'scheduled_at': datetime.datetime(1, 1, 1, 1, 1, 1), + 'launched_at': datetime.datetime(1, 1, 1, 1, 1, 1), + 'terminated_at': datetime.datetime(1, 1, 1, 1, 1, 1) + } + share.update(kwargs) + return share + + +def fake_snapshot(id, **kwargs): + snapshot = { + 'id': id, + 'share_size': 1, + 'user_id': 'fakeuser', + 'project_id': 'fakeproject', + 'share_id': None, + 'availability_zone': 'fakeaz', + 'status': 'fakestatus', + 'display_name': 'fakename', + 'display_description': 'fakedesc', + 'share_proto': 'nfs', + 'export_location': 'fake_location', + 'progress': 'fakeprogress99%', + 'scheduled_at': datetime.datetime(1, 1, 1, 1, 1, 1), + 'launched_at': datetime.datetime(1, 1, 1, 1, 1, 1), + 'terminated_at': datetime.datetime(1, 1, 1, 1, 1, 1) + } + snapshot.update(kwargs) + return snapshot + + +def fake_access(id, **kwargs): + access = { + 'id': id, + 'share_id': 'fakeshareid', + 'access_type': 'fakeacctype', + 'access_to': 'fakeaccto', + 'state': 'fakeactive', + 'STATE_NEW': 'fakenew', + 'STATE_ACTIVE': 'fakeactive', + 'STATE_DELETING': 'fakedeleting', + 'STATE_DELETED': 'fakedeleted', + 'STATE_ERROR': 'fakeerror', + } + access.update(kwargs) + return db_fakes.FakeModel(access) + + +class ShareAPITestCase(test.TestCase): + def setUp(self): + super(ShareAPITestCase, self).setUp() + self.context = context.get_admin_context() + self.scheduler_rpcapi = self.mox.CreateMock( + scheduler_rpcapi.SchedulerAPI) + self.share_rpcapi = self.mox.CreateMock(share_rpcapi.ShareAPI) + self.api = share.API() + + self.stubs.Set(self.api, 'scheduler_rpcapi', self.scheduler_rpcapi) + self.stubs.Set(self.api, 'share_rpcapi', self.share_rpcapi) + + def tearDown(self): + super(ShareAPITestCase, self).tearDown() + timeutils.clear_time_override() + + def test_create(self): + date = datetime.datetime(1, 1, 1, 1, 1, 1) + timeutils.set_time_override(override_time=date) + share = fake_share('fakeid', + user_id=self.context.user_id, + project_id=self.context.project_id, + status='creating') + options = share.copy() + for name in ('id', 'export_location', 'host', 'launched_at', + 'terminated_at'): + options.pop(name, None) + request_spec = {'share_properties': options, + 'share_proto': share['share_proto'], + 'share_id': share['id'], + 'snapshot_id': share['snapshot_id'], + } + 
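+        # NOTE: the create() flow under test writes the DB row first
+        # (share_create below), then hands placement to the scheduler over
+        # RPC with the request_spec built above; the mox expectations that
+        # follow record exactly that ordering before ReplayAll().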
+ self.mox.StubOutWithMock(db_driver, 'share_create') + db_driver.share_create(self.context, options).AndReturn(share) + self.scheduler_rpcapi.create_share(self.context, mox.IgnoreArg(), + share['id'], share['snapshot_id'], + request_spec=request_spec, + filter_properties={}) + self.mox.ReplayAll() + self.api.create(self.context, 'nfs', '1', 'fakename', 'fakedesc', + availability_zone='fakeaz') + + def test_create_snapshot(self): + date = datetime.datetime(1, 1, 1, 1, 1, 1) + timeutils.set_time_override(override_time=date) + share = fake_share('fakeid', + status='available') + snapshot = fake_snapshot('fakesnapshotid', + share_id=share['id'], + status='creating') + fake_name = 'fakename' + fake_desc = 'fakedesc' + options = {'share_id': share['id'], + 'user_id': self.context.user_id, + 'project_id': self.context.project_id, + 'status': "creating", + 'progress': '0%', + 'share_size': share['size'], + 'display_name': fake_name, + 'display_description': fake_desc, + 'share_proto': share['share_proto'], + 'export_location': share['export_location']} + + self.mox.StubOutWithMock(share_api, 'check_policy') + share_api.check_policy(self.context, 'create_snapshot', share) + self.mox.StubOutWithMock(db_driver, 'share_snapshot_create') + db_driver.share_snapshot_create(self.context, + options).AndReturn(snapshot) + self.share_rpcapi.create_snapshot(self.context, share, snapshot) + self.mox.ReplayAll() + self.api.create_snapshot(self.context, share, fake_name, fake_desc) + + def test_delete_snapshot(self): + date = datetime.datetime(1, 1, 1, 1, 1, 1) + timeutils.set_time_override(override_time=date) + share = fake_share('fakeid') + snapshot = fake_snapshot('fakesnapshotid', share_id=share['id'], + status='available') + self.mox.StubOutWithMock(share_api, 'check_policy') + share_api.check_policy(self.context, 'delete_snapshot', snapshot) + self.mox.StubOutWithMock(db_driver, 'share_snapshot_update') + db_driver.share_snapshot_update(self.context, snapshot['id'], + {'status': 'deleting'}) + self.mox.StubOutWithMock(db_driver, 'share_get') + db_driver.share_get(self.context, + snapshot['share_id']).AndReturn(share) + self.share_rpcapi.delete_snapshot(self.context, snapshot, + share['host']) + self.mox.ReplayAll() + self.api.delete_snapshot(self.context, snapshot) + + def test_delete_snapshot_wrong_status(self): + snapshot = fake_snapshot('fakesnapshotid', share_id='fakeshareid', + status='creating') + self.mox.StubOutWithMock(share_api, 'check_policy') + share_api.check_policy(self.context, 'delete_snapshot', snapshot) + self.mox.ReplayAll() + self.assertRaises(exception.InvalidShareSnapshot, + self.api.delete_snapshot, self.context, snapshot) + + def test_create_snapshot_if_share_not_available(self): + share = fake_share('fakeid', + status='error') + self.mox.StubOutWithMock(share_api, 'check_policy') + share_api.check_policy(self.context, 'create_snapshot', share) + self.mox.ReplayAll() + self.assertRaises(exception.InvalidShare, self.api.create_snapshot, + self.context, share, 'fakename', 'fakedesc') + + def test_create_from_snapshot_available(self): + date = datetime.datetime(1, 1, 1, 1, 1, 1) + timeutils.set_time_override(override_time=date) + snapshot = fake_snapshot('fakesnapshotid', + share_id='fakeshare_id', + status='available') + share = fake_share('fakeid', + user_id=self.context.user_id, + project_id=self.context.project_id, + snapshot_id=snapshot['id'], + status='creating') + options = share.copy() + for name in ('id', 'export_location', 'host', 'launched_at', + 'terminated_at'): + 
options.pop(name, None) + request_spec = {'share_properties': options, + 'share_proto': share['share_proto'], + 'share_id': share['id'], + 'snapshot_id': share['snapshot_id'], + } + + self.mox.StubOutWithMock(db_driver, 'share_create') + db_driver.share_create(self.context, options).AndReturn(share) + self.scheduler_rpcapi.create_share(self.context, mox.IgnoreArg(), + share['id'], share['snapshot_id'], + request_spec=request_spec, + filter_properties={}) + self.mox.ReplayAll() + self.api.create(self.context, 'nfs', '1', 'fakename', 'fakedesc', + snapshot=snapshot, availability_zone='fakeaz') + + def test_get_snapshot(self): + fake_get_snap = {'fake_key': 'fake_val'} + self.mox.StubOutWithMock(share_api, 'check_policy') + share_api.check_policy(self.context, 'get_snapshot') + self.mox.StubOutWithMock(db_driver, 'share_snapshot_get') + db_driver.share_snapshot_get(self.context, + 'fakeid').AndReturn(fake_get_snap) + self.mox.ReplayAll() + rule = self.api.get_snapshot(self.context, 'fakeid') + self.assertEqual(rule, fake_get_snap) + + def test_create_from_snapshot_not_available(self): + snapshot = fake_snapshot('fakesnapshotid', + share_id='fakeshare_id', + status='error') + self.mox.ReplayAll() + self.assertRaises(exception.InvalidShareSnapshot, self.api.create, + self.context, 'nfs', '1', 'fakename', + 'fakedesc', snapshot=snapshot, + availability_zone='fakeaz') + + def test_create_wrong_size_0(self): + self.mox.ReplayAll() + self.assertRaises(exception.InvalidInput, self.api.create, + self.context, 'nfs', 0, 'fakename', 'fakedesc', + availability_zone='fakeaz') + + def test_create_wrong_size_some(self): + self.mox.ReplayAll() + self.assertRaises(exception.InvalidInput, self.api.create, + self.context, 'nfs', 'some', 'fakename', + 'fakedesc', availability_zone='fakeaz') + + def test_delete_available(self): + date = datetime.datetime(2, 2, 2, 2, 2, 2) + timeutils.set_time_override(override_time=date) + share = fake_share('fakeid', status='available') + options = {'status': 'deleting', + 'terminated_at': date} + deleting_share = share.copy() + deleting_share.update(options) + + self.mox.StubOutWithMock(db_driver, 'share_update') + db_driver.share_update(self.context, share['id'], options).\ + AndReturn(deleting_share) + self.share_rpcapi.delete_share(self.context, deleting_share) + self.mox.ReplayAll() + self.api.delete(self.context, share) + self.mox.UnsetStubs() + self.mox.VerifyAll() + + def test_delete_error(self): + date = datetime.datetime(2, 2, 2, 2, 2, 2) + timeutils.set_time_override(override_time=date) + share = fake_share('fakeid', status='error') + options = {'status': 'deleting', + 'terminated_at': date} + deleting_share = share.copy() + deleting_share.update(options) + + self.mox.StubOutWithMock(db_driver, 'share_update') + db_driver.share_update(self.context, share['id'], options).\ + AndReturn(deleting_share) + self.share_rpcapi.delete_share(self.context, deleting_share) + self.mox.ReplayAll() + self.api.delete(self.context, share) + self.mox.UnsetStubs() + self.mox.VerifyAll() + + def test_delete_wrong_status(self): + share = fake_share('fakeid') + self.mox.ReplayAll() + self.assertRaises(exception.InvalidShare, self.api.delete, + self.context, share) + + def test_delete_no_host(self): + share = fake_share('fakeid') + share['host'] = None + + self.mox.StubOutWithMock(db_driver, 'share_delete') + db_driver.share_delete(self.context, 'fakeid') + self.mox.ReplayAll() + self.api.delete(self.context, share) + + def test_get(self): + self.mox.StubOutWithMock(db_driver, 'share_get') 
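+        # NOTE: the mox pattern used throughout these tests: once a
+        # function is stubbed with StubOutWithMock, calls made before
+        # ReplayAll() are recorded as expectations, and AndReturn()
+        # supplies the canned result used during replay.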
+        db_driver.share_get(self.context, 'fakeid').AndReturn('fakeshare')
+        self.mox.StubOutWithMock(share_api, 'check_policy')
+        share_api.check_policy(self.context, 'get', 'fakeshare')
+        self.mox.ReplayAll()
+        result = self.api.get(self.context, 'fakeid')
+        self.assertEqual(result, 'fakeshare')
+
+    def test_get_all_admin_not_all_tenants(self):
+        ctx = context.RequestContext('fakeuid', 'fakepid', is_admin=True)
+        self.mox.StubOutWithMock(share_api, 'check_policy')
+        share_api.check_policy(ctx, 'get_all')
+        self.mox.StubOutWithMock(db_driver, 'share_get_all_by_project')
+        db_driver.share_get_all_by_project(ctx, 'fakepid')
+        self.mox.ReplayAll()
+        self.api.get_all(ctx)
+
+    def test_get_all_admin_all_tenants(self):
+        self.mox.StubOutWithMock(share_api, 'check_policy')
+        share_api.check_policy(self.context, 'get_all')
+        self.mox.StubOutWithMock(db_driver, 'share_get_all')
+        db_driver.share_get_all(self.context)
+        self.mox.ReplayAll()
+        self.api.get_all(self.context, search_opts={'all_tenants': 1})
+
+    def test_get_all_not_admin(self):
+        ctx = context.RequestContext('fakeuid', 'fakepid', is_admin=False)
+        self.mox.StubOutWithMock(share_api, 'check_policy')
+        share_api.check_policy(ctx, 'get_all')
+        self.mox.StubOutWithMock(db_driver, 'share_get_all_by_project')
+        db_driver.share_get_all_by_project(ctx, 'fakepid')
+        self.mox.ReplayAll()
+        self.api.get_all(ctx)
+
+    def test_get_all_not_admin_search_opts(self):
+        search_opts = {'size': 'fakesize'}
+        fake_objs = [{'name': 'fakename1'}, search_opts]
+        ctx = context.RequestContext('fakeuid', 'fakepid', is_admin=False)
+        self.mox.StubOutWithMock(share_api, 'check_policy')
+        share_api.check_policy(ctx, 'get_all')
+        self.mox.StubOutWithMock(db_driver, 'share_get_all_by_project')
+        db_driver.share_get_all_by_project(ctx,
+                                           'fakepid').AndReturn(fake_objs)
+        self.mox.ReplayAll()
+        result = self.api.get_all(ctx, search_opts)
+        self.assertEqual([search_opts], result)
+
+    def test_get_all_snapshots_admin_not_all_tenants(self):
+        ctx = context.RequestContext('fakeuid', 'fakepid', is_admin=True)
+        self.mox.StubOutWithMock(share_api, 'check_policy')
+        share_api.check_policy(ctx, 'get_all_snapshots')
+        self.mox.StubOutWithMock(db_driver,
+                                 'share_snapshot_get_all_by_project')
+        db_driver.share_snapshot_get_all_by_project(ctx, 'fakepid')
+        self.mox.ReplayAll()
+        self.api.get_all_snapshots(ctx)
+
+    def test_get_all_snapshots_admin_all_tenants(self):
+        self.mox.StubOutWithMock(share_api, 'check_policy')
+        share_api.check_policy(self.context, 'get_all_snapshots')
+        self.mox.StubOutWithMock(db_driver, 'share_snapshot_get_all')
+        db_driver.share_snapshot_get_all(self.context)
+        self.mox.ReplayAll()
+        self.api.get_all_snapshots(self.context,
+                                   search_opts={'all_tenants': 1})
+
+    def test_get_all_snapshots_not_admin(self):
+        ctx = context.RequestContext('fakeuid', 'fakepid', is_admin=False)
+        self.mox.StubOutWithMock(share_api, 'check_policy')
+        share_api.check_policy(ctx, 'get_all_snapshots')
+        self.mox.StubOutWithMock(db_driver,
+                                 'share_snapshot_get_all_by_project')
+        db_driver.share_snapshot_get_all_by_project(ctx, 'fakepid')
+        self.mox.ReplayAll()
+        self.api.get_all_snapshots(ctx)
+
+    def test_get_all_snapshots_not_admin_search_opts(self):
+        search_opts = {'size': 'fakesize'}
+        fake_objs = [{'name': 'fakename1'}, search_opts]
+        ctx = context.RequestContext('fakeuid', 'fakepid', is_admin=False)
+        self.mox.StubOutWithMock(share_api, 'check_policy')
+        share_api.check_policy(ctx, 'get_all_snapshots')
+        self.mox.StubOutWithMock(db_driver,
+
'share_snapshot_get_all_by_project') + db_driver.share_snapshot_get_all_by_project(ctx, 'fakepid').\ + AndReturn(fake_objs) + self.mox.ReplayAll() + result = self.api.get_all_snapshots(ctx, search_opts) + self.assertEqual([search_opts], result) + + def test_allow_access(self): + share = fake_share('fakeid', status='available') + values = {'share_id': share['id'], + 'access_type': 'fakeacctype', + 'access_to': 'fakeaccto'} + self.mox.StubOutWithMock(share_api, 'check_policy') + share_api.check_policy(self.context, 'allow_access') + self.mox.StubOutWithMock(db_driver, 'share_access_create') + db_driver.share_access_create(self.context, values).\ + AndReturn('fakeacc') + self.share_rpcapi.allow_access(self.context, share, 'fakeacc') + self.mox.ReplayAll() + access = self.api.allow_access(self.context, share, 'fakeacctype', + 'fakeaccto') + self.assertEqual(access, 'fakeacc') + + def test_allow_access_status_not_available(self): + share = fake_share('fakeid', status='error') + self.mox.ReplayAll() + self.assertRaises(exception.InvalidShare, self.api.allow_access, + self.context, share, 'fakeacctype', 'fakeaccto') + + def test_allow_access_no_host(self): + share = fake_share('fakeid', host=None) + self.mox.ReplayAll() + self.assertRaises(exception.InvalidShare, self.api.allow_access, + self.context, share, 'fakeacctype', 'fakeaccto') + + def test_deny_access_error(self): + share = fake_share('fakeid', status='available') + access = fake_access('fakaccid', state='fakeerror') + self.mox.StubOutWithMock(share_api, 'check_policy') + share_api.check_policy(self.context, 'deny_access') + self.mox.StubOutWithMock(db_driver, 'share_access_delete') + db_driver.share_access_delete(self.context, access['id']) + self.mox.ReplayAll() + self.api.deny_access(self.context, share, access) + + def test_deny_access_active(self): + share = fake_share('fakeid', status='available') + access = fake_access('fakaccid', state='fakeactive') + self.mox.StubOutWithMock(share_api, 'check_policy') + share_api.check_policy(self.context, 'deny_access') + self.mox.StubOutWithMock(db_driver, 'share_access_update') + db_driver.share_access_update(self.context, access['id'], + {'state': 'fakedeleting'}) + self.share_rpcapi.deny_access(self.context, share, access) + self.mox.ReplayAll() + self.api.deny_access(self.context, share, access) + + def test_deny_access_not_active_not_error(self): + share = fake_share('fakeid', status='available') + access = fake_access('fakaccid', state='fakenew') + self.mox.StubOutWithMock(share_api, 'check_policy') + share_api.check_policy(self.context, 'deny_access') + self.mox.ReplayAll() + self.assertRaises(exception.InvalidShareAccess, self.api.deny_access, + self.context, share, access) + + def test_deny_access_status_not_available(self): + share = fake_share('fakeid', status='error') + self.mox.StubOutWithMock(share_api, 'check_policy') + share_api.check_policy(self.context, 'deny_access') + self.mox.ReplayAll() + self.assertRaises(exception.InvalidShare, self.api.deny_access, + self.context, share, 'fakeacc') + + def test_deny_access_no_host(self): + share = fake_share('fakeid', host=None) + self.mox.StubOutWithMock(share_api, 'check_policy') + share_api.check_policy(self.context, 'deny_access') + self.mox.ReplayAll() + self.assertRaises(exception.InvalidShare, self.api.deny_access, + self.context, share, 'fakeacc') + + def test_access_get(self): + self.mox.StubOutWithMock(share_api, 'check_policy') + share_api.check_policy(self.context, 'access_get') + self.mox.StubOutWithMock(db_driver, 
'share_access_get') + db_driver.share_access_get(self.context, 'fakeid').AndReturn('fake') + self.mox.ReplayAll() + rule = self.api.access_get(self.context, 'fakeid') + self.assertEqual(rule, 'fake') + + def test_access_get_all(self): + share = fake_share('fakeid') + self.mox.StubOutWithMock(share_api, 'check_policy') + share_api.check_policy(self.context, 'access_get_all') + self.mox.StubOutWithMock(db_driver, 'share_access_get_all_for_share') + db_driver.share_access_get_all_for_share(self.context, 'fakeid').\ + AndReturn([fake_access('fakeacc0id', state='fakenew'), + fake_access('fakeacc1id', state='fakeerror')]) + self.mox.ReplayAll() + rules = self.api.access_get_all(self.context, share) + self.assertEqual(rules, [{'id': 'fakeacc0id', + 'access_type': 'fakeacctype', + 'access_to': 'fakeaccto', + 'state': 'fakenew'}, + {'id': 'fakeacc1id', + 'access_type': 'fakeacctype', + 'access_to': 'fakeaccto', + 'state': 'fakeerror'}]) diff --git a/cinder/tests/test_share_driver.py b/cinder/tests/test_share_driver.py new file mode 100644 index 0000000000..92eb77924a --- /dev/null +++ b/cinder/tests/test_share_driver.py @@ -0,0 +1,49 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# Copyright 2012 NetApp +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +"""Unit tests for the Share driver module.""" + +import time + +from cinder import exception +from cinder.share.configuration import Configuration +from cinder.share import driver +from cinder import test +from cinder import utils + + +def fake_execute_with_raise(*cmd, **kwargs): + raise exception.ProcessExecutionError + + +def fake_sleep(duration): + pass + + +class ShareDriverTestCase(test.TestCase): + def setUp(self): + super(ShareDriverTestCase, self).setUp() + self.utils = utils + self.stubs.Set(self.utils, 'execute', fake_execute_with_raise) + self.time = time + self.stubs.Set(self.time, 'sleep', fake_sleep) + + def tearDown(self): + super(ShareDriverTestCase, self).tearDown() + + def test__try_execute(self): + execute_mixin = driver.ExecuteMixin(configuration=Configuration(None)) + self.assertRaises(exception.ProcessExecutionError, + execute_mixin._try_execute) diff --git a/cinder/tests/test_share_lvm.py b/cinder/tests/test_share_lvm.py new file mode 100644 index 0000000000..b2dc8bf89e --- /dev/null +++ b/cinder/tests/test_share_lvm.py @@ -0,0 +1,754 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# Copyright 2012 NetApp +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
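The LVM share driver tests below capture every shell command through fake_utils.fake_execute and assert on the exact command lines. A rough sketch of the provisioning sequence they expect (allocate_container here is an illustrative stand-in; the volume group and names come from the fake_share() fixture, not real configuration):

    def allocate_container(execute, share, vg='fakevg', mirrors=0):
        # e.g. 'lvcreate -L 1G -n fakename fakevg'
        cmd = ['lvcreate', '-L', '%sG' % share['size'],
               '-n', share['name'], vg]
        if mirrors:
            # mirrored variant asserted by test_allocate_container_mirrors
            cmd += ['-m', str(mirrors), '--nosync', '-R', '2']
        execute(*cmd)
        # the logical volume is then formatted ext4 under /dev/mapper
        execute('mkfs.ext4', '/dev/mapper/%s-%s' % (vg, share['name']))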
+"""Unit tests for the NFS driver module.""" + +import mox +import os + +from cinder import context +from cinder.db.sqlalchemy import models +from cinder import exception +from cinder import flags +from cinder.openstack.common import importutils +from cinder.openstack.common import log as logging +from cinder.share.configuration import Configuration +from cinder.share.drivers import lvm +from cinder import test +from cinder.tests.db import fakes as db_fakes +from cinder.tests import fake_utils + + +FLAGS = flags.FLAGS + + +def fake_share(**kwargs): + share = { + 'id': 'fakeid', + 'name': 'fakename', + 'size': 1, + 'share_proto': 'NFS', + 'export_location': '127.0.0.1:/mnt/nfs/volume-00002', + } + share.update(kwargs) + return db_fakes.FakeModel(share) + + +def fake_snapshot(**kwargs): + snapshot = { + 'id': 'fakesnapshotid', + 'share_name': 'fakename', + 'share_id': 'fakeid', + 'name': 'fakesnapshotname', + 'share_size': 1, + 'share_proto': 'NFS', + 'export_location': '127.0.0.1:/mnt/nfs/volume-00002', + } + snapshot.update(kwargs) + return db_fakes.FakeModel(snapshot) + + +def fake_access(**kwargs): + access = { + 'id': 'fakeaccid', + 'access_type': 'ip', + 'access_to': '10.0.0.2', + 'state': 'active', + } + access.update(kwargs) + return db_fakes.FakeModel(access) + + +class LVMShareDriverTestCase(test.TestCase): + """Tests LVMShareDriver.""" + + def setUp(self): + super(LVMShareDriverTestCase, self).setUp() + fake_utils.stub_out_utils_execute(self.stubs) + self._execute = fake_utils.fake_execute + self._context = context.get_admin_context() + + FLAGS.set_default('share_volume_group', 'fakevg') + FLAGS.set_default('share_export_ip', '10.0.0.1') + + self._helper_cifs = self.mox.CreateMock(lvm.CIFSNetConfHelper) + self._helper_nfs = self.mox.CreateMock(lvm.NFSHelper) + self.fake_conf = Configuration(None) + self._db = self.mox.CreateMockAnything() + self._driver = lvm.LVMShareDriver(self._db, + execute=self._execute, + configuration=self.fake_conf) + self._driver._helpers = { + 'CIFS': self._helper_cifs, + 'NFS': self._helper_nfs, + } + + self.share = fake_share() + self.access = fake_access() + self.snapshot = fake_snapshot() + + def tearDown(self): + super(LVMShareDriverTestCase, self).tearDown() + fake_utils.fake_execute_set_repliers([]) + fake_utils.fake_execute_clear_log() + + def test_do_setup(self): + self.mox.StubOutWithMock(importutils, 'import_class') + helpers = [ + (self._helper_cifs, 'cinder.share.drivers.lvm.CIFSNetConfHelper'), + (self._helper_nfs, 'cinder.share.drivers.lvm.NFSHelper'), + ] + for helper, path in helpers: + importutils.import_class(path).AndReturn(helper) + helper.__call__(fake_utils.fake_execute, + self.fake_conf).\ + AndReturn(helper) + helper.init() + self.mox.ReplayAll() + self._driver.do_setup(self._context) + expected_helpers = { + 'CIFS': self._helper_cifs, + 'NFS': self._helper_nfs, + } + self.assertEqual(self._driver._helpers, expected_helpers) + + def test_check_for_setup_error(self): + def exec_runner(*ignore_args, **ignore_kwargs): + return '\n fake1\n fakevg\n fake2\n', '' + + expected_exec = [ + 'vgs --noheadings -o name', + ] + fake_utils.fake_execute_set_repliers([(expected_exec[0], exec_runner)]) + self.mox.ReplayAll() + ret = self._driver.check_for_setup_error() + self.assertEqual(fake_utils.fake_execute_get_log(), expected_exec) + + def test_check_for_setup_error_no_vg(self): + def exec_runner(*ignore_args, **ignore_kwargs): + return '\n fake0\n fake1\n fake2\n', '' + + fake_utils.fake_execute_set_repliers([('vgs --noheadings -o name', + 
exec_runner)]) + self.mox.ReplayAll() + self.assertRaises(exception.InvalidParameterValue, + self._driver.check_for_setup_error) + + def test_check_for_setup_error_no_export_ip(self): + def exec_runner(*ignore_args, **ignore_kwargs): + return '\n fake1\n fakevg\n fake2\n', '' + + fake_utils.fake_execute_set_repliers([('vgs --noheadings -o name', + exec_runner)]) + FLAGS.set_default('share_export_ip', None) + self.mox.ReplayAll() + self.assertRaises(exception.InvalidParameterValue, + self._driver.check_for_setup_error) + + def test_local_path_normal(self): + share = fake_share(name='fake_sharename') + FLAGS.set_default('share_volume_group', 'fake_vg') + self.mox.ReplayAll() + ret = self._driver._local_path(share) + self.assertEqual(ret, '/dev/mapper/fake_vg-fake_sharename') + + def test_local_path_escapes(self): + share = fake_share(name='fake-sharename') + FLAGS.set_default('share_volume_group', 'fake-vg') + self.mox.ReplayAll() + ret = self._driver._local_path(share) + self.assertEqual(ret, '/dev/mapper/fake--vg-fake--sharename') + + def test_allocate_container_normal(self): + FLAGS.set_default('share_lvm_mirrors', 0) + self.mox.ReplayAll() + ret = self._driver.allocate_container(self._context, self.share) + expected_exec = [ + 'lvcreate -L 1G -n fakename fakevg', + 'mkfs.ext4 /dev/mapper/fakevg-fakename', + ] + self.assertEqual(fake_utils.fake_execute_get_log(), expected_exec) + + def test_allocate_container_from_snapshot(self): + FLAGS.set_default('share_lvm_mirrors', 0) + mount_share = '/dev/mapper/fakevg-fakename' + mount_snapshot = '/dev/mapper/fakevg-fakesnapshotname' + self.mox.ReplayAll() + ret = self._driver.allocate_container_from_snapshot(self._context, + self.share, + self.snapshot) + expected_exec = [ + 'lvcreate -L 1G -n fakename fakevg', + ("dd count=0 if=%s of=%s iflag=direct oflag=direct" % + (mount_snapshot, mount_share)), + ("dd if=%s of=%s count=1024 bs=1M iflag=direct oflag=direct" % + (mount_snapshot, mount_share)), + ] + self.assertEqual(fake_utils.fake_execute_get_log(), expected_exec) + + def test_allocate_container_from_snapshot_without_extra(self): + def exec_runner(*ignore_args, **ignore_kwargs): + raise exception.ProcessExecutionError() + + FLAGS.set_default('share_lvm_mirrors', 0) + mount_share = '/dev/mapper/fakevg-fakename' + mount_snapshot = '/dev/mapper/fakevg-fakesnapshotname' + expected_exec = [ + 'lvcreate -L 1G -n fakename fakevg', + ("dd count=0 if=%s of=%s iflag=direct oflag=direct" % + (mount_snapshot, mount_share)), + "dd if=%s of=%s count=1024 bs=1M" % (mount_snapshot, mount_share), + ] + fake_utils.fake_execute_set_repliers([(expected_exec[1], exec_runner)]) + self.mox.ReplayAll() + ret = self._driver.allocate_container_from_snapshot(self._context, + self.share, + self.snapshot) + self.assertEqual(fake_utils.fake_execute_get_log(), expected_exec) + + def test_allocate_container_mirrors(self): + share = fake_share(size='2048') + FLAGS.set_default('share_lvm_mirrors', 2) + self.mox.ReplayAll() + ret = self._driver.allocate_container(self._context, share) + expected_exec = [ + 'lvcreate -L 2048G -n fakename fakevg -m 2 --nosync -R 2', + 'mkfs.ext4 /dev/mapper/fakevg-fakename', + ] + self.assertEqual(fake_utils.fake_execute_get_log(), expected_exec) + + def test_deallocate_container(self): + self.mox.ReplayAll() + expected_exec = ['lvremove -f fakevg/fakename'] + ret = self._driver.deallocate_container(self._context, self.share) + self.assertEqual(fake_utils.fake_execute_get_log(), expected_exec) + + def test_get_share_stats(self): + def 
exec_runner(*ignore_args, **ignore_kwargs): + return '\n fakevg 5.38 4.30\n', '' + + expected_exec = [ + 'vgs --noheadings --nosuffix --unit=G -o name,size,free fakevg', + ] + fake_utils.fake_execute_set_repliers([(expected_exec[0], exec_runner)]) + FLAGS.set_default('reserved_percentage', 1) + self.mox.ReplayAll() + ret = self._driver.get_share_stats(refresh=True) + expected_ret = { + 'share_backend_name': 'LVM', + 'vendor_name': 'Open Source', + 'driver_version': '1.0', + 'storage_protocol': 'NFS_CIFS', + 'total_capacity_gb': 5.38, + 'free_capacity_gb': 4.30, + 'reserved_percentage': 0, + 'QoS_support': False, + } + self.assertEqual(fake_utils.fake_execute_get_log(), expected_exec) + self.assertEqual(ret, expected_ret) + + def test_get_share_stats_error(self): + def exec_runner(*ignore_args, **ignore_kwargs): + raise exception.ProcessExecutionError() + + expected_exec = [ + 'vgs --noheadings --nosuffix --unit=G -o name,size,free fakevg', + ] + fake_utils.fake_execute_set_repliers([(expected_exec[0], exec_runner)]) + FLAGS.set_default('reserved_percentage', 1) + self.mox.ReplayAll() + ret = self._driver.get_share_stats(refresh=True) + expected_ret = { + 'share_backend_name': 'LVM', + 'vendor_name': 'Open Source', + 'driver_version': '1.0', + 'storage_protocol': 'NFS_CIFS', + 'total_capacity_gb': 0, + 'free_capacity_gb': 0, + 'reserved_percentage': 0, + 'QoS_support': False, + } + self.assertEqual(fake_utils.fake_execute_get_log(), expected_exec) + self.assertEqual(ret, expected_ret) + + def test_create_export(self): + self.mox.StubOutWithMock(self._driver, '_mount_device') + self._driver._mount_device(self.share, '/dev/mapper/fakevg-fakename').\ + AndReturn('fakelocation') + self.mox.ReplayAll() + ret = self._driver.create_export(self._context, self.share) + expected_ret = { + 'provider_location': 'fakelocation', + } + self.assertEqual(ret, expected_ret) + + def test_remove_export(self): + mount_path = self._get_mount_path(self.share) + + self.mox.StubOutWithMock(os.path, 'exists') + os.path.exists(mount_path).AndReturn(True) + + self.mox.StubOutWithMock(os, 'rmdir') + os.rmdir(mount_path) + + self.mox.ReplayAll() + self._driver.remove_export(self._context, self.share) + expected_exec = [ + "umount -f %s" % (mount_path,), + ] + self.assertEqual(fake_utils.fake_execute_get_log(), expected_exec) + + def test_remove_export_is_busy_error(self): + def exec_runner(*ignore_args, **ignore_kwargs): + raise exception.ProcessExecutionError(stderr='device is busy') + + mount_path = self._get_mount_path(self.share) + expected_exec = [ + "umount -f %s" % (mount_path), + ] + fake_utils.fake_execute_set_repliers([(expected_exec[0], exec_runner)]) + self.mox.StubOutWithMock(os.path, 'exists') + os.path.exists(mount_path).AndReturn(True) + self.mox.ReplayAll() + self.assertRaises(exception.ShareIsBusy, self._driver.remove_export, + self._context, self.share) + self.assertEqual(fake_utils.fake_execute_get_log(), expected_exec) + + def test_remove_export_error(self): + def exec_runner(*ignore_args, **ignore_kwargs): + raise exception.ProcessExecutionError(stderr='fake error') + + mount_path = self._get_mount_path(self.share) + expected_exec = [ + "umount -f %s" % (mount_path), + ] + fake_utils.fake_execute_set_repliers([(expected_exec[0], exec_runner)]) + self.mox.StubOutWithMock(os.path, 'exists') + os.path.exists(mount_path).AndReturn(True) + self.mox.ReplayAll() + self._driver.remove_export(self._context, self.share) + self.assertEqual(fake_utils.fake_execute_get_log(), expected_exec) + + def 
test_create_share(self): + mount_path = self._get_mount_path(self.share) + self._helper_nfs.create_export(mount_path, self.share['name']).\ + AndReturn('fakelocation') + self.mox.ReplayAll() + ret = self._driver.create_share(self._context, self.share) + self.assertEqual(ret, 'fakelocation') + + def test_create_snapshot(self): + self.mox.ReplayAll() + self._driver.create_snapshot(self._context, self.snapshot) + expected_exec = [ + ("lvcreate -L 1G --name fakesnapshotname --snapshot %s/fakename" % + (FLAGS.share_volume_group,)), + ] + self.assertEqual(fake_utils.fake_execute_get_log(), expected_exec) + + def test_ensure_share(self): + mount_path = self._get_mount_path(self.share) + self.mox.StubOutWithMock(self._driver, '_mount_device') + self._driver._mount_device(self.share, '/dev/mapper/fakevg-fakename').\ + AndReturn(mount_path) + self._helper_nfs.create_export(mount_path, self.share['name'], + recreate=True).AndReturn('fakelocation') + self.mox.ReplayAll() + self._driver.ensure_share(self._context, self.share) + + def test_delete_share(self): + mount_path = self._get_mount_path(self.share) + self._helper_nfs.remove_export(mount_path, self.share['name']) + self.mox.ReplayAll() + self._driver.delete_share(self._context, self.share) + + def test_delete_snapshot(self): + self.mox.ReplayAll() + expected_exec = ['lvremove -f fakevg/fakesnapshotname'] + self._driver.delete_snapshot(self._context, self.snapshot) + self.assertEqual(fake_utils.fake_execute_get_log(), expected_exec) + + def test_delete_share_process_error(self): + self.mox.StubOutWithMock(self._driver, '_get_mount_path') + self._driver._get_mount_path(self.share).AndRaise( + exception.ProcessExecutionError()) + self.mox.ReplayAll() + self._driver.delete_share(self._context, self.share) + + def test_delete_share_invalid_share(self): + self.mox.StubOutWithMock(self._driver, '_get_helper') + self._driver._get_helper(self.share).AndRaise( + exception.InvalidShare(reason='fake')) + self.mox.ReplayAll() + self._driver.delete_share(self._context, self.share) + + def test_allow_access(self): + mount_path = self._get_mount_path(self.share) + self._helper_nfs.allow_access(mount_path, + self.share['name'], + self.access['access_type'], + self.access['access_to']) + self.mox.ReplayAll() + self._driver.allow_access(self._context, self.share, self.access) + + def test_deny_access(self): + mount_path = self._get_mount_path(self.share) + self._helper_nfs.deny_access(mount_path, + self.share['name'], + self.access['access_type'], + self.access['access_to']) + self.mox.ReplayAll() + self._driver.deny_access(self._context, self.share, self.access) + + def test_mount_device(self): + mount_path = self._get_mount_path(self.share) + self.mox.ReplayAll() + ret = self._driver._mount_device(self.share, 'fakedevice') + expected_exec = [ + "mkdir -p %s" % (mount_path,), + "mount fakedevice %s" % (mount_path,), + "chmod 777 %s" % (mount_path,), + ] + self.assertEqual(fake_utils.fake_execute_get_log(), expected_exec) + self.assertEqual(ret, mount_path) + + def test_mount_device_already(self): + def exec_runner(*ignore_args, **ignore_kwargs): + raise exception.ProcessExecutionError(stderr='already mounted') + + mount_path = self._get_mount_path(self.share) + expected_exec = [ + "mkdir -p %s" % (mount_path,), + "mount fakedevice %s" % (mount_path,), + ] + fake_utils.fake_execute_set_repliers([(expected_exec[1], exec_runner)]) + self.mox.ReplayAll() + ret = self._driver._mount_device(self.share, 'fakedevice') + self.assertEqual(fake_utils.fake_execute_get_log(), 
expected_exec) + self.assertEqual(ret, mount_path) + + def test_mount_device_error(self): + def exec_runner(*ignore_args, **ignore_kwargs): + raise exception.ProcessExecutionError(stderr='fake error') + + mount_path = self._get_mount_path(self.share) + expected_exec = [ + "mkdir -p %s" % (mount_path,), + "mount fakedevice %s" % (mount_path,), + ] + fake_utils.fake_execute_set_repliers([(expected_exec[1], exec_runner)]) + self.mox.ReplayAll() + self.assertRaises(exception.ProcessExecutionError, + self._driver._mount_device, self.share, 'fakedevice') + + def test_get_helper(self): + share_cifs = fake_share(share_proto='CIFS') + share_nfs = fake_share(share_proto='NFS') + share_fake = fake_share(share_proto='FAKE') + self.mox.ReplayAll() + self.assertEqual(self._driver._get_helper(share_cifs), + self._helper_cifs) + self.assertEqual(self._driver._get_helper(share_nfs), + self._helper_nfs) + self.assertRaises(exception.InvalidShare, self._driver._get_helper, + fake_share(share_proto='FAKE')) + + def _get_mount_path(self, share): + return os.path.join(FLAGS.share_export_root, share['name']) + + +class NFSHelperTestCase(test.TestCase): + """Test case for NFS driver.""" + + def setUp(self): + super(NFSHelperTestCase, self).setUp() + fake_utils.stub_out_utils_execute(self.stubs) + FLAGS.set_default('share_export_ip', '127.0.0.1') + self._execute = fake_utils.fake_execute + self.fake_conf = Configuration(None) + self._helper = lvm.NFSHelper(self._execute, self.fake_conf) + fake_utils.fake_execute_clear_log() + + def tearDown(self): + super(NFSHelperTestCase, self).tearDown() + fake_utils.fake_execute_set_repliers([]) + fake_utils.fake_execute_clear_log() + + def test_failed_init(self): + self.mox.StubOutWithMock(self, '_execute') + self._execute('exportfs', check_exit_code=True, run_as_root=True).\ + AndRaise(exception.ProcessExecutionError) + self.mox.ReplayAll() + self.assertRaises(exception.Error, lvm.NFSHelper.__init__, + self._helper, self._execute, self.fake_conf) + + def test_create_export(self): + self.mox.ReplayAll() + ret = self._helper.create_export('/opt/nfs', 'volume-00001') + expected_location = '%s:/opt/nfs' % FLAGS.share_export_ip + self.assertEqual(ret, expected_location) + + def test_remove_export(self): + self.mox.ReplayAll() + self._helper.remove_export('/opt/nfs', 'volume-00001') + + def test_allow_access(self): + self.mox.ReplayAll() + self._helper.allow_access('/opt/nfs', 'volume-00001', 'ip', '10.0.0.*') + + export_string = '10.0.0.*:/opt/nfs' + expected_exec = [ + 'exportfs', + 'exportfs -o rw,no_subtree_check %s' % export_string, + ] + self.assertEqual(fake_utils.fake_execute_get_log(), expected_exec) + + def test_allow_access_no_ip(self): + self.mox.ReplayAll() + self.assertRaises(exception.InvalidShareAccess, + self._helper.allow_access, '/opt/nfs', 'share0', + 'fake', 'fakerule') + + def test_allow_access_negative(self): + def exec_runner(*ignore_args, **ignore_kwargs): + return '\n/opt/nfs\t\t10.0.0.*\n', '' + + fake_utils.fake_execute_set_repliers([('exportfs', exec_runner)]) + self.mox.ReplayAll() + self.assertRaises(exception.ShareAccessExists, + self._helper.allow_access, + '/opt/nfs', 'volume-00001', 'ip', '10.0.0.*') + + def test_deny_access(self): + self.mox.ReplayAll() + self._helper.deny_access('/opt/nfs', 'volume-00001', 'ip', '10.0.0.*') + export_string = '10.0.0.*:/opt/nfs' + expected_exec = ['exportfs -u %s' % export_string] + self.assertEqual(fake_utils.fake_execute_get_log(), expected_exec) + + +class CIFSNetConfHelperTestCase(test.TestCase): + """Test 
case for CIFS driver with net conf management.""" + + def setUp(self): + super(CIFSNetConfHelperTestCase, self).setUp() + fake_utils.stub_out_utils_execute(self.stubs) + FLAGS.set_default('share_export_ip', '127.0.0.1') + self.share = fake_share() + self._execute = fake_utils.fake_execute + self.fake_conf = Configuration(None) + self._helper = lvm.CIFSNetConfHelper(self._execute, self.fake_conf) + fake_utils.fake_execute_clear_log() + + def tearDown(self): + super(CIFSNetConfHelperTestCase, self).tearDown() + fake_utils.fake_execute_set_repliers([]) + fake_utils.fake_execute_clear_log() + + def test_create_export(self): + share_name = self.share['name'] + self.mox.StubOutWithMock(self._helper, '_execute') + self._helper._execute('net', 'conf', 'addshare', share_name, + 'fakelocalpath', 'writeable=y', 'guest_ok=y', + run_as_root=True) + parameters = { + 'browseable': 'yes', + 'create mask': '0755', + 'hosts deny': '0.0.0.0/0', + 'hosts allow': '127.0.0.1', + } + for name, value in parameters.items(): + self._helper._execute('net', 'conf', 'setparm', share_name, name, + value, run_as_root=True) + self.mox.ReplayAll() + ret = self._helper.create_export('fakelocalpath', share_name) + expected_ret = "//127.0.0.1/%s" % (share_name,) + self.assertEqual(ret, expected_ret) + + def test_create_export_already_exists(self): + def exec_runner(*ignore_args, **ignore_kwargs): + raise exception.ProcessExecutionError(stderr='already exists') + + expected_exec = [ + "net conf addshare %s %s writeable=y guest_ok=y" % ( + self.share['name'], + 'fakelocalpath', + ), + ] + fake_utils.fake_execute_set_repliers([(expected_exec[0], exec_runner)]) + self.mox.ReplayAll() + self.assertRaises(exception.ShareBackendException, + self._helper.create_export, 'fakelocalpath', + self.share['name']) + + def test_create_export_recreate(self): + share_name = self.share['name'] + self.mox.StubOutWithMock(self._helper, '_execute') + self._helper._execute('net', 'conf', 'addshare', share_name, + 'fakelocalpath', 'writeable=y', 'guest_ok=y', + run_as_root=True).\ + AndRaise(exception.ProcessExecutionError(stderr='already exists')) + self._helper._execute('net', 'conf', 'delshare', share_name, + run_as_root=True) + self._helper._execute('net', 'conf', 'addshare', share_name, + 'fakelocalpath', 'writeable=y', 'guest_ok=y', + run_as_root=True) + parameters = { + 'browseable': 'yes', + 'create mask': '0755', + 'hosts deny': '0.0.0.0/0', + 'hosts allow': '127.0.0.1', + } + for name, value in parameters.items(): + self._helper._execute('net', 'conf', 'setparm', share_name, name, + value, run_as_root=True) + self.mox.ReplayAll() + ret = self._helper.create_export('fakelocalpath', share_name, + recreate=True) + expected_ret = "//127.0.0.1/%s" % (share_name,) + self.assertEqual(ret, expected_ret) + + def test_create_export_error(self): + share_name = self.share['name'] + self.mox.StubOutWithMock(self._helper, '_execute') + self._helper._execute('net', 'conf', 'addshare', share_name, + 'fakelocalpath', 'writeable=y', 'guest_ok=y', + run_as_root=True).\ + AndRaise(exception.ProcessExecutionError(stderr='fake error')) + self.mox.ReplayAll() + self.assertRaises(exception.ProcessExecutionError, + self._helper.create_export, 'fakelocalpath', + share_name) + + def test_remove_export(self): + share_name = self.share['name'] + self.mox.StubOutWithMock(self._helper, '_execute') + self._helper._execute('net', 'conf', 'delshare', share_name, + run_as_root=True) + self._helper._execute('smbcontrol', 'all', 'close-share', share_name, + run_as_root=True) 
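+        # The two _execute() calls above were made in mox's record phase;
+        # ReplayAll() below switches to replay mode, so remove_export()
+        # must now issue exactly these 'net conf delshare' and
+        # 'smbcontrol close-share' commands to pass verification.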
+ self.mox.ReplayAll() + self._helper.remove_export('fakelocalpath', share_name) + + def test_remove_export_no_such_service(self): + share_name = self.share['name'] + self.mox.StubOutWithMock(self._helper, '_execute') + self._helper._execute('net', 'conf', 'delshare', share_name, + run_as_root=True).\ + AndRaise(exception.ProcessExecutionError( + stderr='SBC_ERR_NO_SUCH_SERVICE')) + self._helper._execute('smbcontrol', 'all', 'close-share', share_name, + run_as_root=True) + self.mox.ReplayAll() + self._helper.remove_export('fakelocalpath', share_name) + + def test_remove_export_error(self): + share_name = self.share['name'] + self.mox.StubOutWithMock(self._helper, '_execute') + self._helper._execute('net', 'conf', 'delshare', share_name, + run_as_root=True).\ + AndRaise(exception.ProcessExecutionError(stderr='fake error')) + self.mox.ReplayAll() + self.assertRaises(exception.ProcessExecutionError, + self._helper.remove_export, 'fakelocalpath', + share_name) + + def test_allow_access(self): + share_name = self.share['name'] + self.mox.StubOutWithMock(self._helper, '_get_allow_hosts') + self.mox.StubOutWithMock(self._helper, '_set_allow_hosts') + self._helper._get_allow_hosts(share_name).AndReturn(['127.0.0.1', + '10.0.0.1']) + self._helper._set_allow_hosts(['127.0.0.1', '10.0.0.1', '10.0.0.2'], + share_name) + self.mox.ReplayAll() + self._helper.allow_access('fakelocalpath', share_name, 'ip', + '10.0.0.2') + + def test_allow_access_exists(self): + share_name = self.share['name'] + self.mox.StubOutWithMock(self._helper, '_get_allow_hosts') + self._helper._get_allow_hosts(share_name).AndReturn(['127.0.0.1', + '10.0.0.1']) + self.mox.ReplayAll() + self.assertRaises(exception.ShareAccessExists, + self._helper.allow_access, 'fakelocalpath', + share_name, 'ip', '10.0.0.1') + + def test_allow_access_wrong_type(self): + share_name = self.share['name'] + self.mox.ReplayAll() + self.assertRaises(exception.InvalidShareAccess, + self._helper.allow_access, 'fakelocalpath', + share_name, 'fake', 'fake access') + + def test_deny_access(self): + share_name = self.share['name'] + self.mox.StubOutWithMock(self._helper, '_get_allow_hosts') + self.mox.StubOutWithMock(self._helper, '_set_allow_hosts') + self._helper._get_allow_hosts(share_name).AndReturn(['127.0.0.1', + '10.0.0.1']) + self._helper._set_allow_hosts(['127.0.0.1'], share_name) + self.mox.ReplayAll() + self._helper.deny_access('fakelocalpath', share_name, 'ip', + '10.0.0.1') + + def test_deny_access_not_exists(self): + share_name = self.share['name'] + self.mox.StubOutWithMock(self._helper, '_get_allow_hosts') + self._helper._get_allow_hosts(share_name).\ + AndRaise(exception.ProcessExecutionError(stdout='does not exist')) + self.mox.ReplayAll() + self.assertRaises(exception.ProcessExecutionError, + self._helper.deny_access, 'fakelocalpath', + share_name, 'ip', '10.0.0.1') + + def test_deny_access_not_exists_force(self): + share_name = self.share['name'] + self.mox.StubOutWithMock(self._helper, '_get_allow_hosts') + self._helper._get_allow_hosts(share_name).\ + AndRaise(exception.ProcessExecutionError(stdout='does not exist')) + self.mox.ReplayAll() + self._helper.deny_access('fakelocalpath', share_name, 'ip', '10.0.0.1', + force=True) + + def test_deny_access_error(self): + share_name = self.share['name'] + self.mox.StubOutWithMock(self._helper, '_get_allow_hosts') + self._helper._get_allow_hosts(share_name).\ + AndRaise(exception.ProcessExecutionError(stdout='fake out')) + self.mox.ReplayAll() + self.assertRaises(exception.ProcessExecutionError, + 
self._helper.deny_access, 'fakelocalpath', + share_name, 'ip', '10.0.0.1') + + def test_get_allow_hosts(self): + share_name = self.share['name'] + self.mox.StubOutWithMock(self._helper, '_execute') + self._helper._execute('net', 'conf', 'getparm', share_name, + 'hosts allow', run_as_root=True).\ + AndReturn(('127.0.0.1 10.0.0.1', '')) + self.mox.ReplayAll() + ret = self._helper._get_allow_hosts(share_name) + self.assertEqual(ret, ['127.0.0.1', '10.0.0.1']) + + def test_set_allow_hosts(self): + share_name = self.share['name'] + self.mox.StubOutWithMock(self._helper, '_execute') + self._helper._execute('net', 'conf', 'setparm', share_name, + 'hosts allow', '127.0.0.1 10.0.0.1', + run_as_root=True) + self.mox.ReplayAll() + self._helper._set_allow_hosts(['127.0.0.1', '10.0.0.1'], share_name) diff --git a/cinder/tests/test_share_netapp.py b/cinder/tests/test_share_netapp.py new file mode 100644 index 0000000000..89e697c247 --- /dev/null +++ b/cinder/tests/test_share_netapp.py @@ -0,0 +1,690 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# Copyright 2012 NetApp +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Unit tests for the NetApp NAS driver module.""" + +from mox import IgnoreArg +import random +import suds + +from cinder import context +from cinder import exception +from cinder.share.configuration import Configuration +from cinder.share.drivers import netapp +from cinder import test + + +class FakeObject(object): + pass + + +class FakeRequest(object): + def __init__(self, name=None, args=None): + self.Name = name + self.Args = args + + +class FakeStartResp(object): + def __init__(self): + self.Tag = random.randint(1, 100) + self.Records = random.randint(1, 10) + + +class FakeStatus(object): + def __init__(self, status): + self.Status = status + + +class FakeAggregates(object): + def __init__(self, max_aggr_id): + class AggrSizeAvail(object): + def __init__(self, filer_id, avail): + self.AggregateSize = FakeObject() + self.FilerId = filer_id + self.AggregateName = 'filer%d:aggr0' % filer_id + setattr(self.AggregateSize, 'SizeAvailable', avail) + + class AggregateInfo(object): + def __init__(self): + self.AggregateInfo = [AggrSizeAvail(1, 10), + AggrSizeAvail(2, 20), + AggrSizeAvail(3, 1), + AggrSizeAvail(max_aggr_id, 50), + AggrSizeAvail(5, 15)] + + self.Aggregates = AggregateInfo() + + +class FakeSnapshots(object): + def __init__(self, snapshot_name, is_busy='false'): + class Result(object): + def __init__(self): + self.snapshots = [{}] + self.snapshots[0]['snapshot-info'] = [ + {'name': [snapshot_name], 'busy': [is_busy]}, + {'name': ['fakesnapname1'], 'busy': [is_busy]}, + {'name': ['fakesnapname2'], 'busy': ['true']}, + ] + + self.Results = Result() + + +class FakeNfsRules(object): + def __init__(self): + class Rules(object): + def __init__(self): + self.rules = [ + {'exports-rule-info-2': [ + {'security-rules': [ + {'security-rule-info': [ + {'root': [ + {'exports-hostname-info': [ + {'name': 'allowed_host'}, + {'name': 'disallowed_host'}]} + ]} + ]} + ]} + 
]} + ] + + self.Results = Rules() + + +class FakeHost(object): + def __init__(self, id): + self.HostId = id + + +class FakeHostInfo(object): + def __init__(self): + self.Hosts = FakeObject() + setattr(self.Hosts, 'HostInfo', [FakeHost(1), FakeHost(2)]) + + +class FakeFilter(object): + def __init__(self, id=0): + self.ObjectNameOrId = id + + +class FakeTimestamp(object): + def __init__(self, monitor_name='file_system', last_stamp=1): + self.MonitorName = monitor_name + self.LastMonitoringTimestamp = last_stamp + + +class NetAppShareDriverTestCase(test.TestCase): + """Tests Netapp-specific share driver. + """ + + def setUp(self): + super(NetAppShareDriverTestCase, self).setUp() + self._context = context.get_admin_context() + self._db = self.mox.CreateMockAnything() + self._driver = netapp.NetAppShareDriver( + self._db, + configuration=Configuration(None)) + self._driver._client = self.mox.CreateMock(netapp.NetAppApiClient) + cifs_helper = self.mox.CreateMock(netapp.NetAppCIFSHelper) + nfs_helper = self.mox.CreateMock(netapp.NetAppNFSHelper) + self._driver._helpers = {'CIFS': cifs_helper, 'NFS': nfs_helper} + + def test_setup_check(self): + self._driver._client.do_setup() + self.mox.ReplayAll() + self._driver.do_setup(self._context) + + def test_load_balancer(self): + drv = self._driver + max_aggr_id = 123 + + drv._client.get_available_aggregates().AndReturn( + FakeAggregates(max_aggr_id)) + + self.mox.ReplayAll() + + aggr = drv._find_best_aggregate() + + self.assertEquals(max_aggr_id, aggr.FilerId) + + def test_allocate_container(self): + drv = self._driver + client = drv._client + share = {'id': 'fakeshareid', 'size': 1} + max_aggr_id = 123 + + client.get_available_aggregates().AndReturn( + FakeAggregates(max_aggr_id)) + client.send_request_to(max_aggr_id, 'volume-create', IgnoreArg()) + + self.mox.ReplayAll() + + drv.allocate_container(self._context, share) + + self.assertEqual(max_aggr_id, drv._share_table[share['id']]) + + def test_allocate_container_from_snapshot(self): + drv = self._driver + client = drv._client + share_id = 'fakeshareid' + share = {'id': share_id, 'size': 1} + snapshot = {'id': 'fakesnapshotid', 'size': 1, + 'share_id': share_id} + max_aggr_id = 123 + + drv._share_table[share_id] = max_aggr_id + + client.send_request_to(max_aggr_id, 'volume-clone-create', IgnoreArg()) + + self.mox.ReplayAll() + + drv.allocate_container_from_snapshot(self._context, share, snapshot) + + self.assertEqual(max_aggr_id, drv._share_table[share['id']]) + + def test_deallocate_container_target_exists(self): + drv = self._driver + client = drv._client + share_id = 'share-vol_id' + share = {'id': share_id, 'size': 1} + max_aggr_id = 123 + + client.get_available_aggregates().AndReturn( + FakeAggregates(max_aggr_id)) + client.send_request_to(max_aggr_id, 'volume-create', IgnoreArg()) + client.send_request_to(max_aggr_id, 'volume-offline', IgnoreArg()) + client.send_request_to(max_aggr_id, 'volume-destroy', IgnoreArg()) + + self.mox.ReplayAll() + + drv.allocate_container(self._context, share) + drv.deallocate_container(self._context, share) + + self.assertEquals(len(drv._share_table.keys()), 0) + + def test_share_create(self): + drv = self._driver + ctx = self._context + share_proto = 'CIFS' + share = {'id': '1234-abcd-5678', + 'share_proto': share_proto, + 'size': 1} + + drv._helpers[share_proto].create_share(IgnoreArg(), share) + + self.mox.ReplayAll() + + drv.create_share(ctx, share) + + def test_share_delete(self): + drv = self._driver + ctx = self._context + share_proto = 'NFS' + helper = 
drv._helpers[share_proto] + ip = '172.10.0.1' + export = '/export_path' + share = {'id': 'abcd-1234', + 'share_proto': share_proto, + 'export_location': ':'.join([ip, export])} + fake_access_rules = [1, 2, 3] + + helper.get_target(share).AndReturn(ip) + helper.delete_share(share) + + self.mox.ReplayAll() + + drv.delete_share(ctx, share) + + def test_create_snapshot(self): + drv = self._driver + client = drv._client + share_id = 'fakeshareid' + share = {'id': share_id, 'size': 1} + snapshot = {'id': 'fakesnapshotid', 'size': 1, + 'share_id': share_id} + max_aggr_id = 123 + + drv._share_table[share_id] = max_aggr_id + + client.send_request_to(max_aggr_id, 'snapshot-create', IgnoreArg()) + + self.mox.ReplayAll() + + drv.create_snapshot(self._context, snapshot) + + def test_delete_snapshot(self): + drv = self._driver + client = drv._client + share_id = 'fakeshareid' + share = {'id': share_id, 'size': 1} + snapshot = {'id': 'fakesnapshotid', 'size': 1, + 'share_id': share_id} + max_aggr_id = 123 + + drv._share_table[share_id] = max_aggr_id + + client.send_request_to(max_aggr_id, 'snapshot-list-info', IgnoreArg(), + do_response_check=False).\ + AndReturn(FakeSnapshots(netapp._get_valid_snapshot_name( + snapshot['id']))) + client.send_request_to(max_aggr_id, 'snapshot-delete', IgnoreArg()) + + self.mox.ReplayAll() + + drv.delete_snapshot(self._context, snapshot) + + def test_delete_snapshot_if_busy(self): + drv = self._driver + client = drv._client + share_id = 'fakeshareid' + share = {'id': share_id, 'size': 1} + snapshot = {'id': 'fakesnapshotid', 'size': 1, + 'share_id': share_id} + max_aggr_id = 123 + + drv._share_table[share_id] = max_aggr_id + + client.send_request_to(max_aggr_id, 'snapshot-list-info', IgnoreArg(), + do_response_check=False).\ + AndReturn(FakeSnapshots(netapp._get_valid_snapshot_name( + snapshot['id']), is_busy='true')) + + self.mox.ReplayAll() + + self.assertRaises(exception.ShareSnapshotIsBusy, drv.delete_snapshot, + self._context, snapshot) + + def test_allow_access(self): + drv = self._driver + share_proto = 'CIFS' + ctx = self._context + share = {'share_proto': share_proto} + access = {} + + drv._helpers[share_proto].allow_access(ctx, share, access) + + self.mox.ReplayAll() + + drv.allow_access(ctx, share, access) + + def test_deny_access(self): + drv = self._driver + share_proto = 'CIFS' + ctx = self._context + share = {'share_proto': share_proto} + access = {} + + drv._helpers[share_proto].deny_access(ctx, share, access) + + self.mox.ReplayAll() + + drv.deny_access(ctx, share, access) + + def test_no_aggregates_available(self): + drv = self._driver + ctx = self._context + share = None + + drv._client.get_available_aggregates().AndReturn(None) + + self.mox.ReplayAll() + + self.assertRaises(exception.Error, drv.allocate_container, ctx, share) + + +class NetAppNfsHelperTestCase(test.TestCase): + """ + Tests Netapp-specific NFS driver. 
+ """ + def setUp(self): + super(NetAppNfsHelperTestCase, self).setUp() + + fake_client = self.mox.CreateMock(netapp.NetAppApiClient) + fake_conf = self.mox.CreateMock(Configuration) + self._driver = netapp.NetAppNFSHelper(fake_client, fake_conf) + + def test_create_share(self): + drv = self._driver + client = drv._client + target = 123 + share = {'id': 'abc-1234-567'} + + client.send_request_to(target, 'nfs-exportfs-append-rules-2', + IgnoreArg()) + client.get_host_ip_by(target).AndReturn('host:export') + + self.mox.ReplayAll() + + export = drv.create_share(target, share) + + self.assertEquals(export.find('-'), -1) + + def test_delete_share(self): + drv = self._driver + client = drv._client + share = {'export_location': 'host:export'} + + client.send_request_to(IgnoreArg(), 'nfs-exportfs-delete-rules', + IgnoreArg()) + + self.mox.ReplayAll() + + drv.delete_share(share) + + def test_invalid_allow_access(self): + drv = self._driver + share = None + access = {'access_type': 'passwd'} # passwd type is not supported + + self.assertRaises(exception.Error, drv.allow_access, context, share, + access) + + def test_allow_access(self): + drv = self._driver + client = drv._client + share = {'export_location': 'host:export'} + access = {'access_to': ['127.0.0.1', '127.0.0.2'], + 'access_type': 'ip'} + + client.send_request_to(IgnoreArg(), 'nfs-exportfs-list-rules-2', + IgnoreArg()).AndReturn(FakeNfsRules()) + client.send_request_to(IgnoreArg(), 'nfs-exportfs-append-rules-2', + IgnoreArg()) + + self.mox.ReplayAll() + + drv.allow_access(context, share, access) + + def test_deny_access(self): + drv = self._driver + client = drv._client + share = {'export_location': 'host:export'} + access = {'access_to': ['127.0.0.1', '127.0.0.2']} + + client.send_request_to(IgnoreArg(), 'nfs-exportfs-list-rules-2', + IgnoreArg()).AndReturn(FakeNfsRules()) + client.send_request_to(IgnoreArg(), 'nfs-exportfs-append-rules-2', + IgnoreArg()) + + self.mox.ReplayAll() + + drv.deny_access(context, share, access) + + def test_get_target(self): + drv = self._driver + ip = '172.18.0.1' + export_path = '/home' + share = {'export_location': ':'.join([ip, export_path])} + + self.assertEquals(drv.get_target(share), ip) + + +class NetAppCifsHelperTestCase(test.TestCase): + """ + Tests Netapp-specific CIFS driver. 
+ """ + def setUp(self): + super(NetAppCifsHelperTestCase, self).setUp() + + fake_client = self.mox.CreateMock(netapp.NetAppApiClient) + fake_conf = self.mox.CreateMock(Configuration) + self._driver = netapp.NetAppCIFSHelper(fake_client, fake_conf) + + def tearDown(self): + super(NetAppCifsHelperTestCase, self).tearDown() + + def test_create_share(self): + drv = self._driver + client = drv._client + target = 123 + share = {'id': 'abc-1234-567'} + ip = '172.0.0.1' + + client.send_request_to(target, 'cifs-status').AndReturn( + FakeStatus('stopped')) + client.send_request_to(target, 'cifs-start', + do_response_check=False) + client.send_request_to(target, 'system-cli', IgnoreArg()) + client.send_request_to(target, 'cifs-share-add', IgnoreArg()) + client.send_request_to(target, 'cifs-share-ace-delete', IgnoreArg()) + client.get_host_ip_by(target).AndReturn(ip) + + self.mox.ReplayAll() + + export = drv.create_share(target, share) + + self.assertEquals(export.find('-'), -1) + self.assertTrue(export.startswith('//' + ip)) + + def test_delete_share(self): + drv = self._driver + client = drv._client + ip = '172.10.0.1' + export = 'home' + share = {'export_location': '//%s/%s' % (ip, export)} + + client.send_request_to(IgnoreArg(), 'cifs-share-delete', IgnoreArg()) + + self.mox.ReplayAll() + + drv.delete_share(share) + + def test_allow_access_by_ip(self): + drv = self._driver + access = {'access_type': 'ip', 'access_to': '123.123.123.123'} + share = None + + self.assertRaises(exception.Error, drv.allow_access, context, share, + access) + + def test_allow_access_by_passwd_invalid_user(self): + drv = self._driver + client = drv._client + access = {'access_type': 'passwd', 'access_to': 'user:pass'} + ip = '172.0.0.1' + export = 'export_path' + share = {'export_location': '//%s/%s' % (ip, export)} + status = FakeStatus('failed') + + client.send_request_to(ip, 'useradmin-user-list', IgnoreArg(), + do_response_check=False).AndReturn(status) + + self.mox.ReplayAll() + + self.assertRaises(exception.Error, drv.allow_access, context, share, + access) + + def test_allow_access_by_passwd_existing_user(self): + drv = self._driver + client = drv._client + access = {'access_type': 'passwd', 'access_to': 'user:pass'} + ip = '172.0.0.1' + export = 'export_path' + share = {'export_location': '//%s/%s' % (ip, export)} + status = FakeStatus('passed') + + client.send_request_to(ip, 'useradmin-user-list', IgnoreArg(), + do_response_check=False).AndReturn(status) + client.send_request_to(ip, 'cifs-share-ace-set', IgnoreArg()) + + self.mox.ReplayAll() + + drv.allow_access(context, share, access) + + def test_deny_access(self): + drv = self._driver + client = drv._client + access = {'access_type': 'passwd', 'access_to': 'user:pass'} + ip = '172.0.0.1' + export = 'export_path' + share = {'export_location': '//%s/%s' % (ip, export)} + + client.send_request_to(ip, 'cifs-share-ace-delete', IgnoreArg()) + + self.mox.ReplayAll() + + drv.deny_access(context, share, access) + + def test_get_target(self): + drv = self._driver + ip = '172.10.0.1' + export = 'export_path' + share = {'export_location': '//%s/%s' % (ip, export)} + + self.assertEquals(drv.get_target(share), ip) + + +class NetAppNASHelperTestCase(test.TestCase): + def setUp(self): + super(NetAppNASHelperTestCase, self).setUp() + + fake_client = self.mox.CreateMock(suds.client.Client) + fake_conf = self.mox.CreateMock(Configuration) + self._driver = netapp.NetAppNASHelperBase(fake_client, fake_conf) + + def tearDown(self): + super(NetAppNASHelperTestCase, self).tearDown() 
+
+    def test_create_share(self):
+        drv = self._driver
+        target_id = None
+        share = None
+        self.assertRaises(NotImplementedError, drv.create_share, target_id,
+                          share)
+
+    def test_delete_share(self):
+        drv = self._driver
+        share = None
+        self.assertRaises(NotImplementedError, drv.delete_share, share)
+
+    def test_allow_access(self):
+        drv = self._driver
+        share = None
+        ctx = None
+        access = None
+        self.assertRaises(NotImplementedError, drv.allow_access, ctx, share,
+                          access)
+
+    def test_deny_access(self):
+        drv = self._driver
+        share = None
+        ctx = None
+        access = None
+        self.assertRaises(NotImplementedError, drv.deny_access, ctx, share,
+                          access)
+
+    def test_get_target(self):
+        drv = self._driver
+        share = None
+        self.assertRaises(NotImplementedError, drv.get_target, share)
+
+
+class NetAppApiClientTestCase(test.TestCase):
+    """Tests for NetApp DFM API client.
+    """
+
+    def setUp(self):
+        super(NetAppApiClientTestCase, self).setUp()
+        self.fake_conf = self.mox.CreateMock(Configuration)
+        self._context = context.get_admin_context()
+        self._driver = netapp.NetAppApiClient(self.fake_conf)
+
+        self._driver._client = self.mox.CreateMock(suds.client.Client)
+        self._driver._client.factory = self.mox.CreateMock(suds.client.Factory)
+        # service object is generated dynamically from XML
+        self._driver._client.service = self.mox.CreateMockAnything(
+            suds.client.ServiceSelector)
+
+    def test_get_host_by_ip(self):
+        drv = self._driver
+        client = drv._client
+        service = client.service
+        host_id = 123
+
+        # can't use 'filter' because it is a Python built-in
+        fltr = client.factory.create('HostListInfoIterStart').AndReturn(
+            FakeFilter())
+
+        resp = service.HostListInfoIterStart(HostListInfoIterStart=fltr)
+        resp = resp.AndReturn(FakeStartResp())
+        service_list = service.HostListInfoIterNext(Tag=resp.Tag,
+                                                    Maximum=resp.Records)
+        service_list.AndReturn(FakeHostInfo())
+        service.HostListInfoIterEnd(Tag=resp.Tag)
+
+        self.mox.ReplayAll()
+
+        drv.get_host_ip_by(host_id)
+
+    def test_get_available_aggregates(self):
+        drv = self._driver
+        client = drv._client
+        service = client.service
+
+        resp = service.AggregateListInfoIterStart().AndReturn(FakeStartResp())
+        service.AggregateListInfoIterNext(Tag=resp.Tag, Maximum=resp.Records)
+        service.AggregateListInfoIterEnd(resp.Tag)
+
+        self.mox.ReplayAll()
+
+        drv.get_available_aggregates()
+
+    def test_send_successful_request(self):
+        drv = self._driver
+        client = drv._client
+        service = client.service
+        factory = client.factory
+
+        target = 1
+        args = ''
+        response_check = False
+        request = factory.create('Request').AndReturn(FakeRequest())
+
+        service.ApiProxy(Target=target, Request=request)
+
+        self.mox.ReplayAll()
+
+        drv.send_request_to(target, request, args, response_check)
+
+    def test_send_failing_request(self):
+        drv = self._driver
+        client = drv._client
+        service = client.service
+        factory = client.factory
+
+        target = 1
+        args = ''
+        response_check = True
+        request = factory.create('Request').AndReturn(FakeRequest())
+
+        service.ApiProxy(Target=target, Request=request).AndRaise(
+            exception.Error())
+
+        self.mox.ReplayAll()
+
+        self.assertRaises(exception.Error, drv.send_request_to,
+                          target, request, args, response_check)
+
+    def test_successful_setup(self):
+        drv = self._driver
+        for flag in drv.REQUIRED_FLAGS:
+            setattr(netapp.FLAGS, flag, 'val')
+        conf_obj = Configuration(netapp.FLAGS)
+        drv.check_configuration(conf_obj)
+
+    def test_failing_setup(self):
+        drv = self._driver
+        self.assertRaises(exception.Error, drv.check_configuration,
+                          Configuration(netapp.FLAGS))
diff --git a/cinder/tests/test_share_rpcapi.py b/cinder/tests/test_share_rpcapi.py
new file mode 100644
index 0000000000..e63a2ae07f
--- /dev/null
+++ b/cinder/tests/test_share_rpcapi.py
@@ -0,0 +1,150 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2013 NetApp
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Unit Tests for cinder.share.rpcapi.
+"""
+
+
+from cinder import context
+from cinder import db
+from cinder import flags
+from cinder.openstack.common import jsonutils
+from cinder.openstack.common import rpc
+from cinder.share import rpcapi as share_rpcapi
+from cinder import test
+
+
+FLAGS = flags.FLAGS
+
+
+class ShareRpcAPITestCase(test.TestCase):
+
+    def setUp(self):
+        self.context = context.get_admin_context()
+        shr = {}
+        shr['host'] = 'fake_host'
+        shr['availability_zone'] = FLAGS.storage_availability_zone
+        shr['status'] = "available"
+        share = db.share_create(self.context, shr)
+        acs = {}
+        acs['access_type'] = "ip"
+        acs['access_to'] = "123.123.123.123"
+        acs['share_id'] = share['id']
+        access = db.share_access_create(self.context, acs)
+        snap = {}
+        snap['share_id'] = share['id']
+        snapshot = db.share_snapshot_create(self.context, snap)
+        self.fake_share = jsonutils.to_primitive(share)
+        self.fake_access = jsonutils.to_primitive(access)
+        self.fake_snapshot = jsonutils.to_primitive(snapshot)
+        super(ShareRpcAPITestCase, self).setUp()
+
+    def test_serialized_share_has_id(self):
+        self.assertTrue('id' in self.fake_share)
+
+    def _test_share_api(self, method, rpc_method, **kwargs):
+        ctxt = context.RequestContext('fake_user', 'fake_project')
+
+        if 'rpcapi_class' in kwargs:
+            rpcapi_class = kwargs['rpcapi_class']
+            del kwargs['rpcapi_class']
+        else:
+            rpcapi_class = share_rpcapi.ShareAPI
+        rpcapi = rpcapi_class()
+        expected_retval = 'foo' if method == 'call' else None
+
+        expected_version = kwargs.pop('version', rpcapi.BASE_RPC_API_VERSION)
+        expected_msg = rpcapi.make_msg(method, **kwargs)
+        if 'share' in expected_msg['args']:
+            share = expected_msg['args']['share']
+            del expected_msg['args']['share']
+            expected_msg['args']['share_id'] = share['id']
+        if 'access' in expected_msg['args']:
+            access = expected_msg['args']['access']
+            del expected_msg['args']['access']
+            expected_msg['args']['access_id'] = access['id']
+            del expected_msg['args']['share_id']
+        if 'host' in expected_msg['args']:
+            del expected_msg['args']['host']
+        if 'snapshot' in expected_msg['args']:
+            snapshot = expected_msg['args']['snapshot']
+            del expected_msg['args']['snapshot']
+            expected_msg['args']['snapshot_id'] = snapshot['id']
+
+        expected_msg['version'] = expected_version
+
+        if 'host' in kwargs:
+            host = kwargs['host']
+        else:
+            host = kwargs['share']['host']
+        expected_topic = '%s.%s' % (FLAGS.share_topic, host)
+
+        self.fake_args = None
+        self.fake_kwargs = None
+
+        def _fake_rpc_method(*args, **kwargs):
+            self.fake_args = args
+            self.fake_kwargs = kwargs
+            if expected_retval:
+                return expected_retval
+
+        self.stubs.Set(rpc, rpc_method,
_fake_rpc_method) + + retval = getattr(rpcapi, method)(ctxt, **kwargs) + + self.assertEqual(retval, expected_retval) + expected_args = [ctxt, expected_topic, expected_msg] + for arg, expected_arg in zip(self.fake_args, expected_args): + self.assertEqual(arg, expected_arg) + + def test_create_share(self): + self._test_share_api('create_share', + rpc_method='cast', + share=self.fake_share, + host='fake_host1', + snapshot_id='fake_snapshot_id', + filter_properties=None, + request_spec=None) + + def test_delete_share(self): + self._test_share_api('delete_share', + rpc_method='cast', + share=self.fake_share) + + def test_allow_access(self): + self._test_share_api('allow_access', + rpc_method='cast', + share=self.fake_share, + access=self.fake_access) + + def test_deny_access(self): + self._test_share_api('deny_access', + rpc_method='cast', + share=self.fake_share, + access=self.fake_access) + + def test_create_snapshot(self): + self._test_share_api('create_snapshot', + rpc_method='cast', + share=self.fake_share, + snapshot=self.fake_snapshot) + + def test_delete_snapshot(self): + self._test_share_api('delete_snapshot', + rpc_method='cast', + snapshot=self.fake_snapshot, + host='fake_host') diff --git a/cinder/tests/test_sheepdog.py b/cinder/tests/test_sheepdog.py new file mode 100644 index 0000000000..70fab1afd1 --- /dev/null +++ b/cinder/tests/test_sheepdog.py @@ -0,0 +1,64 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 Wenhao Xu +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
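(The test_share_rpcapi.py tests above work by stubbing rpc.cast/rpc.call and comparing the captured topic and message against rpcapi.make_msg output. Roughly, each ShareAPI method is a thin proxy like the sketch below; this is an assumption based on the usual openstack.common.rpc proxy pattern, the real class is cinder/share/rpcapi.py and ShareAPISketch/delete_share here are only illustrative.)

from cinder import flags
from cinder.openstack.common import rpc
import cinder.openstack.common.rpc.proxy

FLAGS = flags.FLAGS


class ShareAPISketch(cinder.openstack.common.rpc.proxy.RpcProxy):
    """Client-side proxy: translates method calls into cast messages."""

    BASE_RPC_API_VERSION = '1.0'

    def __init__(self):
        super(ShareAPISketch, self).__init__(
            topic=FLAGS.share_topic,
            default_version=self.BASE_RPC_API_VERSION)

    def delete_share(self, ctxt, share):
        # Only the id crosses the wire; this is why _test_share_api
        # rewrites 'share' into 'share_id' in the expected message.
        self.cast(ctxt,
                  self.make_msg('delete_share', share_id=share['id']),
                  topic=rpc.queue_get_for(ctxt, FLAGS.share_topic,
                                          share['host']))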
+ +from cinder import exception +from cinder import test +from cinder.volume.drivers.sheepdog import SheepdogDriver + +COLLIE_NODE_INFO = """ +0 107287605248 3623897354 3% +Total 107287605248 3623897354 3% 54760833024 +""" + + +class SheepdogTestCase(test.TestCase): + + def setUp(self): + super(SheepdogTestCase, self).setUp() + self.driver = SheepdogDriver() + + def test_update_volume_stats(self): + def fake_stats(*args): + return COLLIE_NODE_INFO, '' + self.stubs.Set(self.driver, '_execute', fake_stats) + expected = dict( + volume_backend_name='sheepdog', + vendor_name='Open Source', + dirver_version='1.0', + storage_protocol='sheepdog', + total_capacity_gb=float(107287605248) / (1024 ** 3), + free_capacity_gb=float(107287605248 - 3623897354) / (1024 ** 3), + reserved_percentage=0, + QoS_support=False) + actual = self.driver.get_volume_stats(True) + self.assertDictMatch(expected, actual) + + def test_update_volume_stats_error(self): + def fake_stats(*args): + raise exception.ProcessExecutionError() + self.stubs.Set(self.driver, '_execute', fake_stats) + expected = dict( + volume_backend_name='sheepdog', + vendor_name='Open Source', + dirver_version='1.0', + storage_protocol='sheepdog', + total_capacity_gb='unknown', + free_capacity_gb='unknown', + reserved_percentage=0, + QoS_support=False) + actual = self.driver.get_volume_stats(True) + self.assertDictMatch(expected, actual) diff --git a/cinder/tests/test_skip_examples.py b/cinder/tests/test_skip_examples.py new file mode 100644 index 0000000000..2e51aef342 --- /dev/null +++ b/cinder/tests/test_skip_examples.py @@ -0,0 +1,47 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
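(The sheepdog stats tests above feed the canned COLLIE_NODE_INFO output through the driver. The expected capacity numbers fall out of the 'Total' line; the sketch below shows parsing consistent with those expectations. parse_collie_node_info is illustrative only, not the driver's actual helper.)

def parse_collie_node_info(stdout):
    """Extract (total_gb, free_gb) from 'collie node info' output."""
    for line in stdout.splitlines():
        if line.startswith('Total'):
            # Format: Total <total_bytes> <used_bytes> <use%> <avail_bytes>
            _label, total, used = line.split()[:3]
            total_gb = float(total) / (1024 ** 3)
            free_gb = float(int(total) - int(used)) / (1024 ** 3)
            return total_gb, free_gb
    # Mirrors the 'unknown' capacities asserted in the error test.
    return 'unknown', 'unknown'

With COLLIE_NODE_INFO from the test this yields exactly the expected total_capacity_gb and free_capacity_gb values.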
+ +from cinder import test + + +class ExampleSkipTestCase(test.TestCase): + test_counter = 0 + + @test.skip_test("Example usage of @test.skip_test()") + def test_skip_test_example(self): + self.fail("skip_test failed to work properly.") + + @test.skip_if(True, "Example usage of @test.skip_if()") + def test_skip_if_example(self): + self.fail("skip_if failed to work properly.") + + @test.skip_unless(False, "Example usage of @test.skip_unless()") + def test_skip_unless_example(self): + self.fail("skip_unless failed to work properly.") + + @test.skip_if(False, "This test case should never be skipped.") + def test_001_increase_test_counter(self): + ExampleSkipTestCase.test_counter += 1 + + @test.skip_unless(True, "This test case should never be skipped.") + def test_002_increase_test_counter(self): + ExampleSkipTestCase.test_counter += 1 + + def test_003_verify_test_counter(self): + self.assertEquals(ExampleSkipTestCase.test_counter, 2, + "Tests were not skipped appropriately") diff --git a/cinder/tests/test_solidfire.py b/cinder/tests/test_solidfire.py new file mode 100644 index 0000000000..8679f4142d --- /dev/null +++ b/cinder/tests/test_solidfire.py @@ -0,0 +1,283 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
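(The skip-decorator examples above rely on test.skip_test, test.skip_if and test.skip_unless. One way such helpers can be built on unittest's SkipTest machinery; a sketch under that assumption, the real implementations live in cinder/test.py and may differ.)

import functools
import unittest


def skip_if(condition, msg):
    """Skip the decorated test when condition is true."""
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            if condition:
                raise unittest.SkipTest(msg)
            return func(*args, **kwargs)
        return wrapper
    return decorator


def skip_unless(condition, msg):
    """Skip the decorated test unless condition is true."""
    return skip_if(not condition, msg)


def skip_test(msg):
    """Unconditionally skip the decorated test."""
    return skip_if(True, msg)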
+ +import mox +from mox import IgnoreArg +from mox import IsA +from mox import stubout + + +from cinder import exception +from cinder.openstack.common import log as logging +from cinder import test +from cinder.volume import configuration as conf +from cinder.volume.drivers.solidfire import SolidFire + +LOG = logging.getLogger(__name__) + + +def create_configuration(): + configuration = mox.MockObject(conf.Configuration) + configuration.san_is_local = False + configuration.append_config_values(mox.IgnoreArg()) + return configuration + + +class SolidFireVolumeTestCase(test.TestCase): + def setUp(self): + self._mox = mox.Mox() + self.configuration = mox.MockObject(conf.Configuration) + self.configuration.sf_allow_tenant_qos = True + self.configuration.san_is_local = True + self.configuration.sf_emulate_512 = True + self.configuration.sf_account_prefix = 'cinder' + + super(SolidFireVolumeTestCase, self).setUp() + self.stubs.Set(SolidFire, '_issue_api_request', + self.fake_issue_api_request) + + def fake_issue_api_request(obj, method, params): + if method is 'GetClusterCapacity': + LOG.info('Called Fake GetClusterCapacity...') + data = {} + data = {'result': + {'clusterCapacity': {'maxProvisionedSpace': 99999999, + 'usedSpace': 999, + 'compressionPercent': 100, + 'deDuplicationPercent': 100, + 'thinProvisioningPercent': 100}}} + return data + + if method is 'GetClusterInfo': + LOG.info('Called Fake GetClusterInfo...') + results = {'result': {'clusterInfo': + {'name': 'fake-cluster', + 'mvip': '1.1.1.1', + 'svip': '1.1.1.1', + 'uniqueID': 'unqid', + 'repCount': 2, + 'attributes': {}}}} + return results + + elif method is 'AddAccount': + LOG.info('Called Fake AddAccount...') + return {'result': {'accountID': 25}, 'id': 1} + + elif method is 'GetAccountByName': + LOG.info('Called Fake GetAccountByName...') + results = {'result': {'account': + {'accountID': 25, + 'username': params['username'], + 'status': 'active', + 'initiatorSecret': '123456789012', + 'targetSecret': '123456789012', + 'attributes': {}, + 'volumes': [6, 7, 20]}}, + "id": 1} + return results + + elif method is 'CreateVolume': + LOG.info('Called Fake CreateVolume...') + return {'result': {'volumeID': 5}, 'id': 1} + + elif method is 'DeleteVolume': + LOG.info('Called Fake DeleteVolume...') + return {'result': {}, 'id': 1} + + elif method is 'ListVolumesForAccount': + test_name = 'OS-VOLID-a720b3c0-d1f0-11e1-9b23-0800200c9a66' + LOG.info('Called Fake ListVolumesForAccount...') + result = {'result': { + 'volumes': [{'volumeID': 5, + 'name': test_name, + 'accountID': 25, + 'sliceCount': 1, + 'totalSize': 1048576 * 1024, + 'enable512e': True, + 'access': "readWrite", + 'status': "active", + 'attributes': None, + 'qos': None, + 'iqn': test_name}]}} + return result + + else: + LOG.error('Crap, unimplemented API call in Fake:%s' % method) + + def fake_issue_api_request_fails(obj, method, params): + return {'error': {'code': 000, + 'name': 'DummyError', + 'message': 'This is a fake error response'}, + 'id': 1} + + def fake_set_qos_by_volume_type(self, type_id, ctxt): + return {'minIOPS': 500, + 'maxIOPS': 1000, + 'burstIOPS': 1000} + + def fake_volume_get(obj, key, default=None): + return {'qos': 'fast'} + + def fake_update_cluster_status(self): + return + + def test_create_with_qos_type(self): + self.stubs.Set(SolidFire, '_issue_api_request', + self.fake_issue_api_request) + self.stubs.Set(SolidFire, '_set_qos_by_volume_type', + self.fake_set_qos_by_volume_type) + testvol = {'project_id': 'testprjid', + 'name': 'testvol', + 'size': 1, + 
+                   'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66',
+                   'volume_type_id': 'fast'}
+
+        sfv = SolidFire(configuration=self.configuration)
+        model_update = sfv.create_volume(testvol)
+        self.assertNotEqual(model_update, None)
+
+    def test_create_volume(self):
+        self.stubs.Set(SolidFire, '_issue_api_request',
+                       self.fake_issue_api_request)
+        testvol = {'project_id': 'testprjid',
+                   'name': 'testvol',
+                   'size': 1,
+                   'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66',
+                   'volume_type_id': None}
+        sfv = SolidFire(configuration=self.configuration)
+        model_update = sfv.create_volume(testvol)
+        self.assertNotEqual(model_update, None)
+
+    def test_create_volume_with_qos(self):
+        preset_qos = {}
+        preset_qos['qos'] = 'fast'
+        self.stubs.Set(SolidFire, '_issue_api_request',
+                       self.fake_issue_api_request)
+
+        testvol = {'project_id': 'testprjid',
+                   'name': 'testvol',
+                   'size': 1,
+                   'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66',
+                   'metadata': [preset_qos],
+                   'volume_type_id': None}
+
+        sfv = SolidFire(configuration=self.configuration)
+        model_update = sfv.create_volume(testvol)
+        self.assertNotEqual(model_update, None)
+
+    def test_create_volume_fails(self):
+        # NOTE(JDG) This test just fakes update_cluster_status
+        # this is intentional for this test
+        self.stubs.Set(SolidFire, '_update_cluster_status',
+                       self.fake_update_cluster_status)
+        self.stubs.Set(SolidFire, '_issue_api_request',
+                       self.fake_issue_api_request_fails)
+        testvol = {'project_id': 'testprjid',
+                   'name': 'testvol',
+                   'size': 1,
+                   'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66'}
+        sfv = SolidFire(configuration=self.configuration)
+        self.assertRaises(Exception, sfv.create_volume, testvol)
+
+    def test_create_sfaccount(self):
+        sfv = SolidFire(configuration=self.configuration)
+        self.stubs.Set(SolidFire, '_issue_api_request',
+                       self.fake_issue_api_request)
+        account = sfv._create_sfaccount('project-id')
+        self.assertNotEqual(account, None)
+
+    def test_create_sfaccount_fails(self):
+        sfv = SolidFire(configuration=self.configuration)
+        self.stubs.Set(SolidFire, '_issue_api_request',
+                       self.fake_issue_api_request_fails)
+        account = sfv._create_sfaccount('project-id')
+        self.assertEqual(account, None)
+
+    def test_get_sfaccount_by_name(self):
+        sfv = SolidFire(configuration=self.configuration)
+        self.stubs.Set(SolidFire, '_issue_api_request',
+                       self.fake_issue_api_request)
+        account = sfv._get_sfaccount_by_name('some-name')
+        self.assertNotEqual(account, None)
+
+    def test_get_sfaccount_by_name_fails(self):
+        sfv = SolidFire(configuration=self.configuration)
+        self.stubs.Set(SolidFire, '_issue_api_request',
+                       self.fake_issue_api_request_fails)
+        account = sfv._get_sfaccount_by_name('some-name')
+        self.assertEqual(account, None)
+
+    def test_delete_volume(self):
+        self.stubs.Set(SolidFire, '_issue_api_request',
+                       self.fake_issue_api_request)
+        testvol = {'project_id': 'testprjid',
+                   'name': 'test_volume',
+                   'size': 1,
+                   'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66'}
+        sfv = SolidFire(configuration=self.configuration)
+        sfv.delete_volume(testvol)
+
+    def test_delete_volume_fails_no_volume(self):
+        self.stubs.Set(SolidFire, '_issue_api_request',
+                       self.fake_issue_api_request)
+        testvol = {'project_id': 'testprjid',
+                   'name': 'no-name',
+                   'size': 1,
+                   'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66'}
+        sfv = SolidFire(configuration=self.configuration)
+        self.assertRaises(Exception, sfv.delete_volume, testvol)
+
+    def test_delete_volume_fails_account_lookup(self):
+        # NOTE(JDG) This test just fakes update_cluster_status
+        # this is intentional for this test
+        self.stubs.Set(SolidFire, '_update_cluster_status',
+                       self.fake_update_cluster_status)
+        self.stubs.Set(SolidFire, '_issue_api_request',
+                       self.fake_issue_api_request_fails)
+        testvol = {'project_id': 'testprjid',
+                   'name': 'no-name',
+                   'size': 1,
+                   'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66'}
+        sfv = SolidFire(configuration=self.configuration)
+        self.assertRaises(exception.SfAccountNotFound,
+                          sfv.delete_volume,
+                          testvol)
+
+    def test_get_cluster_info(self):
+        self.stubs.Set(SolidFire, '_issue_api_request',
+                       self.fake_issue_api_request)
+        sfv = SolidFire(configuration=self.configuration)
+        sfv._get_cluster_info()
+
+    def test_get_cluster_info_fail(self):
+        # NOTE(JDG) This test just fakes update_cluster_status
+        # this is intentional for this test
+        self.stubs.Set(SolidFire, '_update_cluster_status',
+                       self.fake_update_cluster_status)
+        self.stubs.Set(SolidFire, '_issue_api_request',
+                       self.fake_issue_api_request_fails)
+        sfv = SolidFire(configuration=self.configuration)
+        self.assertRaises(exception.SolidFireAPIException,
+                          sfv._get_cluster_info)
diff --git a/cinder/tests/test_storwize_svc.py b/cinder/tests/test_storwize_svc.py
new file mode 100755
index 0000000000..d26b449550
--- /dev/null
+++ b/cinder/tests/test_storwize_svc.py
@@ -0,0 +1,1978 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2013 IBM Corp.
+# Copyright 2012 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# Authors:
+#   Ronen Kat
+#   Avishay Traeger
+
+"""
+Tests for the IBM Storwize family and SVC volume driver.
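+
+The StorwizeSVCManagementSimulator below fakes the Storwize/SVC CLI so the
+driver can be exercised without real hardware, roughly like this
+(illustrative only):
+
+    sim = StorwizeSVCManagementSimulator('volpool')
+    driver = StorwizeSVCFakeDriver(configuration=conf.Configuration(None))
+    driver.set_fake_storage(sim)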
+""" + +import random +import re +import socket +import unittest + +from cinder import context +from cinder import exception +from cinder.openstack.common import excutils +from cinder.openstack.common import log as logging +from cinder import test +from cinder import utils +from cinder.volume import configuration as conf +from cinder.volume.drivers import storwize_svc +from cinder.volume import volume_types + + +LOG = logging.getLogger(__name__) + + +class StorwizeSVCFakeDB: + def __init__(self): + self.volume = None + + def volume_get(self, context, vol_id): + return self.volume + + def volume_set(self, vol): + self.volume = vol + + +class StorwizeSVCManagementSimulator: + def __init__(self, pool_name): + self._flags = {'storwize_svc_volpool_name': pool_name} + self._volumes_list = {} + self._hosts_list = {} + self._mappings_list = {} + self._fcmappings_list = {} + self._next_cmd_error = { + 'lsportip': '', + 'lsfabric': '', + 'lsiscsiauth': '', + 'lsnodecanister': '', + 'mkvdisk': '', + 'lsvdisk': '', + 'lsfcmap': '', + 'prestartfcmap': '', + 'startfcmap': '', + 'rmfcmap': '', + 'lslicense': '', + } + self._errors = { + 'CMMVC5701E': ('', 'CMMVC5701E No object ID was specified.'), + 'CMMVC6035E': ('', 'CMMVC6035E The action failed as the ' + 'object already exists.'), + 'CMMVC5753E': ('', 'CMMVC5753E The specified object does not ' + 'exist or is not a suitable candidate.'), + 'CMMVC5707E': ('', 'CMMVC5707E Required parameters are missing.'), + 'CMMVC6581E': ('', 'CMMVC6581E The command has failed because ' + 'the maximum number of allowed iSCSI ' + 'qualified names (IQNs) has been reached, ' + 'or the IQN is already assigned or is not ' + 'valid.'), + 'CMMVC5754E': ('', 'CMMVC5754E The specified object does not ' + 'exist, or the name supplied does not meet ' + 'the naming rules.'), + 'CMMVC6071E': ('', 'CMMVC6071E The VDisk-to-host mapping was ' + 'not created because the VDisk is already ' + 'mapped to a host.'), + 'CMMVC5879E': ('', 'CMMVC5879E The VDisk-to-host mapping was ' + 'not created because a VDisk is already ' + 'mapped to this host with this SCSI LUN.'), + 'CMMVC5840E': ('', 'CMMVC5840E The virtual disk (VDisk) was ' + 'not deleted because it is mapped to a ' + 'host or because it is part of a FlashCopy ' + 'or Remote Copy mapping, or is involved in ' + 'an image mode migrate.'), + 'CMMVC6527E': ('', 'CMMVC6527E The name that you have entered ' + 'is not valid. The name can contain letters, ' + 'numbers, spaces, periods, dashes, and ' + 'underscores. The name must begin with a ' + 'letter or an underscore. 
The name must not ' + 'begin or end with a space.'), + 'CMMVC5871E': ('', 'CMMVC5871E The action failed because one or ' + 'more of the configured port names is in a ' + 'mapping.'), + 'CMMVC5924E': ('', 'CMMVC5924E The FlashCopy mapping was not ' + 'created because the source and target ' + 'virtual disks (VDisks) are different sizes.'), + 'CMMVC6303E': ('', 'CMMVC6303E The create failed because the ' + 'source and target VDisks are the same.'), + 'CMMVC7050E': ('', 'CMMVC7050E The command failed because at ' + 'least one node in the I/O group does not ' + 'support compressed VDisks.'), + # Catch-all for invalid state transitions: + 'CMMVC5903E': ('', 'CMMVC5903E The FlashCopy mapping was not ' + 'changed because the mapping or consistency ' + 'group is another state.'), + } + self._transitions = {'begin': {'make': 'idle_or_copied'}, + 'idle_or_copied': {'prepare': 'preparing', + 'delete': 'end', + 'delete_force': 'end'}, + 'preparing': {'flush_failed': 'stopped', + 'wait': 'prepared'}, + 'end': None, + 'stopped': {'prepare': 'preparing', + 'delete_force': 'end'}, + 'prepared': {'stop': 'stopped', + 'start': 'copying'}, + 'copying': {'wait': 'idle_or_copied', + 'stop': 'stopping'}, + # Assume the worst case where stopping->stopped + # rather than stopping idle_or_copied + 'stopping': {'wait': 'stopped'}, + } + + def _state_transition(self, function, fcmap): + if (function == 'wait' and + 'wait' not in self._transitions[fcmap['status']]): + return ('', '') + + if fcmap['status'] == 'copying' and function == 'wait': + if fcmap['copyrate'] != '0': + if fcmap['progress'] == '0': + fcmap['progress'] = '50' + else: + fcmap['progress'] = '100' + fcmap['status'] = 'idle_or_copied' + return ('', '') + else: + try: + curr_state = fcmap['status'] + fcmap['status'] = self._transitions[curr_state][function] + return ('', '') + except Exception: + return self._errors['CMMVC5903E'] + + # Find an unused ID + def _find_unused_id(self, d): + ids = [] + for k, v in d.iteritems(): + ids.append(int(v['id'])) + ids.sort() + for index, n in enumerate(ids): + if n > index: + return str(index) + return str(len(ids)) + + # Check if name is valid + def _is_invalid_name(self, name): + if re.match("^[a-zA-Z_][\w ._-]*$", name): + return False + return True + + # Convert argument string to dictionary + def _cmd_to_dict(self, cmd): + arg_list = cmd.split() + no_param_args = [ + 'autodelete', + 'autoexpand', + 'bytes', + 'compressed', + 'force', + 'nohdr', + ] + one_param_args = [ + 'chapsecret', + 'cleanrate', + 'copyrate', + 'delim', + 'filtervalue', + 'grainsize', + 'hbawwpn', + 'host', + 'iogrp', + 'iscsiname', + 'mdiskgrp', + 'name', + 'rsize', + 'scsi', + 'size', + 'source', + 'target', + 'unit', + 'easytier', + 'warning', + 'wwpn', + ] + + # Handle the special case of lsnode which is a two-word command + # Use the one word version of the command internally + if arg_list[0] in ('svcinfo', 'svctask'): + if arg_list[1] == 'lsnode': + if len(arg_list) > 4: # e.g. svcinfo lsnode -delim ! 
+ ret = {'cmd': 'lsnode', 'node_id': arg_list[-1]} + else: + ret = {'cmd': 'lsnodecanister'} + else: + ret = {'cmd': arg_list[1]} + arg_list.pop(0) + else: + ret = {'cmd': arg_list[0]} + + skip = False + for i in range(1, len(arg_list)): + if skip: + skip = False + continue + if arg_list[i][0] == '-': + if arg_list[i][1:] in no_param_args: + ret[arg_list[i][1:]] = True + elif arg_list[i][1:] in one_param_args: + ret[arg_list[i][1:]] = arg_list[i + 1] + skip = True + else: + raise exception.InvalidInput( + reason=_('unrecognized argument %s') % arg_list[i]) + else: + ret['obj'] = arg_list[i] + return ret + + def _print_info_cmd(self, rows, delim=' ', nohdr=False, **kwargs): + """Generic function for printing information.""" + if nohdr: + del rows[0] + + for index in range(len(rows)): + rows[index] = delim.join(rows[index]) + return ('%s' % '\n'.join(rows), '') + + def _print_info_obj_cmd(self, header, row, delim=' ', nohdr=False): + """Generic function for printing information for a specific object.""" + objrows = [] + for idx, val in enumerate(header): + objrows.append([val, row[idx]]) + + if nohdr: + for index in range(len(objrows)): + objrows[index] = ' '.join(objrows[index][1:]) + for index in range(len(objrows)): + objrows[index] = delim.join(objrows[index]) + return ('%s' % '\n'.join(objrows), '') + + def _convert_bytes_units(self, bytestr): + num = int(bytestr) + unit_array = ['B', 'KB', 'MB', 'GB', 'TB', 'PB'] + unit_index = 0 + + while num > 1024: + num = num / 1024 + unit_index += 1 + + return '%d%s' % (num, unit_array[unit_index]) + + def _convert_units_bytes(self, num, unit): + unit_array = ['B', 'KB', 'MB', 'GB', 'TB', 'PB'] + unit_index = 0 + + while unit.lower() != unit_array[unit_index].lower(): + num = num * 1024 + unit_index += 1 + + return str(num) + + def _cmd_lslicense(self, **kwargs): + rows = [None] * 3 + rows[0] = ['used_compression_capacity', '0.08'] + rows[1] = ['license_compression_capacity', '0'] + if self._next_cmd_error['lslicense'] == 'no_compression': + self._next_cmd_error['lslicense'] = '' + rows[2] = ['license_compression_enclosures', '0'] + else: + rows[2] = ['license_compression_enclosures', '1'] + return self._print_info_cmd(rows=rows, **kwargs) + + # Print mostly made-up stuff in the correct syntax + def _cmd_lssystem(self, **kwargs): + rows = [None] * 2 + rows[0] = ['id', '0123456789ABCDEF'] + rows[1] = ['name', 'storwize-svc-sim'] + return self._print_info_cmd(rows=rows, **kwargs) + + # Print mostly made-up stuff in the correct syntax, assume -bytes passed + def _cmd_lsmdiskgrp(self, **kwargs): + rows = [None] * 3 + rows[0] = ['id', 'name', 'status', 'mdisk_count', + 'vdisk_count', 'capacity', 'extent_size', + 'free_capacity', 'virtual_capacity', 'used_capacity', + 'real_capacity', 'overallocation', 'warning', + 'easy_tier', 'easy_tier_status'] + rows[1] = ['1', self._flags['storwize_svc_volpool_name'], 'online', + '1', str(len(self._volumes_list)), '3573412790272', + '256', '3529926246400', '1693247906775', '277841182', + '38203734097', '47', '80', 'auto', 'inactive'] + rows[2] = ['2', 'volpool2', 'online', + '1', '0', '3573412790272', '256', + '3529432325160', '1693247906775', '277841182', + '38203734097', '47', '80', 'auto', 'inactive'] + if 'obj' not in kwargs: + return self._print_info_cmd(rows=rows, **kwargs) + else: + if kwargs['obj'] == self._flags['storwize_svc_volpool_name']: + row = rows[1] + elif kwargs['obj'] == 'volpool2': + row = rows[2] + else: + return self._errors['CMMVC5754E'] + + objrows = [] + for idx, val in enumerate(rows[0]): 
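+                # Pair each header field with the selected row's value,
+                # mimicking the CLI's per-object output format.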
+ objrows.append([val, row[idx]]) + + if 'nohdr' in kwargs: + for index in range(len(objrows)): + objrows[index] = ' '.join(objrows[index][1:]) + + if 'delim' in kwargs: + for index in range(len(objrows)): + objrows[index] = kwargs['delim'].join(objrows[index]) + + return ('%s' % '\n'.join(objrows), '') + + # Print mostly made-up stuff in the correct syntax + def _cmd_lsnodecanister(self, **kwargs): + rows = [None] * 3 + rows[0] = ['id', 'name', 'UPS_serial_number', 'WWNN', 'status', + 'IO_group_id', 'IO_group_name', 'config_node', + 'UPS_unique_id', 'hardware', 'iscsi_name', 'iscsi_alias', + 'panel_name', 'enclosure_id', 'canister_id', + 'enclosure_serial_number'] + rows[1] = ['1', 'node1', '', '123456789ABCDEF0', 'online', '0', + 'io_grp0', + 'yes', '123456789ABCDEF0', '100', + 'iqn.1982-01.com.ibm:1234.sim.node1', '', '01-1', '1', '1', + '0123ABC'] + rows[2] = ['2', 'node2', '', '123456789ABCDEF1', 'online', '0', + 'io_grp0', + 'no', '123456789ABCDEF1', '100', + 'iqn.1982-01.com.ibm:1234.sim.node2', '', '01-2', '1', '2', + '0123ABC'] + + if self._next_cmd_error['lsnodecanister'] == 'header_mismatch': + rows[0].pop(2) + self._next_cmd_error['lsnodecanister'] = '' + if self._next_cmd_error['lsnodecanister'] == 'remove_field': + for row in rows: + row.pop(0) + self._next_cmd_error['lsnodecanister'] = '' + + return self._print_info_cmd(rows=rows, **kwargs) + + # Print information of every single node of SVC + def _cmd_lsnode(self, **kwargs): + node_infos = dict() + node_infos['1'] = r'''id!1 +name!node1 +port_id!500507680210C744 +port_status!active +port_speed!8Gb +port_id!500507680220C744 +port_status!active +port_speed!8Gb +''' + node_infos['2'] = r'''id!2 +name!node2 +port_id!500507680220C745 +port_status!active +port_speed!8Gb +port_id!500507680230C745 +port_status!inactive +port_speed!N/A +''' + node_id = kwargs.get('node_id', None) + stdout = node_infos.get(node_id, '') + return stdout, '' + + # Print mostly made-up stuff in the correct syntax + def _cmd_lsportip(self, **kwargs): + if self._next_cmd_error['lsportip'] == 'ip_no_config': + self._next_cmd_error['lsportip'] = '' + ip_addr1 = '' + ip_addr2 = '' + gw = '' + else: + ip_addr1 = '1.234.56.78' + ip_addr2 = '1.234.56.79' + gw = '1.234.56.1' + + rows = [None] * 17 + rows[0] = ['id', 'node_id', 'node_name', 'IP_address', 'mask', + 'gateway', 'IP_address_6', 'prefix_6', 'gateway_6', 'MAC', + 'duplex', 'state', 'speed', 'failover'] + rows[1] = ['1', '1', 'node1', ip_addr1, '255.255.255.0', + gw, '', '', '', '01:23:45:67:89:00', 'Full', + 'online', '1Gb/s', 'no'] + rows[2] = ['1', '1', 'node1', '', '', '', '', '', '', + '01:23:45:67:89:00', 'Full', 'online', '1Gb/s', 'yes'] + rows[3] = ['2', '1', 'node1', '', '', '', '', '', '', + '01:23:45:67:89:01', 'Full', 'unconfigured', '1Gb/s', 'no'] + rows[4] = ['2', '1', 'node1', '', '', '', '', '', '', + '01:23:45:67:89:01', 'Full', 'unconfigured', '1Gb/s', 'yes'] + rows[5] = ['3', '1', 'node1', '', '', '', '', '', '', '', '', + 'unconfigured', '', 'no'] + rows[6] = ['3', '1', 'node1', '', '', '', '', '', '', '', '', + 'unconfigured', '', 'yes'] + rows[7] = ['4', '1', 'node1', '', '', '', '', '', '', '', '', + 'unconfigured', '', 'no'] + rows[8] = ['4', '1', 'node1', '', '', '', '', '', '', '', '', + 'unconfigured', '', 'yes'] + rows[9] = ['1', '2', 'node2', ip_addr2, '255.255.255.0', + gw, '', '', '', '01:23:45:67:89:02', 'Full', + 'online', '1Gb/s', 'no'] + rows[10] = ['1', '2', 'node2', '', '', '', '', '', '', + '01:23:45:67:89:02', 'Full', 'online', '1Gb/s', 'yes'] + rows[11] = ['2', 
'2', 'node2', '', '', '', '', '', '', + '01:23:45:67:89:03', 'Full', 'unconfigured', '1Gb/s', 'no'] + rows[12] = ['2', '2', 'node2', '', '', '', '', '', '', + '01:23:45:67:89:03', 'Full', 'unconfigured', '1Gb/s', + 'yes'] + rows[13] = ['3', '2', 'node2', '', '', '', '', '', '', '', '', + 'unconfigured', '', 'no'] + rows[14] = ['3', '2', 'node2', '', '', '', '', '', '', '', '', + 'unconfigured', '', 'yes'] + rows[15] = ['4', '2', 'node2', '', '', '', '', '', '', '', '', + 'unconfigured', '', 'no'] + rows[16] = ['4', '2', 'node2', '', '', '', '', '', '', '', '', + 'unconfigured', '', 'yes'] + + if self._next_cmd_error['lsportip'] == 'header_mismatch': + rows[0].pop(2) + self._next_cmd_error['lsportip'] = '' + if self._next_cmd_error['lsportip'] == 'remove_field': + for row in rows: + row.pop(1) + self._next_cmd_error['lsportip'] = '' + + return self._print_info_cmd(rows=rows, **kwargs) + + def _cmd_lsfabric(self, **kwargs): + host_name = kwargs['host'] if 'host' in kwargs else None + target_wwpn = kwargs['wwpn'] if 'wwpn' in kwargs else None + host_infos = [] + + for hk, hv in self._hosts_list.iteritems(): + if not host_name or hv['host_name'] == host_name: + for mk, mv in self._mappings_list.iteritems(): + if mv['host'] == hv['host_name']: + if not target_wwpn or target_wwpn in hv['wwpns']: + host_infos.append(hv) + break + + if not len(host_infos): + return ('', '') + + rows = [] + rows.append(['remote_wwpn', 'remote_nportid', 'id', 'node_name', + 'local_wwpn', 'local_port', 'local_nportid', 'state', + 'name', 'cluster_name', 'type']) + for host_info in host_infos: + for wwpn in host_info['wwpns']: + rows.append([wwpn, '123456', host_info['id'], 'nodeN', + 'AABBCCDDEEFF0011', '1', '0123ABC', 'active', + host_info['host_name'], '', 'host']) + + if self._next_cmd_error['lsfabric'] == 'header_mismatch': + rows[0].pop(0) + self._next_cmd_error['lsfabric'] = '' + if self._next_cmd_error['lsfabric'] == 'remove_field': + for row in rows: + row.pop(0) + self._next_cmd_error['lsfabric'] = '' + return self._print_info_cmd(rows=rows, **kwargs) + + # Create a vdisk + def _cmd_mkvdisk(self, **kwargs): + # We only save the id/uid, name, and size - all else will be made up + volume_info = {} + volume_info['id'] = self._find_unused_id(self._volumes_list) + volume_info['uid'] = ('ABCDEF' * 3) + ('0' * 14) + volume_info['id'] + + if 'name' in kwargs: + volume_info['name'] = kwargs['name'].strip('\'\'') + else: + volume_info['name'] = 'vdisk' + volume_info['id'] + + # Assume size and unit are given, store it in bytes + capacity = int(kwargs['size']) + unit = kwargs['unit'] + volume_info['capacity'] = self._convert_units_bytes(capacity, unit) + + if 'easytier' in kwargs: + if kwargs['easytier'] == 'on': + volume_info['easy_tier'] = 'on' + else: + volume_info['easy_tier'] = 'off' + + if 'rsize' in kwargs: + # Fake numbers + volume_info['used_capacity'] = '786432' + volume_info['real_capacity'] = '21474816' + volume_info['free_capacity'] = '38219264' + if 'warning' in kwargs: + volume_info['warning'] = kwargs['warning'].rstrip('%') + else: + volume_info['warning'] = '80' + if 'autoexpand' in kwargs: + volume_info['autoexpand'] = 'on' + else: + volume_info['autoexpand'] = 'off' + if 'grainsize' in kwargs: + volume_info['grainsize'] = kwargs['grainsize'] + else: + volume_info['grainsize'] = '32' + if 'compressed' in kwargs: + volume_info['compressed_copy'] = 'yes' + else: + volume_info['compressed_copy'] = 'no' + else: + volume_info['used_capacity'] = volume_info['capacity'] + volume_info['real_capacity'] = 
volume_info['capacity']
+            volume_info['free_capacity'] = '0'
+            volume_info['warning'] = ''
+            volume_info['autoexpand'] = ''
+            volume_info['grainsize'] = ''
+            volume_info['compressed_copy'] = 'no'
+
+        if volume_info['name'] in self._volumes_list:
+            return self._errors['CMMVC6035E']
+        else:
+            self._volumes_list[volume_info['name']] = volume_info
+            return ('Virtual Disk, id [%s], successfully created' %
+                    (volume_info['id']), '')
+
+    # Delete a vdisk
+    def _cmd_rmvdisk(self, **kwargs):
+        force = 'force' in kwargs
+
+        if 'obj' not in kwargs:
+            return self._errors['CMMVC5701E']
+        vol_name = kwargs['obj'].strip('\'\'')
+
+        if vol_name not in self._volumes_list:
+            return self._errors['CMMVC5753E']
+
+        if not force:
+            for k, mapping in self._mappings_list.iteritems():
+                if mapping['vol'] == vol_name:
+                    return self._errors['CMMVC5840E']
+            for k, fcmap in self._fcmappings_list.iteritems():
+                if ((fcmap['source'] == vol_name) or
+                        (fcmap['target'] == vol_name)):
+                    return self._errors['CMMVC5840E']
+
+        del self._volumes_list[vol_name]
+        return ('', '')
+
+    def _get_fcmap_info(self, vol_name):
+        ret_vals = {
+            'fc_id': '',
+            'fc_name': '',
+            'fc_map_count': '0',
+        }
+        for k, fcmap in self._fcmappings_list.iteritems():
+            if ((fcmap['source'] == vol_name) or
+                    (fcmap['target'] == vol_name)):
+                ret_vals['fc_id'] = fcmap['id']
+                ret_vals['fc_name'] = fcmap['name']
+                ret_vals['fc_map_count'] = '1'
+        return ret_vals
+
+    # List information about vdisks
+    def _cmd_lsvdisk(self, **kwargs):
+        rows = []
+        rows.append(['id', 'name', 'IO_group_id', 'IO_group_name',
+                     'status', 'mdisk_grp_id', 'mdisk_grp_name',
+                     'capacity', 'type', 'FC_id', 'FC_name', 'RC_id',
+                     'RC_name', 'vdisk_UID', 'fc_map_count', 'copy_count',
+                     'fast_write_state', 'se_copy_count', 'RC_change'])
+
+        for k, vol in self._volumes_list.iteritems():
+            if (('filtervalue' not in kwargs) or
+                    (kwargs['filtervalue'] == 'name=' + vol['name'])):
+                fcmap_info = self._get_fcmap_info(vol['name'])
+
+                if 'bytes' in kwargs:
+                    cap = self._convert_bytes_units(vol['capacity'])
+                else:
+                    cap = vol['capacity']
+                rows.append([str(vol['id']), vol['name'], '0', 'io_grp0',
+                             'online', '0',
+                             self._flags['storwize_svc_volpool_name'],
+                             cap, 'striped',
+                             fcmap_info['fc_id'], fcmap_info['fc_name'],
+                             '', '', vol['uid'],
+                             fcmap_info['fc_map_count'], '1', 'empty',
+                             '1', 'no'])
+
+        if 'obj' not in kwargs:
+            return self._print_info_cmd(rows=rows, **kwargs)
+        else:
+            if kwargs['obj'] not in self._volumes_list:
+                return self._errors['CMMVC5754E']
+            vol = self._volumes_list[kwargs['obj']]
+            fcmap_info = self._get_fcmap_info(vol['name'])
+            cap = vol['capacity']
+            cap_u = vol['used_capacity']
+            cap_r = vol['real_capacity']
+            cap_f = vol['free_capacity']
+            if 'bytes' not in kwargs:
+                # NOTE: rebinding a loop variable ('for item in ...:
+                # item = ...') does not modify the underlying values,
+                # so convert each one explicitly.
+                cap = self._convert_bytes_units(cap)
+                cap_u = self._convert_bytes_units(cap_u)
+                cap_r = self._convert_bytes_units(cap_r)
+                cap_f = self._convert_bytes_units(cap_f)
+            rows = []
+
+            rows.append(['id', str(vol['id'])])
+            rows.append(['name', vol['name']])
+            rows.append(['IO_group_id', '0'])
+            rows.append(['IO_group_name', 'io_grp0'])
+            rows.append(['status', 'online'])
+            rows.append(['mdisk_grp_id', '0'])
+            rows.append([
+                'mdisk_grp_name',
+                self._flags['storwize_svc_volpool_name']])
+            rows.append(['capacity', cap])
+            rows.append(['type', 'striped'])
+            rows.append(['formatted', 'no'])
+            rows.append(['mdisk_id', ''])
+            rows.append(['mdisk_name', ''])
+            rows.append(['FC_id', fcmap_info['fc_id']])
+            rows.append(['FC_name', fcmap_info['fc_name']])
+            rows.append(['RC_id', ''])
+            rows.append(['RC_name', ''])
+            rows.append(['vdisk_UID', vol['uid']])
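+            # The attribute values below are mostly static fillers; only
+            # the fields the driver actually parses need realistic values.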
+            rows.append(['throttling', '0'])
+
+            if self._next_cmd_error['lsvdisk'] == 'blank_pref_node':
+                rows.append(['preferred_node_id', ''])
+                self._next_cmd_error['lsvdisk'] = ''
+            elif self._next_cmd_error['lsvdisk'] == 'no_pref_node':
+                self._next_cmd_error['lsvdisk'] = ''
+            else:
+                rows.append(['preferred_node_id', '1'])
+            rows.append(['fast_write_state', 'empty'])
+            rows.append(['cache', 'readwrite'])
+            rows.append(['udid', ''])
+            rows.append(['fc_map_count', fcmap_info['fc_map_count']])
+            rows.append(['sync_rate', '50'])
+            rows.append(['copy_count', '1'])
+            rows.append(['se_copy_count', '0'])
+            rows.append(['mirror_write_priority', 'latency'])
+            rows.append(['RC_change', 'no'])
+            rows.append(['used_capacity', cap_u])
+            rows.append(['real_capacity', cap_r])
+            rows.append(['free_capacity', cap_f])
+            rows.append(['autoexpand', vol['autoexpand']])
+            rows.append(['warning', vol['warning']])
+            rows.append(['grainsize', vol['grainsize']])
+            rows.append(['easy_tier', vol['easy_tier']])
+            rows.append(['compressed_copy', vol['compressed_copy']])
+
+            if 'nohdr' in kwargs:
+                for index in range(len(rows)):
+                    rows[index] = ' '.join(rows[index][1:])
+
+            if 'delim' in kwargs:
+                for index in range(len(rows)):
+                    rows[index] = kwargs['delim'].join(rows[index])
+
+            return ('%s' % '\n'.join(rows), '')
+
+    def _add_port_to_host(self, host_info, **kwargs):
+        if 'iscsiname' in kwargs:
+            added_key = 'iscsi_names'
+            added_val = kwargs['iscsiname'].strip('\'\"')
+        elif 'hbawwpn' in kwargs:
+            added_key = 'wwpns'
+            added_val = kwargs['hbawwpn'].strip('\'\"')
+        else:
+            return self._errors['CMMVC5707E']
+
+        host_info[added_key].append(added_val)
+
+        for k, v in self._hosts_list.iteritems():
+            if v['id'] == host_info['id']:
+                continue
+            for port in v[added_key]:
+                if port == added_val:
+                    return self._errors['CMMVC6581E']
+        return ('', '')
+
+    # Make a host
+    def _cmd_mkhost(self, **kwargs):
+        host_info = {}
+        host_info['id'] = self._find_unused_id(self._hosts_list)
+
+        if 'name' in kwargs:
+            host_name = kwargs['name'].strip('\'\"')
+        else:
+            host_name = 'host' + str(host_info['id'])
+
+        if self._is_invalid_name(host_name):
+            return self._errors['CMMVC6527E']
+
+        if host_name in self._hosts_list:
+            return self._errors['CMMVC6035E']
+
+        host_info['host_name'] = host_name
+        host_info['iscsi_names'] = []
+        host_info['wwpns'] = []
+
+        out, err = self._add_port_to_host(host_info, **kwargs)
+        if not len(err):
+            self._hosts_list[host_name] = host_info
+            return ('Host, id [%s], successfully created' %
+                    (host_info['id']), '')
+        else:
+            return (out, err)
+
+    # Add ports to an existing host
+    def _cmd_addhostport(self, **kwargs):
+        if 'obj' not in kwargs:
+            return self._errors['CMMVC5701E']
+        host_name = kwargs['obj'].strip('\'\'')
+
+        if host_name not in self._hosts_list:
+            return self._errors['CMMVC5753E']
+
+        host_info = self._hosts_list[host_name]
+        return self._add_port_to_host(host_info, **kwargs)
+
+    # Change host properties
+    def _cmd_chhost(self, **kwargs):
+        if 'chapsecret' not in kwargs:
+            return self._errors['CMMVC5707E']
+        # NOTE: the secret comes from the chapsecret argument, not from
+        # the object (host) name.
+        secret = kwargs['chapsecret'].strip('\'\'')
+
+        if 'obj' not in kwargs:
+            return self._errors['CMMVC5701E']
+        host_name = kwargs['obj'].strip('\'\'')
+
+        if host_name not in self._hosts_list:
+            return self._errors['CMMVC5753E']
+
+        self._hosts_list[host_name]['chapsecret'] = secret
+        return ('', '')
+
+    # Remove a host
+    def _cmd_rmhost(self, **kwargs):
+        if 'obj' not in kwargs:
+            return self._errors['CMMVC5701E']
+
+        host_name = kwargs['obj'].strip('\'\'')
+        if host_name not in self._hosts_list:
+            return
self._errors['CMMVC5753E'] + + for k, v in self._mappings_list.iteritems(): + if (v['host'] == host_name): + return self._errors['CMMVC5871E'] + + del self._hosts_list[host_name] + return ('', '') + + # List information about hosts + def _cmd_lshost(self, **kwargs): + if 'obj' not in kwargs: + rows = [] + rows.append(['id', 'name', 'port_count', 'iogrp_count', 'status']) + + found = False + for k, host in self._hosts_list.iteritems(): + filterstr = 'name=' + host['host_name'] + if (('filtervalue' not in kwargs) or + (kwargs['filtervalue'] == filterstr)): + rows.append([host['id'], host['host_name'], '1', '4', + 'offline']) + found = True + if found: + return self._print_info_cmd(rows=rows, **kwargs) + else: + return ('', '') + else: + if kwargs['obj'] not in self._hosts_list: + return self._errors['CMMVC5754E'] + host = self._hosts_list[kwargs['obj']] + rows = [] + rows.append(['id', host['id']]) + rows.append(['name', host['host_name']]) + rows.append(['port_count', '1']) + rows.append(['type', 'generic']) + rows.append(['mask', '1111']) + rows.append(['iogrp_count', '4']) + rows.append(['status', 'online']) + for port in host['iscsi_names']: + rows.append(['iscsi_name', port]) + rows.append(['node_logged_in_count', '0']) + rows.append(['state', 'offline']) + for port in host['wwpns']: + rows.append(['WWPN', port]) + rows.append(['node_logged_in_count', '0']) + rows.append(['state', 'active']) + + if 'nohdr' in kwargs: + for index in range(len(rows)): + rows[index] = ' '.join(rows[index][1:]) + + if 'delim' in kwargs: + for index in range(len(rows)): + rows[index] = kwargs['delim'].join(rows[index]) + + return ('%s' % '\n'.join(rows), '') + + # List iSCSI authorization information about hosts + def _cmd_lsiscsiauth(self, **kwargs): + if self._next_cmd_error['lsiscsiauth'] == 'no_info': + self._next_cmd_error['lsiscsiauth'] = '' + return ('', '') + rows = [] + rows.append(['type', 'id', 'name', 'iscsi_auth_method', + 'iscsi_chap_secret']) + + for k, host in self._hosts_list.iteritems(): + method = 'none' + secret = '' + if 'chapsecret' in host: + method = 'chap' + secret = host['chapsecret'] + rows.append(['host', host['id'], host['host_name'], method, + secret]) + return self._print_info_cmd(rows=rows, **kwargs) + + # Create a vdisk-host mapping + def _cmd_mkvdiskhostmap(self, **kwargs): + mapping_info = {} + mapping_info['id'] = self._find_unused_id(self._mappings_list) + + if 'host' not in kwargs: + return self._errors['CMMVC5707E'] + mapping_info['host'] = kwargs['host'].strip('\'\'') + + if 'scsi' not in kwargs: + return self._errors['CMMVC5707E'] + mapping_info['lun'] = kwargs['scsi'].strip('\'\'') + + if 'obj' not in kwargs: + return self._errors['CMMVC5707E'] + mapping_info['vol'] = kwargs['obj'].strip('\'\'') + + if mapping_info['vol'] not in self._volumes_list: + return self._errors['CMMVC5753E'] + + if mapping_info['host'] not in self._hosts_list: + return self._errors['CMMVC5754E'] + + if mapping_info['vol'] in self._mappings_list: + return self._errors['CMMVC6071E'] + + for k, v in self._mappings_list.iteritems(): + if ((v['host'] == mapping_info['host']) and + (v['lun'] == mapping_info['lun'])): + return self._errors['CMMVC5879E'] + + if kwargs.get('host', '').startswith('duplicate_mapping'): + if 'force' in kwargs: + self._mappings_list[mapping_info['vol']] = mapping_info + return ('Virtual Disk to Host map, id [%s], ' + 'successfully created' % (mapping_info['id']), '') + else: + return self._errors['CMMVC6071E'] + else: + self._mappings_list[mapping_info['vol']] = 
mapping_info
+            return ('Virtual Disk to Host map, id [%s], successfully created'
+                    % (mapping_info['id']), '')
+
+    # Delete a vdisk-host mapping
+    def _cmd_rmvdiskhostmap(self, **kwargs):
+        if 'host' not in kwargs:
+            return self._errors['CMMVC5707E']
+        host = kwargs['host'].strip('\'\'')
+
+        if 'obj' not in kwargs:
+            return self._errors['CMMVC5701E']
+        vol = kwargs['obj'].strip('\'\'')
+
+        if vol not in self._mappings_list:
+            return self._errors['CMMVC5753E']
+
+        if self._mappings_list[vol]['host'] != host:
+            return self._errors['CMMVC5753E']
+
+        del self._mappings_list[vol]
+        return ('', '')
+
+    # List information about vdisk-host mappings
+    def _cmd_lshostvdiskmap(self, **kwargs):
+        host_name = kwargs['obj']
+
+        if host_name not in self._hosts_list:
+            return self._errors['CMMVC5754E']
+
+        rows = []
+        rows.append(['id', 'name', 'SCSI_id', 'vdisk_id', 'vdisk_name',
+                     'vdisk_UID'])
+
+        for k, mapping in self._mappings_list.iteritems():
+            if (host_name == '') or (mapping['host'] == host_name):
+                volume = self._volumes_list[mapping['vol']]
+                rows.append([mapping['id'], mapping['host'],
+                             mapping['lun'], volume['id'],
+                             volume['name'], volume['uid']])
+
+        return self._print_info_cmd(rows=rows, **kwargs)
+
+    # Create a FlashCopy mapping
+    def _cmd_mkfcmap(self, **kwargs):
+        copyrate = kwargs['copyrate'] if 'copyrate' in kwargs else '50'
+
+        if 'source' not in kwargs:
+            return self._errors['CMMVC5707E']
+        source = kwargs['source'].strip('\'\'')
+        if source not in self._volumes_list:
+            return self._errors['CMMVC5754E']
+
+        if 'target' not in kwargs:
+            return self._errors['CMMVC5707E']
+        target = kwargs['target'].strip('\'\'')
+        if target not in self._volumes_list:
+            return self._errors['CMMVC5754E']
+
+        if source == target:
+            return self._errors['CMMVC6303E']
+
+        if (self._volumes_list[source]['capacity'] !=
+                self._volumes_list[target]['capacity']):
+            return self._errors['CMMVC5924E']
+
+        fcmap_info = {}
+        fcmap_info['source'] = source
+        fcmap_info['target'] = target
+        fcmap_info['id'] = self._find_unused_id(self._fcmappings_list)
+        fcmap_info['name'] = 'fcmap' + fcmap_info['id']
+        fcmap_info['copyrate'] = copyrate
+        fcmap_info['progress'] = '0'
+        fcmap_info['autodelete'] = 'autodelete' in kwargs
+        fcmap_info['status'] = 'idle_or_copied'
+        self._fcmappings_list[fcmap_info['id']] = fcmap_info
+
+        return ('FlashCopy Mapping, id [' + fcmap_info['id'] +
+                '], successfully created', '')
+
+    def _cmd_gen_prestartfcmap(self, **kwargs):
+        if 'obj' not in kwargs:
+            return self._errors['CMMVC5701E']
+        id_num = kwargs['obj']
+
+        if self._next_cmd_error['prestartfcmap'] == 'bad_id':
+            id_num = -1
+            self._next_cmd_error['prestartfcmap'] = ''
+
+        try:
+            fcmap = self._fcmappings_list[id_num]
+        except KeyError:
+            return self._errors['CMMVC5753E']
+
+        return self._state_transition('prepare', fcmap)
+
+    def _cmd_gen_startfcmap(self, **kwargs):
+        if 'obj' not in kwargs:
+            return self._errors['CMMVC5701E']
+        id_num = kwargs['obj']
+
+        if self._next_cmd_error['startfcmap'] == 'bad_id':
+            id_num = -1
+            self._next_cmd_error['startfcmap'] = ''
+
+        try:
+            fcmap = self._fcmappings_list[id_num]
+        except KeyError:
+            return self._errors['CMMVC5753E']
+
+        return self._state_transition('start', fcmap)
+
+    def _cmd_stopfcmap(self, **kwargs):
+        if 'obj' not in kwargs:
+            return self._errors['CMMVC5701E']
+        id_num = kwargs['obj']
+
+        try:
+            fcmap = self._fcmappings_list[id_num]
+        except KeyError:
+            return self._errors['CMMVC5753E']
+
+        return self._state_transition('stop', fcmap)
+
+    def _cmd_rmfcmap(self, **kwargs):
+        if 'obj' not in kwargs:
+            return self._errors['CMMVC5701E']
+        id_num = kwargs['obj']
+        force = 'force' in kwargs
+
+        if self._next_cmd_error['rmfcmap'] == 'bad_id':
+            id_num = -1
+            self._next_cmd_error['rmfcmap'] = ''
+
+        try:
+            fcmap = self._fcmappings_list[id_num]
+        except KeyError:
+            return self._errors['CMMVC5753E']
+
+        function = 'delete_force' if force else 'delete'
+        ret = self._state_transition(function, fcmap)
+        if fcmap['status'] == 'end':
+            del self._fcmappings_list[id_num]
+        return ret
+
+    def _cmd_lsvdiskfcmappings(self, **kwargs):
+        if 'obj' not in kwargs:
+            return self._errors['CMMVC5707E']
+        vdisk = kwargs['obj']
+        rows = []
+        rows.append(['id', 'name'])
+        for k, v in self._fcmappings_list.iteritems():
+            if v['source'] == vdisk or v['target'] == vdisk:
+                rows.append([v['id'], v['name']])
+        return self._print_info_cmd(rows=rows, **kwargs)
+
+    def _cmd_chfcmap(self, **kwargs):
+        if 'obj' not in kwargs:
+            return self._errors['CMMVC5707E']
+        id_num = kwargs['obj']
+
+        try:
+            fcmap = self._fcmappings_list[id_num]
+        except KeyError:
+            return self._errors['CMMVC5753E']
+
+        for key in ['name', 'copyrate', 'autodelete']:
+            if key in kwargs:
+                fcmap[key] = kwargs[key]
+        return ('', '')
+
+    def _cmd_lsfcmap(self, **kwargs):
+        rows = []
+        rows.append(['id', 'name', 'source_vdisk_id', 'source_vdisk_name',
+                     'target_vdisk_id', 'target_vdisk_name', 'group_id',
+                     'group_name', 'status', 'progress', 'copy_rate',
+                     'clean_progress', 'incremental', 'partner_FC_id',
+                     'partner_FC_name', 'restoring', 'start_time',
+                     'rc_controlled'])
+
+        # Assume we always get a filtervalue argument
+        filter_key = kwargs['filtervalue'].split('=')[0]
+        filter_value = kwargs['filtervalue'].split('=')[1]
+        to_delete = []
+        for k, v in self._fcmappings_list.iteritems():
+            if str(v[filter_key]) == filter_value:
+                source = self._volumes_list[v['source']]
+                target = self._volumes_list[v['target']]
+                self._state_transition('wait', v)
+
+                if self._next_cmd_error['lsfcmap'] == 'speed_up':
+                    self._next_cmd_error['lsfcmap'] = ''
+                    curr_state = v['status']
+                    while self._state_transition('wait', v) == ("", ""):
+                        if curr_state == v['status']:
+                            break
+                        curr_state = v['status']
+
+                if ((v['status'] == 'idle_or_copied' and v['autodelete'] and
+                     v['progress'] == '100') or (v['status'] == 'end')):
+                    to_delete.append(k)
+                else:
+                    rows.append([v['id'], v['name'], source['id'],
+                                 source['name'], target['id'], target['name'],
+                                 '', '', v['status'], v['progress'],
+                                 v['copyrate'], '100', 'off', '', '', 'no',
+                                 '', 'no'])
+
+        for d in to_delete:
+            # NOTE: delete the collected key, not the stale loop variable
+            # from the iteration above.
+            del self._fcmappings_list[d]
+
+        return self._print_info_cmd(rows=rows, **kwargs)
+
+    # Add host to list
+    def _add_host_to_list(self, connector):
+        host_info = {}
+        host_info['id'] = self._find_unused_id(self._hosts_list)
+        host_info['host_name'] = connector['host']
+        host_info['iscsi_names'] = []
+        host_info['wwpns'] = []
+        if 'initiator' in connector:
+            host_info['iscsi_names'].append(connector['initiator'])
+        if 'wwpns' in connector:
+            host_info['wwpns'] = host_info['wwpns'] + connector['wwpns']
+        self._hosts_list[connector['host']] = host_info
+
+    # The main function to run commands on the management simulator
+    def execute_command(self, cmd, check_exit_code=True):
+        try:
+            kwargs = self._cmd_to_dict(cmd)
+        except IndexError:
+            return self._errors['CMMVC5707E']
+
+        command = kwargs['cmd']
+        del kwargs['cmd']
+
+        if command == 'lsmdiskgrp':
+            out, err =
self._cmd_lsmdiskgrp(**kwargs) + elif command == 'lslicense': + out, err = self._cmd_lslicense(**kwargs) + elif command == 'lssystem': + out, err = self._cmd_lssystem(**kwargs) + elif command == 'lsnodecanister': + out, err = self._cmd_lsnodecanister(**kwargs) + elif command == 'lsnode': + out, err = self._cmd_lsnode(**kwargs) + elif command == 'lsportip': + out, err = self._cmd_lsportip(**kwargs) + elif command == 'lsfabric': + out, err = self._cmd_lsfabric(**kwargs) + elif command == 'mkvdisk': + out, err = self._cmd_mkvdisk(**kwargs) + elif command == 'rmvdisk': + out, err = self._cmd_rmvdisk(**kwargs) + elif command == 'lsvdisk': + out, err = self._cmd_lsvdisk(**kwargs) + elif command == 'mkhost': + out, err = self._cmd_mkhost(**kwargs) + elif command == 'addhostport': + out, err = self._cmd_addhostport(**kwargs) + elif command == 'chhost': + out, err = self._cmd_chhost(**kwargs) + elif command == 'rmhost': + out, err = self._cmd_rmhost(**kwargs) + elif command == 'lshost': + out, err = self._cmd_lshost(**kwargs) + elif command == 'lsiscsiauth': + out, err = self._cmd_lsiscsiauth(**kwargs) + elif command == 'mkvdiskhostmap': + out, err = self._cmd_mkvdiskhostmap(**kwargs) + elif command == 'rmvdiskhostmap': + out, err = self._cmd_rmvdiskhostmap(**kwargs) + elif command == 'lshostvdiskmap': + out, err = self._cmd_lshostvdiskmap(**kwargs) + elif command == 'mkfcmap': + out, err = self._cmd_mkfcmap(**kwargs) + elif command == 'prestartfcmap': + out, err = self._cmd_gen_prestartfcmap(**kwargs) + elif command == 'startfcmap': + out, err = self._cmd_gen_startfcmap(**kwargs) + elif command == 'stopfcmap': + out, err = self._cmd_stopfcmap(**kwargs) + elif command == 'rmfcmap': + out, err = self._cmd_rmfcmap(**kwargs) + elif command == 'chfcmap': + out, err = self._cmd_chfcmap(**kwargs) + elif command == 'lsfcmap': + out, err = self._cmd_lsfcmap(**kwargs) + elif command == 'lsvdiskfcmappings': + out, err = self._cmd_lsvdiskfcmappings(**kwargs) + else: + out, err = ('', 'ERROR: Unsupported command') + + if (check_exit_code) and (len(err) != 0): + raise exception.ProcessExecutionError(exit_code=1, + stdout=out, + stderr=err, + cmd=' '.join(cmd)) + + return (out, err) + + # After calling this function, the next call to the specified command will + # result in in the error specified + def error_injection(self, cmd, error): + self._next_cmd_error[cmd] = error + + +class StorwizeSVCFakeDriver(storwize_svc.StorwizeSVCDriver): + def __init__(self, *args, **kwargs): + super(StorwizeSVCFakeDriver, self).__init__(*args, **kwargs) + + def set_fake_storage(self, fake): + self.fake_storage = fake + + def _run_ssh(self, cmd, check_exit_code=True): + try: + LOG.debug(_('Run CLI command: %s') % cmd) + ret = self.fake_storage.execute_command(cmd, check_exit_code) + (stdout, stderr) = ret + LOG.debug(_('CLI output:\n stdout: %(stdout)s\n stderr: ' + '%(stderr)s') % {'stdout': stdout, 'stderr': stderr}) + + except exception.ProcessExecutionError as e: + with excutils.save_and_reraise_exception(): + LOG.debug(_('CLI Exception output:\n stdout: %(out)s\n ' + 'stderr: %(err)s') % {'out': e.stdout, + 'err': e.stderr}) + + return ret + + +class StorwizeSVCFakeSock: + def settimeout(self, time): + return + + +class StorwizeSVCDriverTestCase(test.TestCase): + def setUp(self): + super(StorwizeSVCDriverTestCase, self).setUp() + self.USESIM = True + if self.USESIM: + self.driver = StorwizeSVCFakeDriver( + configuration=conf.Configuration(None)) + self._def_flags = {'san_ip': 'hostname', + 'san_login': 'user', + 
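+                               # Dummy credentials; the fake driver's
+                               # _run_ssh never opens a real SSH session.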
'san_password': 'pass', + 'storwize_svc_flashcopy_timeout': 20, + # Test ignore capitalization + 'storwize_svc_connection_protocol': 'iScSi', + 'storwize_svc_multipath_enabled': False} + self._host_name = 'storwize-svc-test' + self._host_ip = '1.234.56.78' + self._host_wwpns = [ + str(random.randint(0, 9999999999999999)).zfill(16), + str(random.randint(0, 9999999999999999)).zfill(16)] + self._iscsi_name = ('test.initiator.%s' % + str(random.randint(10000, 99999))) + self.sim = StorwizeSVCManagementSimulator('volpool') + + self.driver.set_fake_storage(self.sim) + else: + self.driver = storwize_svc.StorwizeSVCDriver( + configuration=conf.Configuration(None)) + self._def_flags = {'san_ip': '1.111.11.11', + 'san_login': 'user', + 'san_password': 'password', + 'storwize_svc_volpool_name': 'openstack', + # Test ignore capitalization + 'storwize_svc_connection_protocol': 'iScSi', + 'storwize_svc_multipath_enabled': False, + 'ssh_conn_timeout': 0} + self._host_name = socket.gethostname() + self._host_ip = socket.gethostbyname(self._host_name) + + self._host_wwpns = [] + out, err = utils.execute('systool', '-c', 'fc_host', '-v', + run_as_root=True) + lines = out.split('\n') + for line in lines: + val = line.split('=') + if (len(val) == 2 and + val[0].strip().replace(" ", "") == 'port_name'): + self._host_wwpns.append(val[1].strip()[3:-1]) + self.assertNotEqual(len(self._host_wwpns), 0) + + lines = utils.read_file_as_root('/etc/iscsi/initiatorname.iscsi') + for l in lines.split('\n'): + if l.startswith('InitiatorName='): + self._iscsi_name = l[l.index('=') + 1:].strip() + + self._reset_flags() + self.driver.db = StorwizeSVCFakeDB() + self.driver.do_setup(None) + self.driver.check_for_setup_error() + self.stubs.Set(storwize_svc.time, 'sleep', lambda s: None) + + def _set_flag(self, flag, value): + group = self.driver.configuration.config_group + self.driver.configuration.set_override(flag, value, group) + + def _reset_flags(self): + self.driver.configuration.local_conf.reset() + for k, v in self._def_flags.iteritems(): + self._set_flag(k, v) + + def _assert_vol_exists(self, name, exists): + is_vol_defined = self.driver._is_vdisk_defined(name) + self.assertEqual(is_vol_defined, exists) + + def test_storwize_svc_connectivity(self): + # Make sure we detect if the pool doesn't exist + no_exist_pool = 'i-dont-exist-%s' % random.randint(10000, 99999) + self._set_flag('storwize_svc_volpool_name', no_exist_pool) + self.assertRaises(exception.InvalidInput, + self.driver.do_setup, None) + self._reset_flags() + + # Check the case where the user didn't configure IP addresses + # as well as receiving unexpected results from the storage + if self.USESIM: + self.sim.error_injection('lsnodecanister', 'header_mismatch') + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.do_setup, None) + self.sim.error_injection('lsnodecanister', 'remove_field') + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.do_setup, None) + self.sim.error_injection('lsportip', 'header_mismatch') + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.do_setup, None) + self.sim.error_injection('lsportip', 'remove_field') + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.do_setup, None) + + # Check with bad parameters + self._set_flag('san_ip', '') + self.assertRaises(exception.InvalidInput, + self.driver.check_for_setup_error) + self._reset_flags() + + self._set_flag('san_password', None) + self._set_flag('san_private_key', None) + 
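+        # With neither a password nor a private key configured, setup
+        # validation is expected to fail.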
self.assertRaises(exception.InvalidInput, + self.driver.check_for_setup_error) + self._reset_flags() + + self._set_flag('storwize_svc_vol_rsize', 101) + self.assertRaises(exception.InvalidInput, + self.driver.check_for_setup_error) + self._reset_flags() + + self._set_flag('storwize_svc_vol_warning', 101) + self.assertRaises(exception.InvalidInput, + self.driver.check_for_setup_error) + self._reset_flags() + + self._set_flag('storwize_svc_vol_grainsize', 42) + self.assertRaises(exception.InvalidInput, + self.driver.check_for_setup_error) + self._reset_flags() + + self._set_flag('storwize_svc_flashcopy_timeout', 601) + self.assertRaises(exception.InvalidInput, + self.driver.check_for_setup_error) + self._reset_flags() + + self._set_flag('storwize_svc_vol_compression', True) + self._set_flag('storwize_svc_vol_rsize', -1) + self.assertRaises(exception.InvalidInput, + self.driver.check_for_setup_error) + self._reset_flags() + + self._set_flag('storwize_svc_connection_protocol', 'foo') + self.assertRaises(exception.InvalidInput, + self.driver.check_for_setup_error) + self._reset_flags() + + self._set_flag('storwize_svc_connection_protocol', 'iSCSI') + self._set_flag('storwize_svc_multipath_enabled', True) + self.assertRaises(exception.InvalidInput, + self.driver.check_for_setup_error) + self._reset_flags() + + if self.USESIM: + self.sim.error_injection('lslicense', 'no_compression') + self._set_flag('storwize_svc_vol_compression', True) + self.driver.do_setup(None) + self.assertRaises(exception.InvalidInput, + self.driver.check_for_setup_error) + self._reset_flags() + + # Finally, check with good parameters + self.driver.do_setup(None) + + def _generate_vol_info(self, vol_name, vol_id): + rand_id = str(random.randint(10000, 99999)) + if vol_name: + return {'name': 'snap_volume%s' % rand_id, + 'volume_name': vol_name, + 'id': rand_id, + 'volume_id': vol_id, + 'volume_size': 10} + else: + return {'name': 'test_volume%s' % rand_id, + 'size': 10, + 'id': '%s' % rand_id, + 'volume_type_id': None} + + def _create_test_vol(self, opts): + ctxt = context.get_admin_context() + type_ref = volume_types.create(ctxt, 'testtype', opts) + volume = self._generate_vol_info(None, None) + volume['volume_type_id'] = type_ref['id'] + self.driver.create_volume(volume) + + attrs = self.driver._get_vdisk_attributes(volume['name']) + self.driver.delete_volume(volume) + volume_types.destroy(ctxt, type_ref['id']) + return attrs + + def _fail_prepare_fc_map(self, fc_map_id, source, target): + raise exception.ProcessExecutionError(exit_code=1, + stdout='', + stderr='unit-test-fail', + cmd='prestartfcmap id') + + def test_storwize_svc_snapshots(self): + vol1 = self._generate_vol_info(None, None) + self.driver.create_volume(vol1) + self.driver.db.volume_set(vol1) + snap1 = self._generate_vol_info(vol1['name'], vol1['id']) + + # Test timeout and volume cleanup + self._set_flag('storwize_svc_flashcopy_timeout', 1) + self.assertRaises(exception.InvalidSnapshot, + self.driver.create_snapshot, snap1) + self._assert_vol_exists(snap1['name'], False) + self._reset_flags() + + # Test prestartfcmap, startfcmap, and rmfcmap failing + orig = self.driver._call_prepare_fc_map + self.driver._call_prepare_fc_map = self._fail_prepare_fc_map + self.assertRaises(exception.ProcessExecutionError, + self.driver.create_snapshot, snap1) + self.driver._call_prepare_fc_map = orig + + if self.USESIM: + self.sim.error_injection('lsfcmap', 'speed_up') + self.sim.error_injection('startfcmap', 'bad_id') + self.assertRaises(exception.ProcessExecutionError, + 
self.driver.create_snapshot, snap1) + self._assert_vol_exists(snap1['name'], False) + self.sim.error_injection('prestartfcmap', 'bad_id') + self.assertRaises(exception.ProcessExecutionError, + self.driver.create_snapshot, snap1) + self._assert_vol_exists(snap1['name'], False) + + # Test successful snapshot + self.driver.create_snapshot(snap1) + self._assert_vol_exists(snap1['name'], True) + + # Try to create a snapshot from an non-existing volume - should fail + snap_novol = self._generate_vol_info('undefined-vol', '12345') + self.assertRaises(exception.VolumeNotFound, + self.driver.create_snapshot, + snap_novol) + + # We support deleting a volume that has snapshots, so delete the volume + # first + self.driver.delete_volume(vol1) + self.driver.delete_snapshot(snap1) + + def test_storwize_svc_create_volfromsnap_clone(self): + vol1 = self._generate_vol_info(None, None) + self.driver.create_volume(vol1) + self.driver.db.volume_set(vol1) + snap1 = self._generate_vol_info(vol1['name'], vol1['id']) + self.driver.create_snapshot(snap1) + vol2 = self._generate_vol_info(None, None) + vol3 = self._generate_vol_info(None, None) + + # Try to create a volume from a non-existing snapshot + snap_novol = self._generate_vol_info('undefined-vol', '12345') + vol_novol = self._generate_vol_info(None, None) + self.assertRaises(exception.SnapshotNotFound, + self.driver.create_volume_from_snapshot, + vol_novol, + snap_novol) + + # Fail the snapshot + orig = self.driver._call_prepare_fc_map + self.driver._call_prepare_fc_map = self._fail_prepare_fc_map + self.assertRaises(exception.ProcessExecutionError, + self.driver.create_volume_from_snapshot, + vol2, snap1) + self.driver._call_prepare_fc_map = orig + self._assert_vol_exists(vol2['name'], False) + + # Try to create where source size != target size + vol2['size'] += 1 + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.create_volume_from_snapshot, + vol2, snap1) + self._assert_vol_exists(vol2['name'], False) + vol2['size'] -= 1 + + # Succeed + if self.USESIM: + self.sim.error_injection('lsfcmap', 'speed_up') + self.driver.create_volume_from_snapshot(vol2, snap1) + self._assert_vol_exists(vol2['name'], True) + + # Try to clone where source size != target size + vol3['size'] += 1 + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.create_cloned_volume, + vol3, vol2) + self._assert_vol_exists(vol3['name'], False) + vol3['size'] -= 1 + + if self.USESIM: + self.sim.error_injection('lsfcmap', 'speed_up') + self.driver.create_cloned_volume(vol3, vol2) + self._assert_vol_exists(vol3['name'], True) + + # Delete in the 'opposite' order to make sure it works + self.driver.delete_volume(vol3) + self._assert_vol_exists(vol3['name'], False) + self.driver.delete_volume(vol2) + self._assert_vol_exists(vol2['name'], False) + self.driver.delete_snapshot(snap1) + self._assert_vol_exists(snap1['name'], False) + self.driver.delete_volume(vol1) + self._assert_vol_exists(vol1['name'], False) + + def test_storwize_svc_volumes(self): + # Create a first volume + volume = self._generate_vol_info(None, None) + self.driver.create_volume(volume) + + self.driver.ensure_export(None, volume) + + # Do nothing + self.driver.create_export(None, volume) + self.driver.remove_export(None, volume) + + # Make sure volume attributes are as they should be + attributes = self.driver._get_vdisk_attributes(volume['name']) + attr_size = float(attributes['capacity']) / (1024 ** 3) # bytes to GB + self.assertEqual(attr_size, float(volume['size'])) + pool = 
self.driver.configuration.local_conf.storwize_svc_volpool_name
+        self.assertEqual(attributes['mdisk_grp_name'], pool)
+
+        # Try to create the volume again (should fail)
+        self.assertRaises(exception.ProcessExecutionError,
+                          self.driver.create_volume,
+                          volume)
+
+        # Try to delete a volume that doesn't exist (should not fail)
+        vol_no_exist = {'name': 'i_dont_exist'}
+        self.driver.delete_volume(vol_no_exist)
+        # Ensure export for volume that doesn't exist (should not fail)
+        self.driver.ensure_export(None, vol_no_exist)
+
+        # Delete the volume
+        self.driver.delete_volume(volume)
+
+    def test_storwize_svc_volume_params(self):
+        # Option test matrix
+        # Option        Value   Covered by test #
+        # rsize         -1      1
+        # rsize         2       2,3
+        # warning       0       2
+        # warning       80      3
+        # autoexpand    True    2
+        # autoexpand    False   3
+        # grainsize     32      2
+        # grainsize     256     3
+        # compression   True    4
+        # compression   False   2,3
+        # easytier      True    1,3
+        # easytier      False   2
+
+        opts_list = []
+        chck_list = []
+        opts_list.append({'rsize': -1, 'easytier': True})
+        chck_list.append({'free_capacity': '0', 'easy_tier': 'on'})
+        opts_list.append({'rsize': 2, 'compression': False, 'warning': 0,
+                          'autoexpand': True, 'grainsize': 32,
+                          'easytier': False})
+        chck_list.append({'-free_capacity': '0', 'compressed_copy': 'no',
+                          'warning': '0', 'autoexpand': 'on',
+                          'grainsize': '32', 'easy_tier': 'off'})
+        opts_list.append({'rsize': 2, 'compression': False, 'warning': 80,
+                          'autoexpand': False, 'grainsize': 256,
+                          'easytier': True})
+        chck_list.append({'-free_capacity': '0', 'compressed_copy': 'no',
+                          'warning': '80', 'autoexpand': 'off',
+                          'grainsize': '256', 'easy_tier': 'on'})
+        opts_list.append({'rsize': 2, 'compression': True})
+        chck_list.append({'-free_capacity': '0',
+                          'compressed_copy': 'yes'})
+
+        for idx in range(len(opts_list)):
+            attrs = self._create_test_vol(opts_list[idx])
+            for k, v in chck_list[idx].iteritems():
+                try:
+                    if k[0] == '-':
+                        k = k[1:]
+                        self.assertNotEqual(attrs[k], v)
+                    else:
+                        self.assertEqual(attrs[k], v)
+                except exception.ProcessExecutionError as e:
+                    if 'CMMVC7050E' not in e.stderr:
+                        raise
+
+    def test_storwize_svc_unicode_host_and_volume_names(self):
+        # We'll check with iSCSI only - nothing protocol-dependent here
+        self._set_flag('storwize_svc_connection_protocol', 'iSCSI')
+        self.driver.do_setup(None)
+
+        rand_id = random.randint(10000, 99999)
+        volume1 = {'name': u'unicode1_volume%s' % rand_id,
+                   'size': 2,
+                   'id': 1,
+                   'volume_type_id': None}
+        self.driver.create_volume(volume1)
+        self._assert_vol_exists(volume1['name'], True)
+
+        self.assertRaises(exception.NoValidHost,
+                          self.driver._connector_to_hostname_prefix,
+                          {'host': 12345})
+
+        # Add a host first to make life interesting (this host and
+        # conn['host'] should be translated to the same prefix, and the
+        # initiator should differentiate them)
+        tmpconn1 = {'initiator': u'unicode:initiator1.%s' % rand_id,
+                    'ip': '10.10.10.10',
+                    'host': u'unicode.foo}.bar{.baz-%s' % rand_id}
+        self.driver._create_host(tmpconn1)
+
+        # Add a host with a different prefix
+        tmpconn2 = {'initiator': u'unicode:initiator2.%s' % rand_id,
+                    'ip': '10.10.10.11',
+                    'host': u'unicode.hello.world-%s' % rand_id}
+        self.driver._create_host(tmpconn2)
+
+        conn = {'initiator': u'unicode:initiator3.%s' % rand_id,
+                'ip': '10.10.10.12',
+                'host': u'unicode.foo}.bar}.baz-%s' % rand_id}
+        self.driver.initialize_connection(volume1, conn)
+        host_name = self.driver._get_host_from_connector(conn)
+        self.assertNotEqual(host_name, None)
+        self.driver.terminate_connection(volume1, conn)
+        host_name =
self.driver._get_host_from_connector(conn) + self.assertEqual(host_name, None) + self.driver.delete_volume(volume1) + + # Clean up temporary hosts + for tmpconn in [tmpconn1, tmpconn2]: + host_name = self.driver._get_host_from_connector(tmpconn) + self.assertNotEqual(host_name, None) + self.driver._delete_host(host_name) + + def test_storwize_svc_host_maps(self): + # Create two volumes to be used in mappings + + ctxt = context.get_admin_context() + volume1 = self._generate_vol_info(None, None) + self.driver.create_volume(volume1) + volume2 = self._generate_vol_info(None, None) + self.driver.create_volume(volume2) + + # Create volume types that we created + types = {} + for protocol in ['FC', 'iSCSI']: + opts = {'storage_protocol': ' ' + protocol} + types[protocol] = volume_types.create(ctxt, protocol, opts) + + conn = {'initiator': self._iscsi_name, + 'ip': self._host_ip, + 'host': self._host_name, + 'wwpns': self._host_wwpns} + + for protocol in ['FC', 'iSCSI']: + volume1['volume_type_id'] = types[protocol]['id'] + volume2['volume_type_id'] = types[protocol]['id'] + + # Check case where no hosts exist + if self.USESIM: + ret = self.driver._get_host_from_connector(conn) + self.assertEqual(ret, None) + + # Make sure that the volumes have been created + self._assert_vol_exists(volume1['name'], True) + self._assert_vol_exists(volume2['name'], True) + + # Initialize connection from the first volume to a host + self.driver.initialize_connection(volume1, conn) + + # Initialize again, should notice it and do nothing + self.driver.initialize_connection(volume1, conn) + + # Try to delete the 1st volume (should fail because it is mapped) + self.assertRaises(exception.ProcessExecutionError, + self.driver.delete_volume, + volume1) + + # Check bad output from lsfabric for the 2nd volume + if protocol == 'FC' and self.USESIM: + for error in ['remove_field', 'header_mismatch']: + self.sim.error_injection('lsfabric', error) + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.initialize_connection, + volume2, conn) + + self.driver.terminate_connection(volume1, conn) + if self.USESIM: + host_name = self.driver._get_host_from_connector(conn) + self.assertEqual(host_name, None) + + # Check cases with no auth set for host + if self.USESIM: + for case in ['no_info', 'no_auth_set']: + conn_na = {'initiator': 'test:init:%s' % + random.randint(10000, 99999), + 'ip': '11.11.11.11', + 'host': 'host-%s' % case} + self.sim._add_host_to_list(conn_na) + volume1['volume_type_id'] = types['iSCSI']['id'] + if case == 'no_info': + self.sim.error_injection('lsiscsiauth', 'no_info') + self.driver.initialize_connection(volume1, conn_na) + ret = self.driver._get_chap_secret_for_host(conn_na['host']) + self.assertNotEqual(ret, None) + self.driver.terminate_connection(volume1, conn_na) + + # Test no preferred node + if self.USESIM: + self.sim.error_injection('lsvdisk', 'no_pref_node') + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.initialize_connection, + volume1, conn) + + # Initialize connection from the second volume to the host with no + # preferred node set if in simulation mode, otherwise, just + # another initialize connection. 
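+        # (A blank preferred_node_id is expected to still succeed, unlike
+        # the missing-field 'no_pref_node' case above, which raises.)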
+ if self.USESIM: + self.sim.error_injection('lsvdisk', 'blank_pref_node') + self.driver.initialize_connection(volume2, conn) + + # Try to remove connection from host that doesn't exist (should fail) + conn_no_exist = conn.copy() + conn_no_exist['initiator'] = 'i_dont_exist' + conn_no_exist['wwpns'] = ['0000000000000000'] + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.terminate_connection, + volume1, + conn_no_exist) + + # Try to remove connection from volume that isn't mapped (should print + # message but NOT fail) + vol_no_exist = {'name': 'i_dont_exist'} + self.driver.terminate_connection(vol_no_exist, conn) + + # Remove the mapping from the 1st volume and delete it + self.driver.terminate_connection(volume1, conn) + self.driver.delete_volume(volume1) + self._assert_vol_exists(volume1['name'], False) + + # Make sure our host still exists + host_name = self.driver._get_host_from_connector(conn) + self.assertNotEqual(host_name, None) + + # Remove the mapping from the 2nd volume and delete it. The host should + # be automatically removed because there are no more mappings. + self.driver.terminate_connection(volume2, conn) + self.driver.delete_volume(volume2) + self._assert_vol_exists(volume2['name'], False) + + # Delete volume types that we created + for protocol in ['FC', 'iSCSI']: + volume_types.destroy(ctxt, types[protocol]['id']) + + # Check if our host still exists (it should not) + ret = self.driver._get_host_from_connector(conn) + self.assertEqual(ret, None) + + def test_storwize_svc_multi_host_maps(self): + # Create a volume to be used in mappings + ctxt = context.get_admin_context() + volume = self._generate_vol_info(None, None) + self.driver.create_volume(volume) + + # Create the volume types we will use + types = {} + for protocol in ['FC', 'iSCSI']: + opts = {'storage_protocol': ' ' + protocol} + types[protocol] = volume_types.create(ctxt, protocol, opts) + + conn = {'initiator': self._iscsi_name, + 'ip': '11.11.11.11', + 'host': 'duplicate_mapping'} + + for protocol in ['FC', 'iSCSI']: + volume['volume_type_id'] = types[protocol]['id'] + + # Make sure that the volume has been created + self._assert_vol_exists(volume['name'], True) + + self.driver.initialize_connection(volume, conn) + self.driver.terminate_connection(volume, conn) + + self._set_flag('storwize_svc_multihostmap_enabled', False) + self.assertRaises(exception.CinderException, + self.driver.initialize_connection, volume, conn) + self.driver.terminate_connection(volume, conn) + self._reset_flags() + + def test_storwize_svc_delete_volume_snapshots(self): + # Create a volume with two snapshots + master = self._generate_vol_info(None, None) + self.driver.create_volume(master) + self.driver.db.volume_set(master) + + # Fail creating a snapshot - will force delete the snapshot + # (scenario currently disabled: note the 'and False' guard below) + if self.USESIM and False: + snap = self._generate_vol_info(master['name'], master['id']) + self.sim.error_injection('startfcmap', 'bad_id') + self.assertRaises(exception.ProcessExecutionError, + self.driver.create_snapshot, snap) + self._assert_vol_exists(snap['name'], False) + + # Delete a snapshot + snap = self._generate_vol_info(master['name'], master['id']) + self.driver.create_snapshot(snap) + self._assert_vol_exists(snap['name'], True) + self.driver.delete_snapshot(snap) + self._assert_vol_exists(snap['name'], False) + + # Delete a volume with snapshots (regular) + snap = self._generate_vol_info(master['name'], master['id']) + self.driver.create_snapshot(snap) + self._assert_vol_exists(snap['name'], True) +
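# (On Storwize/SVC the snapshot is a FlashCopy target, so deleting + # the source volume below is expected to also clean up the dependent + # mapping; the delete must succeed even though a snapshot exists.) +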
self.driver.delete_volume(master) + self._assert_vol_exists(master['name'], False) + + # Fail create volume from snapshot - will force delete the volume + if self.USESIM: + volfs = self._generate_vol_info(None, None) + self.sim.error_injection('startfcmap', 'bad_id') + self.sim.error_injection('lsfcmap', 'speed_up') + self.assertRaises(exception.ProcessExecutionError, + self.driver.create_volume_from_snapshot, + volfs, snap) + self._assert_vol_exists(volfs['name'], False) + + # Create volume from snapshot and delete it + volfs = self._generate_vol_info(None, None) + if self.USESIM: + self.sim.error_injection('lsfcmap', 'speed_up') + self.driver.create_volume_from_snapshot(volfs, snap) + self._assert_vol_exists(volfs['name'], True) + self.driver.delete_volume(volfs) + self._assert_vol_exists(volfs['name'], False) + + # Create volume from snapshot and delete the snapshot + volfs = self._generate_vol_info(None, None) + if self.USESIM: + self.sim.error_injection('lsfcmap', 'speed_up') + self.driver.create_volume_from_snapshot(volfs, snap) + self.driver.delete_snapshot(snap) + self._assert_vol_exists(snap['name'], False) + + # Fail create clone - will force delete the target volume + if self.USESIM: + clone = self._generate_vol_info(None, None) + self.sim.error_injection('startfcmap', 'bad_id') + self.sim.error_injection('lsfcmap', 'speed_up') + self.assertRaises(exception.ProcessExecutionError, + self.driver.create_cloned_volume, + clone, volfs) + self._assert_vol_exists(clone['name'], False) + + # Create the clone, delete the source and target + clone = self._generate_vol_info(None, None) + if self.USESIM: + self.sim.error_injection('lsfcmap', 'speed_up') + self.driver.create_cloned_volume(clone, volfs) + self._assert_vol_exists(clone['name'], True) + self.driver.delete_volume(volfs) + self._assert_vol_exists(volfs['name'], False) + self.driver.delete_volume(clone) + self._assert_vol_exists(clone['name'], False) + + # Not defined in python 2.6, so define here... + def assertLessEqual(self, a, b, msg=None): + if not a <= b: + self.fail('%s not less than or equal to %s' % (repr(a), repr(b))) + + def test_storwize_svc_get_volume_stats(self): + stats = self.driver.get_volume_stats() + self.assertLessEqual(stats['free_capacity_gb'], + stats['total_capacity_gb']) + if self.USESIM: + self.assertEqual(stats['volume_backend_name'], + 'storwize-svc-sim_volpool') + self.assertAlmostEqual(stats['total_capacity_gb'], 3328.0) + self.assertAlmostEqual(stats['free_capacity_gb'], 3287.5) + + +# The test case does not rely on the OpenStack runtime, +# so it should inherit from unittest.TestCase. +class CLIResponseTestCase(unittest.TestCase): + def test_empty(self): + self.assertEqual(0, len(storwize_svc.CLIResponse(''))) + self.assertEqual(0, len(storwize_svc.CLIResponse(('', 'stderr')))) + + def test_header(self): + raw = r'''id!name +1!node1 +2!node2 +''' + resp = storwize_svc.CLIResponse(raw, with_header=True) + self.assertEqual(2, len(resp)) + self.assertEqual('1', resp[0]['id']) + self.assertEqual('2', resp[1]['id']) + + def test_select(self): + raw = r'''id!123 +name!Bill +name!Bill2 +age!30 +home address!s1 +home address!s2 + +id!
7 +name!John +name!John2 +age!40 +home address!s3 +home address!s4 +''' + resp = storwize_svc.CLIResponse(raw, with_header=False) + self.assertEqual(list(resp.select('home address', 'name', + 'home address')), + [('s1', 'Bill', 's1'), ('s2', 'Bill2', 's2'), + ('s3', 'John', 's3'), ('s4', 'John2', 's4')]) + + def test_lsnode_all(self): + raw = r'''id!name!UPS_serial_number!WWNN!status +1!node1!!500507680200C744!online +2!node2!!500507680200C745!online +''' + resp = storwize_svc.CLIResponse(raw) + self.assertEqual(2, len(resp)) + self.assertEqual('1', resp[0]['id']) + self.assertEqual('500507680200C744', resp[0]['WWNN']) + self.assertEqual('2', resp[1]['id']) + self.assertEqual('500507680200C745', resp[1]['WWNN']) + + def test_lsnode_single(self): + raw = r'''id!1 +port_id!500507680210C744 +port_status!active +port_speed!8Gb +port_id!500507680240C744 +port_status!inactive +port_speed!8Gb +''' + resp = storwize_svc.CLIResponse(raw, with_header=False) + self.assertEqual(1, len(resp)) + self.assertEqual('1', resp[0]['id']) + self.assertEqual(list(resp.select('port_id', 'port_status')), + [('500507680210C744', 'active'), + ('500507680240C744', 'inactive')]) + +if __name__ == '__main__': + unittest.main() diff --git a/cinder/tests/test_test.py b/cinder/tests/test_test.py new file mode 100644 index 0000000000..fe62d9abd1 --- /dev/null +++ b/cinder/tests/test_test.py @@ -0,0 +1,44 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Tests for the testing base code.""" + +from cinder.openstack.common import rpc +from cinder import test + + +class IsolationTestCase(test.TestCase): + """Ensure that things are cleaned up after failed tests. + + These tests don't really do much here, but if isolation fails, a bunch + of other tests should fail. + + """ + def test_service_isolation(self): + self.start_service('volume') + + def test_rpc_consumer_isolation(self): + class NeverCalled(object): + + def __getattribute__(*args): + assert False, "I should never get called." + + connection = rpc.create_connection(new=True) + proxy = NeverCalled() + connection.create_consumer('volume', proxy, fanout=False) + connection.consume_in_thread() diff --git a/cinder/tests/test_test_utils.py b/cinder/tests/test_test_utils.py new file mode 100644 index 0000000000..c8c79b2519 --- /dev/null +++ b/cinder/tests/test_test_utils.py @@ -0,0 +1,28 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2010 OpenStack LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License.
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from cinder import test +from cinder.tests import utils as test_utils + + +class TestUtilsTestCase(test.TestCase): + def test_get_test_admin_context(self): + """get_test_admin_context's return value behaves like admin context.""" + ctxt = test_utils.get_test_admin_context() + + # TODO(soren): This should verify the full interface context + # objects expose. + self.assertTrue(ctxt.is_admin) diff --git a/cinder/tests/test_utils.py b/cinder/tests/test_utils.py new file mode 100644 index 0000000000..2e7b08629a --- /dev/null +++ b/cinder/tests/test_utils.py @@ -0,0 +1,728 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 Justin Santa Barbara +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import __builtin__ +import datetime +import hashlib +import os +import os.path +import paramiko +import StringIO +import tempfile +import uuid + +import mox + +import cinder +from cinder import exception +from cinder import flags +from cinder.openstack.common import strutils +from cinder.openstack.common import timeutils +from cinder import test +from cinder import utils + + +FLAGS = flags.FLAGS + + +class ExecuteTestCase(test.TestCase): + def test_retry_on_failure(self): + fd, tmpfilename = tempfile.mkstemp() + _, tmpfilename2 = tempfile.mkstemp() + try: + fp = os.fdopen(fd, 'w+') + fp.write('''#!/bin/sh +# If stdin fails to get passed during one of the runs, make a note. +if ! grep -q foo +then + echo 'failure' > "$1" +fi +# If stdin has failed to get passed during this or a previous run, exit early. +if grep failure "$1" +then + exit 1 +fi +runs="$(cat $1)" +if [ -z "$runs" ] +then + runs=0 +fi +runs=$(($runs + 1)) +echo $runs > "$1" +exit 1 +''') + fp.close() + os.chmod(tmpfilename, 0755) + self.assertRaises(exception.ProcessExecutionError, + utils.execute, + tmpfilename, tmpfilename2, attempts=10, + process_input='foo', + delay_on_retry=False) + fp = open(tmpfilename2, 'r+') + runs = fp.read() + fp.close() + self.assertNotEquals(runs.strip(), 'failure', 'stdin did not ' + 'always get passed ' + 'correctly') + runs = int(runs.strip()) + self.assertEquals(runs, 10, + 'Ran %d times instead of 10.' 
% (runs,)) + finally: + os.unlink(tmpfilename) + os.unlink(tmpfilename2) + + def test_unknown_kwargs_raises_error(self): + self.assertRaises(exception.Error, + utils.execute, + '/usr/bin/env', 'true', + this_is_not_a_valid_kwarg=True) + + def test_check_exit_code_boolean(self): + utils.execute('/usr/bin/env', 'false', check_exit_code=False) + self.assertRaises(exception.ProcessExecutionError, + utils.execute, + '/usr/bin/env', 'false', check_exit_code=True) + + def test_no_retry_on_success(self): + fd, tmpfilename = tempfile.mkstemp() + _, tmpfilename2 = tempfile.mkstemp() + try: + fp = os.fdopen(fd, 'w+') + fp.write('''#!/bin/sh +# If we've already run, bail out. +grep -q foo "$1" && exit 1 +# Mark that we've run before. +echo foo > "$1" +# Check that stdin gets passed correctly. +grep foo +''') + fp.close() + os.chmod(tmpfilename, 0755) + utils.execute(tmpfilename, + tmpfilename2, + process_input='foo', + attempts=2) + finally: + os.unlink(tmpfilename) + os.unlink(tmpfilename2) + + +class GetFromPathTestCase(test.TestCase): + def test_tolerates_nones(self): + f = utils.get_from_path + + input = [] + self.assertEquals([], f(input, "a")) + self.assertEquals([], f(input, "a/b")) + self.assertEquals([], f(input, "a/b/c")) + + input = [None] + self.assertEquals([], f(input, "a")) + self.assertEquals([], f(input, "a/b")) + self.assertEquals([], f(input, "a/b/c")) + + input = [{'a': None}] + self.assertEquals([], f(input, "a")) + self.assertEquals([], f(input, "a/b")) + self.assertEquals([], f(input, "a/b/c")) + + input = [{'a': {'b': None}}] + self.assertEquals([{'b': None}], f(input, "a")) + self.assertEquals([], f(input, "a/b")) + self.assertEquals([], f(input, "a/b/c")) + + input = [{'a': {'b': {'c': None}}}] + self.assertEquals([{'b': {'c': None}}], f(input, "a")) + self.assertEquals([{'c': None}], f(input, "a/b")) + self.assertEquals([], f(input, "a/b/c")) + + input = [{'a': {'b': {'c': None}}}, {'a': None}] + self.assertEquals([{'b': {'c': None}}], f(input, "a")) + self.assertEquals([{'c': None}], f(input, "a/b")) + self.assertEquals([], f(input, "a/b/c")) + + input = [{'a': {'b': {'c': None}}}, {'a': {'b': None}}] + self.assertEquals([{'b': {'c': None}}, {'b': None}], f(input, "a")) + self.assertEquals([{'c': None}], f(input, "a/b")) + self.assertEquals([], f(input, "a/b/c")) + + def test_does_select(self): + f = utils.get_from_path + + input = [{'a': 'a_1'}] + self.assertEquals(['a_1'], f(input, "a")) + self.assertEquals([], f(input, "a/b")) + self.assertEquals([], f(input, "a/b/c")) + + input = [{'a': {'b': 'b_1'}}] + self.assertEquals([{'b': 'b_1'}], f(input, "a")) + self.assertEquals(['b_1'], f(input, "a/b")) + self.assertEquals([], f(input, "a/b/c")) + + input = [{'a': {'b': {'c': 'c_1'}}}] + self.assertEquals([{'b': {'c': 'c_1'}}], f(input, "a")) + self.assertEquals([{'c': 'c_1'}], f(input, "a/b")) + self.assertEquals(['c_1'], f(input, "a/b/c")) + + input = [{'a': {'b': {'c': 'c_1'}}}, {'a': None}] + self.assertEquals([{'b': {'c': 'c_1'}}], f(input, "a")) + self.assertEquals([{'c': 'c_1'}], f(input, "a/b")) + self.assertEquals(['c_1'], f(input, "a/b/c")) + + input = [{'a': {'b': {'c': 'c_1'}}}, + {'a': {'b': None}}] + self.assertEquals([{'b': {'c': 'c_1'}}, {'b': None}], f(input, "a")) + self.assertEquals([{'c': 'c_1'}], f(input, "a/b")) + self.assertEquals(['c_1'], f(input, "a/b/c")) + + input = [{'a': {'b': {'c': 'c_1'}}}, + {'a': {'b': {'c': 'c_2'}}}] + self.assertEquals([{'b': {'c': 'c_1'}}, {'b': {'c': 'c_2'}}], + f(input, "a")) + self.assertEquals([{'c': 'c_1'}, {'c': 
'c_2'}], f(input, "a/b")) + self.assertEquals(['c_1', 'c_2'], f(input, "a/b/c")) + + self.assertEquals([], f(input, "a/b/c/d")) + self.assertEquals([], f(input, "c/a/b/d")) + self.assertEquals([], f(input, "i/r/t")) + + def test_flattens_lists(self): + f = utils.get_from_path + + input = [{'a': [1, 2, 3]}] + self.assertEquals([1, 2, 3], f(input, "a")) + self.assertEquals([], f(input, "a/b")) + self.assertEquals([], f(input, "a/b/c")) + + input = [{'a': {'b': [1, 2, 3]}}] + self.assertEquals([{'b': [1, 2, 3]}], f(input, "a")) + self.assertEquals([1, 2, 3], f(input, "a/b")) + self.assertEquals([], f(input, "a/b/c")) + + input = [{'a': {'b': [1, 2, 3]}}, {'a': {'b': [4, 5, 6]}}] + self.assertEquals([1, 2, 3, 4, 5, 6], f(input, "a/b")) + self.assertEquals([], f(input, "a/b/c")) + + input = [{'a': [{'b': [1, 2, 3]}, {'b': [4, 5, 6]}]}] + self.assertEquals([1, 2, 3, 4, 5, 6], f(input, "a/b")) + self.assertEquals([], f(input, "a/b/c")) + + input = [{'a': [1, 2, {'b': 'b_1'}]}] + self.assertEquals([1, 2, {'b': 'b_1'}], f(input, "a")) + self.assertEquals(['b_1'], f(input, "a/b")) + + def test_bad_xpath(self): + f = utils.get_from_path + + self.assertRaises(exception.Error, f, [], None) + self.assertRaises(exception.Error, f, [], "") + self.assertRaises(exception.Error, f, [], "/") + self.assertRaises(exception.Error, f, [], "/a") + self.assertRaises(exception.Error, f, [], "/a/") + self.assertRaises(exception.Error, f, [], "//") + self.assertRaises(exception.Error, f, [], "//a") + self.assertRaises(exception.Error, f, [], "a//a") + self.assertRaises(exception.Error, f, [], "a//a/") + self.assertRaises(exception.Error, f, [], "a/a/") + + def test_real_failure1(self): + # Real world failure case... + # We weren't coping when the input was a Dictionary instead of a List + # This led to test_accepts_dictionaries + f = utils.get_from_path + + inst = {'fixed_ip': {'floating_ips': [{'address': '1.2.3.4'}], + 'address': '192.168.0.3'}, + 'hostname': ''} + + private_ips = f(inst, 'fixed_ip/address') + public_ips = f(inst, 'fixed_ip/floating_ips/address') + self.assertEquals(['192.168.0.3'], private_ips) + self.assertEquals(['1.2.3.4'], public_ips) + + def test_accepts_dictionaries(self): + f = utils.get_from_path + + input = {'a': [1, 2, 3]} + self.assertEquals([1, 2, 3], f(input, "a")) + self.assertEquals([], f(input, "a/b")) + self.assertEquals([], f(input, "a/b/c")) + + input = {'a': {'b': [1, 2, 3]}} + self.assertEquals([{'b': [1, 2, 3]}], f(input, "a")) + self.assertEquals([1, 2, 3], f(input, "a/b")) + self.assertEquals([], f(input, "a/b/c")) + + input = {'a': [{'b': [1, 2, 3]}, {'b': [4, 5, 6]}]} + self.assertEquals([1, 2, 3, 4, 5, 6], f(input, "a/b")) + self.assertEquals([], f(input, "a/b/c")) + + input = {'a': [1, 2, {'b': 'b_1'}]} + self.assertEquals([1, 2, {'b': 'b_1'}], f(input, "a")) + self.assertEquals(['b_1'], f(input, "a/b")) + + +class GenericUtilsTestCase(test.TestCase): + def test_hostname_unicode_sanitization(self): + hostname = u"\u7684.test.example.com" + self.assertEqual("test.example.com", + utils.sanitize_hostname(hostname)) + + def test_hostname_sanitize_periods(self): + hostname = "....test.example.com..." 
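+ # (sanitize_hostname is expected to strip characters that are not + # valid in a hostname and then trim leading/trailing '.' and '-', + # e.g. "....test.example.com..." -> "test.example.com" -- a reading + # of the assertions in these tests, not of the implementation.)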
+ self.assertEqual("test.example.com", + utils.sanitize_hostname(hostname)) + + def test_hostname_sanitize_dashes(self): + hostname = "----test.example.com---" + self.assertEqual("test.example.com", + utils.sanitize_hostname(hostname)) + + def test_hostname_sanitize_characters(self): + hostname = "(#@&$!(@*--#&91)(__=+--test-host.example!!.com-0+" + self.assertEqual("91----test-host.example.com-0", + utils.sanitize_hostname(hostname)) + + def test_hostname_translate(self): + hostname = "<}\x1fh\x10e\x08l\x02l\x05o\x12!{>" + self.assertEqual("hello", utils.sanitize_hostname(hostname)) + + def test_generate_glance_url(self): + generated_url = utils.generate_glance_url() + actual_url = "http://%s:%d" % (FLAGS.glance_host, FLAGS.glance_port) + self.assertEqual(generated_url, actual_url) + + def test_read_cached_file(self): + self.mox.StubOutWithMock(os.path, "getmtime") + os.path.getmtime(mox.IgnoreArg()).AndReturn(1) + self.mox.ReplayAll() + + cache_data = {"data": 1123, "mtime": 1} + data = utils.read_cached_file("/this/is/a/fake", cache_data) + self.assertEqual(cache_data["data"], data) + + def test_read_modified_cached_file(self): + self.mox.StubOutWithMock(os.path, "getmtime") + self.mox.StubOutWithMock(__builtin__, 'open') + os.path.getmtime(mox.IgnoreArg()).AndReturn(2) + + fake_contents = "lorem ipsum" + fake_file = self.mox.CreateMockAnything() + fake_file.read().AndReturn(fake_contents) + fake_context_manager = self.mox.CreateMockAnything() + fake_context_manager.__enter__().AndReturn(fake_file) + fake_context_manager.__exit__(mox.IgnoreArg(), + mox.IgnoreArg(), + mox.IgnoreArg()) + + __builtin__.open(mox.IgnoreArg()).AndReturn(fake_context_manager) + + self.mox.ReplayAll() + cache_data = {"data": 1123, "mtime": 1} + self.reload_called = False + + def test_reload(reloaded_data): + self.assertEqual(reloaded_data, fake_contents) + self.reload_called = True + + data = utils.read_cached_file("/this/is/a/fake", + cache_data, + reload_func=test_reload) + self.assertEqual(data, fake_contents) + self.assertTrue(self.reload_called) + + def test_generate_password(self): + password = utils.generate_password() + self.assertTrue([c for c in password if c in '0123456789']) + self.assertTrue([c for c in password + if c in 'abcdefghijklmnopqrstuvwxyz']) + self.assertTrue([c for c in password + if c in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ']) + + def test_read_file_as_root(self): + def fake_execute(*args, **kwargs): + if args[1] == 'bad': + raise exception.ProcessExecutionError + return 'fakecontents', None + + self.stubs.Set(utils, 'execute', fake_execute) + contents = utils.read_file_as_root('good') + self.assertEqual(contents, 'fakecontents') + self.assertRaises(exception.FileNotFound, + utils.read_file_as_root, 'bad') + + def test_strcmp_const_time(self): + self.assertTrue(utils.strcmp_const_time('abc123', 'abc123')) + self.assertFalse(utils.strcmp_const_time('a', 'aaaaa')) + self.assertFalse(utils.strcmp_const_time('ABC123', 'abc123')) + + def test_temporary_chown(self): + def fake_execute(*args, **kwargs): + if args[0] == 'chown': + fake_execute.uid = args[1] + self.stubs.Set(utils, 'execute', fake_execute) + + with tempfile.NamedTemporaryFile() as f: + with utils.temporary_chown(f.name, owner_uid=2): + self.assertEqual(fake_execute.uid, 2) + self.assertEqual(fake_execute.uid, os.getuid()) + + def test_service_is_up(self): + fts_func = datetime.datetime.fromtimestamp + fake_now = 1000 + down_time = 5 + + self.flags(service_down_time=down_time) + self.mox.StubOutWithMock(timeutils, 'utcnow') + + # Up 
(equal) + timeutils.utcnow().AndReturn(fts_func(fake_now)) + service = {'updated_at': fts_func(fake_now - down_time), + 'created_at': fts_func(fake_now - down_time)} + self.mox.ReplayAll() + result = utils.service_is_up(service) + self.assertTrue(result) + + self.mox.ResetAll() + # Up + timeutils.utcnow().AndReturn(fts_func(fake_now)) + service = {'updated_at': fts_func(fake_now - down_time + 1), + 'created_at': fts_func(fake_now - down_time + 1)} + self.mox.ReplayAll() + result = utils.service_is_up(service) + self.assertTrue(result) + + self.mox.ResetAll() + # Down + timeutils.utcnow().AndReturn(fts_func(fake_now)) + service = {'updated_at': fts_func(fake_now - down_time - 1), + 'created_at': fts_func(fake_now - down_time - 1)} + self.mox.ReplayAll() + result = utils.service_is_up(service) + self.assertFalse(result) + + def test_safe_parse_xml(self): + + normal_body = ('<?xml version="1.0" ?>' + '<foo><bar><v1>hey</v1><v2>there</v2></bar></foo>') + + def killer_body(): + return (("""<!DOCTYPE x [ + <!ENTITY a "%(a)s"> + <!ENTITY b "%(b)s"> + <!ENTITY c "%(c)s">]> + <foo> + <bar> + <v1>%(d)s</v1> + </bar> + </foo>""") % { + 'a': 'A' * 10, + 'b': '&a;' * 10, + 'c': '&b;' * 10, + 'd': '&c;' * 9999, + }).strip() + + dom = utils.safe_minidom_parse_string(normal_body) + # Some versions of minidom inject extra newlines so we ignore them + result = str(dom.toxml()).replace('\n', '') + self.assertEqual(normal_body, result) + + self.assertRaises(ValueError, + utils.safe_minidom_parse_string, + killer_body()) + + def test_xhtml_escape(self): + self.assertEqual('&quot;foo&quot;', utils.xhtml_escape('"foo"')) + self.assertEqual('&apos;foo&apos;', utils.xhtml_escape("'foo'")) + + def test_hash_file(self): + data = 'Mary had a little lamb, its fleece as white as snow' + flo = StringIO.StringIO(data) + h1 = utils.hash_file(flo) + h2 = hashlib.sha1(data).hexdigest() + self.assertEquals(h1, h2) + + +class MonkeyPatchTestCase(test.TestCase): + """Unit test for utils.monkey_patch().""" + def setUp(self): + super(MonkeyPatchTestCase, self).setUp() + self.example_package = 'cinder.tests.monkey_patch_example.' + self.flags( + monkey_patch=True, + monkey_patch_modules=[self.example_package + 'example_a' + ':' + + self.example_package + + 'example_decorator']) + + def test_monkey_patch(self): + utils.monkey_patch() + cinder.tests.monkey_patch_example.CALLED_FUNCTION = [] + from cinder.tests.monkey_patch_example import example_a + from cinder.tests.monkey_patch_example import example_b + + self.assertEqual('Example function', example_a.example_function_a()) + exampleA = example_a.ExampleClassA() + exampleA.example_method() + ret_a = exampleA.example_method_add(3, 5) + self.assertEqual(ret_a, 8) + + self.assertEqual('Example function', example_b.example_function_b()) + exampleB = example_b.ExampleClassB() + exampleB.example_method() + ret_b = exampleB.example_method_add(3, 5) + + self.assertEqual(ret_b, 8) + package_a = self.example_package + 'example_a.' + self.assertTrue(package_a + 'example_function_a' + in cinder.tests.monkey_patch_example.CALLED_FUNCTION) + + self.assertTrue(package_a + 'ExampleClassA.example_method' + in cinder.tests.monkey_patch_example.CALLED_FUNCTION) + self.assertTrue(package_a + 'ExampleClassA.example_method_add' + in cinder.tests.monkey_patch_example.CALLED_FUNCTION) + package_b = self.example_package + 'example_b.'
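+ # The example_decorator named in setUp presumably records each + # decorated call, along the lines of this sketch (not the actual + # cinder.tests.monkey_patch_example code): + # + # def example_decorator(name, function): + # def wrapper(*args, **kwargs): + # CALLED_FUNCTION.append(name) + # return function(*args, **kwargs) + # return wrapper + # + # which is why the patched package_a functions show up in + # CALLED_FUNCTION below while the unpatched package_b ones do not.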
+ self.assertFalse(package_b + 'example_function_b' + in cinder.tests.monkey_patch_example.CALLED_FUNCTION) + self.assertFalse(package_b + 'ExampleClassB.example_method' + in cinder.tests.monkey_patch_example.CALLED_FUNCTION) + self.assertFalse(package_b + 'ExampleClassB.example_method_add' + in cinder.tests.monkey_patch_example.CALLED_FUNCTION) + + +class AuditPeriodTest(test.TestCase): + + def setUp(self): + super(AuditPeriodTest, self).setUp() + #a fairly random time to test with + self.test_time = datetime.datetime(second=23, + minute=12, + hour=8, + day=5, + month=3, + year=2012) + timeutils.set_time_override(override_time=self.test_time) + + def tearDown(self): + timeutils.clear_time_override() + super(AuditPeriodTest, self).tearDown() + + def test_hour(self): + begin, end = utils.last_completed_audit_period(unit='hour') + self.assertEquals(begin, + datetime.datetime(hour=7, + day=5, + month=3, + year=2012)) + self.assertEquals(end, datetime.datetime(hour=8, + day=5, + month=3, + year=2012)) + + def test_hour_with_offset_before_current(self): + begin, end = utils.last_completed_audit_period(unit='hour@10') + self.assertEquals(begin, datetime.datetime(minute=10, + hour=7, + day=5, + month=3, + year=2012)) + self.assertEquals(end, datetime.datetime(minute=10, + hour=8, + day=5, + month=3, + year=2012)) + + def test_hour_with_offset_after_current(self): + begin, end = utils.last_completed_audit_period(unit='hour@30') + self.assertEquals(begin, datetime.datetime(minute=30, + hour=6, + day=5, + month=3, + year=2012)) + self.assertEquals(end, datetime.datetime(minute=30, + hour=7, + day=5, + month=3, + year=2012)) + + def test_day(self): + begin, end = utils.last_completed_audit_period(unit='day') + self.assertEquals(begin, datetime.datetime(day=4, + month=3, + year=2012)) + self.assertEquals(end, datetime.datetime(day=5, + month=3, + year=2012)) + + def test_day_with_offset_before_current(self): + begin, end = utils.last_completed_audit_period(unit='day@6') + self.assertEquals(begin, datetime.datetime(hour=6, + day=4, + month=3, + year=2012)) + self.assertEquals(end, datetime.datetime(hour=6, + day=5, + month=3, + year=2012)) + + def test_day_with_offset_after_current(self): + begin, end = utils.last_completed_audit_period(unit='day@10') + self.assertEquals(begin, datetime.datetime(hour=10, + day=3, + month=3, + year=2012)) + self.assertEquals(end, datetime.datetime(hour=10, + day=4, + month=3, + year=2012)) + + def test_month(self): + begin, end = utils.last_completed_audit_period(unit='month') + self.assertEquals(begin, datetime.datetime(day=1, + month=2, + year=2012)) + self.assertEquals(end, datetime.datetime(day=1, + month=3, + year=2012)) + + def test_month_with_offset_before_current(self): + begin, end = utils.last_completed_audit_period(unit='month@2') + self.assertEquals(begin, datetime.datetime(day=2, + month=2, + year=2012)) + self.assertEquals(end, datetime.datetime(day=2, + month=3, + year=2012)) + + def test_month_with_offset_after_current(self): + begin, end = utils.last_completed_audit_period(unit='month@15') + self.assertEquals(begin, datetime.datetime(day=15, + month=1, + year=2012)) + self.assertEquals(end, datetime.datetime(day=15, + month=2, + year=2012)) + + def test_year(self): + begin, end = utils.last_completed_audit_period(unit='year') + self.assertEquals(begin, datetime.datetime(day=1, + month=1, + year=2011)) + self.assertEquals(end, datetime.datetime(day=1, + month=1, + year=2012)) + + def test_year_with_offset_before_current(self): + begin, end = 
utils.last_completed_audit_period(unit='year@2') + self.assertEquals(begin, datetime.datetime(day=1, + month=2, + year=2011)) + self.assertEquals(end, datetime.datetime(day=1, + month=2, + year=2012)) + + def test_year_with_offset_after_current(self): + begin, end = utils.last_completed_audit_period(unit='year@6') + self.assertEquals(begin, datetime.datetime(day=1, + month=6, + year=2010)) + self.assertEquals(end, datetime.datetime(day=1, + month=6, + year=2011)) + + +class FakeSSHClient(object): + + def __init__(self): + self.id = uuid.uuid4() + self.transport = FakeTransport() + + def set_missing_host_key_policy(self, policy): + pass + + def connect(self, ip, port=22, username=None, password=None, + pkey=None, timeout=10): + pass + + def get_transport(self): + return self.transport + + def close(self): + pass + + def __call__(self, *args, **kwargs): + pass + + +class FakeSock(object): + def settimeout(self, timeout): + pass + + +class FakeTransport(object): + + def __init__(self): + self.active = True + self.sock = FakeSock() + + def set_keepalive(self, timeout): + pass + + def is_active(self): + return self.active + + +class SSHPoolTestCase(test.TestCase): + """Unit test for SSH Connection Pool.""" + + def setup(self): + self.mox.StubOutWithMock(paramiko, "SSHClient") + paramiko.SSHClient().AndReturn(FakeSSHClient()) + self.mox.ReplayAll() + + def test_single_ssh_connect(self): + self.setup() + sshpool = utils.SSHPool("127.0.0.1", 22, 10, "test", password="test", + min_size=1, max_size=1) + with sshpool.item() as ssh: + first_id = ssh.id + + with sshpool.item() as ssh: + second_id = ssh.id + + self.assertEqual(first_id, second_id) + + def test_closed_reopend_ssh_connections(self): + self.setup() + sshpool = utils.SSHPool("127.0.0.1", 22, 10, "test", password="test", + min_size=1, max_size=2) + with sshpool.item() as ssh: + first_id = ssh.id + with sshpool.item() as ssh: + second_id = ssh.id + # Close the connection and test for a new connection + ssh.get_transport().active = False + + self.assertEqual(first_id, second_id) + + # The mox items are not getting setup in a new pool connection, + # so had to reset and set again. + self.mox.UnsetStubs() + self.setup() + + with sshpool.item() as ssh: + third_id = ssh.id + + self.assertNotEqual(first_id, third_id) diff --git a/cinder/tests/test_volume.py b/cinder/tests/test_volume.py new file mode 100644 index 0000000000..55cd8a2564 --- /dev/null +++ b/cinder/tests/test_volume.py @@ -0,0 +1,1331 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Tests for Volume Code. 
+ +""" + +import datetime +import os + +import mox +import shutil +import tempfile + +from cinder.brick.iscsi import iscsi +from cinder import context +from cinder import db +from cinder import exception +from cinder import flags +from cinder.image import image_utils +from cinder.openstack.common import importutils +from cinder.openstack.common.notifier import api as notifier_api +from cinder.openstack.common.notifier import test_notifier +from cinder.openstack.common import rpc +import cinder.policy +from cinder import quota +from cinder import test +from cinder.tests import fake_flags +from cinder.tests.image import fake as fake_image +from cinder.volume import configuration as conf +from cinder.volume import driver + +QUOTAS = quota.QUOTAS +FLAGS = flags.FLAGS + + +class VolumeTestCase(test.TestCase): + """Test Case for volumes.""" + + def setUp(self): + super(VolumeTestCase, self).setUp() + vol_tmpdir = tempfile.mkdtemp() + self.flags(connection_type='fake', + volumes_dir=vol_tmpdir, + notification_driver=[test_notifier.__name__]) + self.volume = importutils.import_object(FLAGS.volume_manager) + self.context = context.get_admin_context() + self.stubs.Set(iscsi.TgtAdm, '_get_target', self.fake_get_target) + fake_image.stub_out_image_service(self.stubs) + test_notifier.NOTIFICATIONS = [] + + def tearDown(self): + try: + shutil.rmtree(FLAGS.volumes_dir) + except OSError: + pass + notifier_api._reset_drivers() + super(VolumeTestCase, self).tearDown() + + def fake_get_target(obj, iqn): + return 1 + + @staticmethod + def _create_volume(size=0, snapshot_id=None, image_id=None, + metadata=None, status="creating"): + """Create a volume object.""" + vol = {} + vol['size'] = size + vol['snapshot_id'] = snapshot_id + vol['image_id'] = image_id + vol['user_id'] = 'fake' + vol['project_id'] = 'fake' + vol['availability_zone'] = FLAGS.storage_availability_zone + vol['status'] = status + vol['attach_status'] = "detached" + vol['host'] = FLAGS.host + if metadata is not None: + vol['metadata'] = metadata + return db.volume_create(context.get_admin_context(), vol) + + def test_init_host_clears_downloads(self): + """Test that init_host will unwedge a volume stuck in downloading.""" + volume = self._create_volume(status='downloading') + volume_id = volume['id'] + self.volume.init_host() + volume = db.volume_get(context.get_admin_context(), volume_id) + self.assertEquals(volume['status'], "error") + self.volume.delete_volume(self.context, volume_id) + + def test_create_delete_volume(self): + """Test volume can be created and deleted.""" + # Need to stub out reserve, commit, and rollback + def fake_reserve(context, expire=None, project_id=None, **deltas): + return ["RESERVATION"] + + def fake_commit(context, reservations, project_id=None): + pass + + def fake_rollback(context, reservations, project_id=None): + pass + + self.stubs.Set(QUOTAS, "reserve", fake_reserve) + self.stubs.Set(QUOTAS, "commit", fake_commit) + self.stubs.Set(QUOTAS, "rollback", fake_rollback) + + volume = self._create_volume() + volume_id = volume['id'] + self.assertEquals(len(test_notifier.NOTIFICATIONS), 0) + self.volume.create_volume(self.context, volume_id) + self.assertEquals(len(test_notifier.NOTIFICATIONS), 2) + msg = test_notifier.NOTIFICATIONS[0] + self.assertEqual(msg['event_type'], 'volume.create.start') + expected = { + 'status': 'creating', + 'display_name': None, + 'availability_zone': 'nova', + 'tenant_id': 'fake', + 'created_at': 'DONTCARE', + 'volume_id': volume_id, + 'volume_type': None, + 'snapshot_id': None, + 
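# ('DONTCARE' is treated as a wildcard by assertDictMatch, so + # created_at only has to be present, not equal) +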
'user_id': 'fake', + 'launched_at': '', + 'size': 0, + } + self.assertDictMatch(msg['payload'], expected) + msg = test_notifier.NOTIFICATIONS[1] + self.assertEqual(msg['event_type'], 'volume.create.end') + expected = { + 'status': 'creating', + 'display_name': None, + 'availability_zone': 'nova', + 'tenant_id': 'fake', + 'created_at': 'DONTCARE', + 'volume_id': volume_id, + 'volume_type': None, + 'snapshot_id': None, + 'user_id': 'fake', + 'launched_at': '', + 'size': 0, + } + self.assertDictMatch(msg['payload'], expected) + self.assertEqual(volume_id, db.volume_get(context.get_admin_context(), + volume_id).id) + + self.volume.delete_volume(self.context, volume_id) + vol = db.volume_get(context.get_admin_context(read_deleted='yes'), + volume_id) + self.assertEquals(vol['status'], 'deleted') + self.assertEquals(len(test_notifier.NOTIFICATIONS), 4) + msg = test_notifier.NOTIFICATIONS[2] + self.assertEqual(msg['event_type'], 'volume.delete.start') + expected = { + 'status': 'available', + 'display_name': None, + 'availability_zone': 'nova', + 'tenant_id': 'fake', + 'created_at': 'DONTCARE', + 'volume_id': volume_id, + 'volume_type': None, + 'snapshot_id': None, + 'user_id': 'fake', + 'launched_at': 'DONTCARE', + 'size': 0, + } + self.assertDictMatch(msg['payload'], expected) + msg = test_notifier.NOTIFICATIONS[3] + self.assertEqual(msg['event_type'], 'volume.delete.end') + expected = { + 'status': 'available', + 'display_name': None, + 'availability_zone': 'nova', + 'tenant_id': 'fake', + 'created_at': 'DONTCARE', + 'volume_id': volume_id, + 'volume_type': None, + 'snapshot_id': None, + 'user_id': 'fake', + 'launched_at': 'DONTCARE', + 'size': 0, + } + self.assertDictMatch(msg['payload'], expected) + self.assertRaises(exception.NotFound, + db.volume_get, + self.context, + volume_id) + + def test_create_delete_volume_with_metadata(self): + """Test volume can be created with metadata and deleted.""" + test_meta = {'fake_key': 'fake_value'} + volume = self._create_volume(0, None, metadata=test_meta) + volume_id = volume['id'] + self.volume.create_volume(self.context, volume_id) + result_meta = { + volume.volume_metadata[0].key: volume.volume_metadata[0].value} + self.assertEqual(result_meta, test_meta) + + self.volume.delete_volume(self.context, volume_id) + self.assertRaises(exception.NotFound, + db.volume_get, + self.context, + volume_id) + + def test_create_volume_with_invalid_metadata(self): + """Test volume create with too much metadata fails.""" + volume_api = cinder.volume.api.API() + test_meta = {'fake_key': 'fake_value' * 256} + self.assertRaises(exception.InvalidVolumeMetadataSize, + volume_api.create, + self.context, + 1, + 'name', + 'description', + None, + None, + None, + test_meta) + + def test_create_volume_with_volume_type(self): + """Test volume creation with default volume type.""" + def fake_reserve(context, expire=None, project_id=None, **deltas): + return ["RESERVATION"] + + def fake_commit(context, reservations, project_id=None): + pass + + def fake_rollback(context, reservations, project_id=None): + pass + + self.stubs.Set(QUOTAS, "reserve", fake_reserve) + self.stubs.Set(QUOTAS, "commit", fake_commit) + self.stubs.Set(QUOTAS, "rollback", fake_rollback) + + volume_api = cinder.volume.api.API() + + # Create volume with default volume type while default + # volume type doesn't exist, volume_type_id should be NULL + volume = volume_api.create(self.context, + 1, + 'name', + 'description') + self.assertEquals(volume['volume_type_id'], None) + + # Create default volume type + 
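# (fake_flags presumably points default_volume_type at def_vol_type, + # which is why the create below picks it up without an explicit type) +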
vol_type = fake_flags.def_vol_type + db.volume_type_create(context.get_admin_context(), + dict(name=vol_type, extra_specs={})) + + db_vol_type = db.volume_type_get_by_name(context.get_admin_context(), + vol_type) + + # Create volume with default volume type + volume = volume_api.create(self.context, + 1, + 'name', + 'description') + self.assertEquals(volume['volume_type_id'], db_vol_type.get('id')) + + # Create volume with specific volume type + vol_type = 'test' + db.volume_type_create(context.get_admin_context(), + dict(name=vol_type, extra_specs={})) + db_vol_type = db.volume_type_get_by_name(context.get_admin_context(), + vol_type) + + volume = volume_api.create(self.context, + 1, + 'name', + 'description', + volume_type=db_vol_type) + self.assertEquals(volume['volume_type_id'], db_vol_type.get('id')) + + def test_delete_busy_volume(self): + """Test volume survives deletion if driver reports it as busy.""" + volume = self._create_volume() + volume_id = volume['id'] + self.volume.create_volume(self.context, volume_id) + + self.mox.StubOutWithMock(self.volume.driver, 'delete_volume') + self.volume.driver.delete_volume( + mox.IgnoreArg()).AndRaise(exception.VolumeIsBusy( + volume_name='fake')) + self.mox.ReplayAll() + res = self.volume.delete_volume(self.context, volume_id) + self.assertEqual(True, res) + volume_ref = db.volume_get(context.get_admin_context(), volume_id) + self.assertEqual(volume_id, volume_ref.id) + self.assertEqual("available", volume_ref.status) + + self.mox.UnsetStubs() + self.volume.delete_volume(self.context, volume_id) + + def test_create_volume_from_snapshot(self): + """Test volume can be created from a snapshot.""" + volume_src = self._create_volume() + self.volume.create_volume(self.context, volume_src['id']) + snapshot_id = self._create_snapshot(volume_src['id'])['id'] + self.volume.create_snapshot(self.context, volume_src['id'], + snapshot_id) + volume_dst = self._create_volume(0, snapshot_id) + self.volume.create_volume(self.context, volume_dst['id'], snapshot_id) + self.assertEqual(volume_dst['id'], + db.volume_get( + context.get_admin_context(), + volume_dst['id']).id) + self.assertEqual(snapshot_id, + db.volume_get(context.get_admin_context(), + volume_dst['id']).snapshot_id) + + self.volume.delete_volume(self.context, volume_dst['id']) + self.volume.delete_snapshot(self.context, snapshot_id) + self.volume.delete_volume(self.context, volume_src['id']) + + def test_create_volume_from_snapshot_fail_bad_size(self): + """Test volume can't be created from snapshot with bad volume size.""" + volume_api = cinder.volume.api.API() + snapshot = dict(id=1234, + status='available', + volume_size=10) + self.assertRaises(exception.InvalidInput, + volume_api.create, + self.context, + size=1, + name='fake_name', + description='fake_desc', + snapshot=snapshot) + + def test_create_volume_with_invalid_exclusive_options(self): + """Test volume create with multiple exclusive options fails.""" + volume_api = cinder.volume.api.API() + self.assertRaises(exception.InvalidInput, + volume_api.create, + self.context, + 1, + 'name', + 'description', + snapshot='fake_id', + image_id='fake_id', + source_volume='fake_id') + + def test_too_big_volume(self): + """Ensure failure if a too large of a volume is requested.""" + # FIXME(vish): validation needs to move into the data layer in + # volume_create + return True + try: + volume = self._create_volume(1001) + self.volume.create_volume(self.context, volume) + self.fail("Should have thrown TypeError") + except TypeError: + pass + + def 
test_run_attach_detach_volume(self): + """Make sure volume can be attached and detached from instance.""" + instance_uuid = '12345678-1234-5678-1234-567812345678' + mountpoint = "/dev/sdf" + volume = self._create_volume() + volume_id = volume['id'] + self.volume.create_volume(self.context, volume_id) + self.volume.attach_volume(self.context, volume_id, instance_uuid, + mountpoint) + vol = db.volume_get(context.get_admin_context(), volume_id) + self.assertEqual(vol['status'], "in-use") + self.assertEqual(vol['attach_status'], "attached") + self.assertEqual(vol['mountpoint'], mountpoint) + self.assertEqual(vol['instance_uuid'], instance_uuid) + + self.assertRaises(exception.VolumeAttached, + self.volume.delete_volume, + self.context, + volume_id) + self.volume.detach_volume(self.context, volume_id) + vol = db.volume_get(self.context, volume_id) + self.assertEqual(vol['status'], "available") + + self.volume.delete_volume(self.context, volume_id) + self.assertRaises(exception.VolumeNotFound, + db.volume_get, + self.context, + volume_id) + + def test_concurrent_volumes_get_different_targets(self): + """Ensure multiple concurrent volumes get different targets.""" + volume_ids = [] + targets = [] + + def _check(volume_id): + """Make sure targets aren't duplicated.""" + volume_ids.append(volume_id) + admin_context = context.get_admin_context() + iscsi_target = db.volume_get_iscsi_target_num(admin_context, + volume_id) + self.assert_(iscsi_target not in targets) + targets.append(iscsi_target) + + total_slots = FLAGS.iscsi_num_targets + for _index in xrange(total_slots): + self._create_volume() + for volume_id in volume_ids: + self.volume.delete_volume(self.context, volume_id) + + def test_multi_node(self): + # TODO(termie): Figure out how to test with two nodes, + # each of them having a different FLAG for storage_node + # This will allow us to test cross-node interactions + pass + + @staticmethod + def _create_snapshot(volume_id, size='0'): + """Create a snapshot object.""" + snap = {} + snap['volume_size'] = size + snap['user_id'] = 'fake' + snap['project_id'] = 'fake' + snap['volume_id'] = volume_id + snap['status'] = "creating" + return db.snapshot_create(context.get_admin_context(), snap) + + def test_create_delete_snapshot(self): + """Test snapshot can be created and deleted.""" + volume = self._create_volume() + self.assertEquals(len(test_notifier.NOTIFICATIONS), 0) + self.volume.create_volume(self.context, volume['id']) + self.assertEquals(len(test_notifier.NOTIFICATIONS), 2) + snapshot_id = self._create_snapshot(volume['id'])['id'] + self.volume.create_snapshot(self.context, volume['id'], snapshot_id) + self.assertEqual(snapshot_id, + db.snapshot_get(context.get_admin_context(), + snapshot_id).id) + self.assertEquals(len(test_notifier.NOTIFICATIONS), 4) + msg = test_notifier.NOTIFICATIONS[2] + self.assertEquals(msg['event_type'], 'snapshot.create.start') + expected = { + 'created_at': 'DONTCARE', + 'deleted': '', + 'display_name': None, + 'snapshot_id': snapshot_id, + 'status': 'creating', + 'tenant_id': 'fake', + 'user_id': 'fake', + 'volume_id': volume['id'], + 'volume_size': 0, + 'availability_zone': 'nova' + } + self.assertDictMatch(msg['payload'], expected) + msg = test_notifier.NOTIFICATIONS[3] + self.assertEquals(msg['event_type'], 'snapshot.create.end') + expected = { + 'created_at': 'DONTCARE', + 'deleted': '', + 'display_name': None, + 'snapshot_id': snapshot_id, + 'status': 'creating', + 'tenant_id': 'fake', + 'user_id': 'fake', + 'volume_id': volume['id'], + 'volume_size': 0, + 
'availability_zone': 'nova' + } + self.assertDictMatch(msg['payload'], expected) + + self.volume.delete_snapshot(self.context, snapshot_id) + self.assertEquals(len(test_notifier.NOTIFICATIONS), 6) + msg = test_notifier.NOTIFICATIONS[4] + self.assertEquals(msg['event_type'], 'snapshot.delete.start') + expected = { + 'created_at': 'DONTCARE', + 'deleted': '', + 'display_name': None, + 'snapshot_id': snapshot_id, + 'status': 'available', + 'tenant_id': 'fake', + 'user_id': 'fake', + 'volume_id': volume['id'], + 'volume_size': 0, + 'availability_zone': 'nova' + } + self.assertDictMatch(msg['payload'], expected) + msg = test_notifier.NOTIFICATIONS[5] + self.assertEquals(msg['event_type'], 'snapshot.delete.end') + expected = { + 'created_at': 'DONTCARE', + 'deleted': '', + 'display_name': None, + 'snapshot_id': snapshot_id, + 'status': 'available', + 'tenant_id': 'fake', + 'user_id': 'fake', + 'volume_id': volume['id'], + 'volume_size': 0, + 'availability_zone': 'nova' + } + self.assertDictMatch(msg['payload'], expected) + + snap = db.snapshot_get(context.get_admin_context(read_deleted='yes'), + snapshot_id) + self.assertEquals(snap['status'], 'deleted') + self.assertRaises(exception.NotFound, + db.snapshot_get, + self.context, + snapshot_id) + self.volume.delete_volume(self.context, volume['id']) + + def test_cant_delete_volume_in_use(self): + """Test volume can't be deleted in invalid stats.""" + # create a volume and assign to host + volume = self._create_volume() + self.volume.create_volume(self.context, volume['id']) + volume['status'] = 'in-use' + volume['host'] = 'fakehost' + + volume_api = cinder.volume.api.API() + + # 'in-use' status raises InvalidVolume + self.assertRaises(exception.InvalidVolume, + volume_api.delete, + self.context, + volume) + + # clean up + self.volume.delete_volume(self.context, volume['id']) + + def test_force_delete_volume(self): + """Test volume can be forced to delete.""" + # create a volume and assign to host + volume = self._create_volume() + self.volume.create_volume(self.context, volume['id']) + volume['status'] = 'error_deleting' + volume['host'] = 'fakehost' + + volume_api = cinder.volume.api.API() + + # 'error_deleting' volumes can't be deleted + self.assertRaises(exception.InvalidVolume, + volume_api.delete, + self.context, + volume) + + # delete with force + volume_api.delete(self.context, volume, force=True) + + # status is deleting + volume = db.volume_get(context.get_admin_context(), volume['id']) + self.assertEquals(volume['status'], 'deleting') + + # clean up + self.volume.delete_volume(self.context, volume['id']) + + def test_cant_delete_volume_with_snapshots(self): + """Test volume can't be deleted with dependent snapshots.""" + volume = self._create_volume() + self.volume.create_volume(self.context, volume['id']) + snapshot_id = self._create_snapshot(volume['id'])['id'] + self.volume.create_snapshot(self.context, volume['id'], snapshot_id) + self.assertEqual(snapshot_id, + db.snapshot_get(context.get_admin_context(), + snapshot_id).id) + + volume['status'] = 'available' + volume['host'] = 'fakehost' + + volume_api = cinder.volume.api.API() + + self.assertRaises(exception.InvalidVolume, + volume_api.delete, + self.context, + volume) + self.volume.delete_snapshot(self.context, snapshot_id) + self.volume.delete_volume(self.context, volume['id']) + + def test_can_delete_errored_snapshot(self): + """Test snapshot can be created and deleted.""" + volume = self._create_volume() + self.volume.create_volume(self.context, volume['id']) + snapshot_id = 
self._create_snapshot(volume['id'])['id'] + self.volume.create_snapshot(self.context, volume['id'], snapshot_id) + snapshot = db.snapshot_get(context.get_admin_context(), + snapshot_id) + + volume_api = cinder.volume.api.API() + + snapshot['status'] = 'badstatus' + self.assertRaises(exception.InvalidSnapshot, + volume_api.delete_snapshot, + self.context, + snapshot) + + snapshot['status'] = 'error' + self.volume.delete_snapshot(self.context, snapshot_id) + self.volume.delete_volume(self.context, volume['id']) + + def test_create_snapshot_force(self): + """Test snapshot in use can be created forcibly.""" + + def fake_cast(ctxt, topic, msg): + pass + self.stubs.Set(rpc, 'cast', fake_cast) + instance_uuid = '12345678-1234-5678-1234-567812345678' + + volume = self._create_volume() + self.volume.create_volume(self.context, volume['id']) + db.volume_attached(self.context, volume['id'], instance_uuid, + '/dev/sda1') + + volume_api = cinder.volume.api.API() + volume = volume_api.get(self.context, volume['id']) + self.assertRaises(exception.InvalidVolume, + volume_api.create_snapshot, + self.context, volume, + 'fake_name', 'fake_description') + snapshot_ref = volume_api.create_snapshot_force(self.context, + volume, + 'fake_name', + 'fake_description') + db.snapshot_destroy(self.context, snapshot_ref['id']) + db.volume_destroy(self.context, volume['id']) + + def test_delete_busy_snapshot(self): + """Test snapshot can be created and deleted.""" + volume = self._create_volume() + volume_id = volume['id'] + self.volume.create_volume(self.context, volume_id) + snapshot_id = self._create_snapshot(volume_id)['id'] + self.volume.create_snapshot(self.context, volume_id, snapshot_id) + + self.mox.StubOutWithMock(self.volume.driver, 'delete_snapshot') + self.volume.driver.delete_snapshot( + mox.IgnoreArg()).AndRaise( + exception.SnapshotIsBusy(snapshot_name='fake')) + self.mox.ReplayAll() + self.volume.delete_snapshot(self.context, snapshot_id) + snapshot_ref = db.snapshot_get(self.context, snapshot_id) + self.assertEqual(snapshot_id, snapshot_ref.id) + self.assertEqual("available", snapshot_ref.status) + + self.mox.UnsetStubs() + self.volume.delete_snapshot(self.context, snapshot_id) + self.volume.delete_volume(self.context, volume_id) + + def _create_volume_from_image(self, expected_status, + fakeout_copy_image_to_volume=False): + """Call copy image to volume, Test the status of volume after calling + copying image to volume.""" + def fake_local_path(volume): + return dst_path + + def fake_copy_image_to_volume(context, volume, + image_service, image_id): + pass + + def fake_fetch_to_raw(context, image_service, image_id, vol_path): + pass + + dst_fd, dst_path = tempfile.mkstemp() + os.close(dst_fd) + self.stubs.Set(self.volume.driver, 'local_path', fake_local_path) + self.stubs.Set(image_utils, 'fetch_to_raw', fake_fetch_to_raw) + if fakeout_copy_image_to_volume: + self.stubs.Set(self.volume, '_copy_image_to_volume', + fake_copy_image_to_volume) + + image_id = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77' + volume_id = 1 + # creating volume testdata + db.volume_create(self.context, + {'id': volume_id, + 'updated_at': datetime.datetime(1, 1, 1, 1, 1, 1), + 'display_description': 'Test Desc', + 'size': 20, + 'status': 'creating', + 'instance_uuid': None, + 'host': 'dummy'}) + try: + self.volume.create_volume(self.context, + volume_id, + image_id=image_id) + + volume = db.volume_get(self.context, volume_id) + self.assertEqual(volume['status'], expected_status) + finally: + # cleanup + db.volume_destroy(self.context, 
volume_id) + os.unlink(dst_path) + + def test_create_volume_from_image_status_available(self): + """Verify that before copying image to volume, it is in available + state.""" + self._create_volume_from_image('available') + + def test_create_volume_from_image_exception(self): + """Verify that create volume from image, the volume status is + 'downloading'.""" + dst_fd, dst_path = tempfile.mkstemp() + os.close(dst_fd) + + self.stubs.Set(self.volume.driver, 'local_path', lambda x: dst_path) + + image_id = 'aaaaaaaa-0000-0000-0000-000000000000' + # creating volume testdata + volume_id = 1 + db.volume_create(self.context, + {'id': volume_id, + 'updated_at': datetime.datetime(1, 1, 1, 1, 1, 1), + 'display_description': 'Test Desc', + 'size': 20, + 'status': 'creating', + 'host': 'dummy'}) + + self.assertRaises(exception.ImageNotFound, + self.volume.create_volume, + self.context, + volume_id, None, None, None, + None, + image_id) + volume = db.volume_get(self.context, volume_id) + self.assertEqual(volume['status'], "error") + # cleanup + db.volume_destroy(self.context, volume_id) + os.unlink(dst_path) + + def test_copy_volume_to_image_status_available(self): + dst_fd, dst_path = tempfile.mkstemp() + os.close(dst_fd) + + def fake_local_path(volume): + return dst_path + + self.stubs.Set(self.volume.driver, 'local_path', fake_local_path) + + image_meta = { + 'id': '70a599e0-31e7-49b7-b260-868f441e862b', + 'container_format': 'bare', + 'disk_format': 'raw'} + + # creating volume testdata + volume_id = 1 + db.volume_create(self.context, + {'id': volume_id, + 'updated_at': datetime.datetime(1, 1, 1, 1, 1, 1), + 'display_description': 'Test Desc', + 'size': 20, + 'status': 'uploading', + 'instance_uuid': None, + 'host': 'dummy'}) + + try: + # start test + self.volume.copy_volume_to_image(self.context, + volume_id, + image_meta) + + volume = db.volume_get(self.context, volume_id) + self.assertEqual(volume['status'], 'available') + finally: + # cleanup + db.volume_destroy(self.context, volume_id) + os.unlink(dst_path) + + def test_copy_volume_to_image_status_use(self): + dst_fd, dst_path = tempfile.mkstemp() + os.close(dst_fd) + + def fake_local_path(volume): + return dst_path + + self.stubs.Set(self.volume.driver, 'local_path', fake_local_path) + + image_meta = { + 'id': 'a440c04b-79fa-479c-bed1-0b816eaec379', + 'container_format': 'bare', + 'disk_format': 'raw'} + # creating volume testdata + volume_id = 1 + db.volume_create( + self.context, + {'id': volume_id, + 'updated_at': datetime.datetime(1, 1, 1, 1, 1, 1), + 'display_description': 'Test Desc', + 'size': 20, + 'status': 'uploading', + 'instance_uuid': 'b21f957d-a72f-4b93-b5a5-45b1161abb02', + 'host': 'dummy'}) + + try: + # start test + self.volume.copy_volume_to_image(self.context, + volume_id, + image_meta) + + volume = db.volume_get(self.context, volume_id) + self.assertEqual(volume['status'], 'in-use') + finally: + # cleanup + db.volume_destroy(self.context, volume_id) + os.unlink(dst_path) + + def test_copy_volume_to_image_exception(self): + dst_fd, dst_path = tempfile.mkstemp() + os.close(dst_fd) + + def fake_local_path(volume): + return dst_path + + self.stubs.Set(self.volume.driver, 'local_path', fake_local_path) + + image_meta = { + 'id': 'aaaaaaaa-0000-0000-0000-000000000000', + 'container_format': 'bare', + 'disk_format': 'raw'} + # creating volume testdata + volume_id = 1 + db.volume_create(self.context, + {'id': volume_id, + 'updated_at': datetime.datetime(1, 1, 1, 1, 1, 1), + 'display_description': 'Test Desc', + 'size': 20, + 
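# (note: no instance_uuid is set, so when the copy below fails the + # manager is expected to reset the status to 'available', as + # asserted at the end of the test) +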
'status': 'in-use', + 'host': 'dummy'}) + + try: + # start test + self.assertRaises(exception.ImageNotFound, + self.volume.copy_volume_to_image, + self.context, + volume_id, + image_meta) + + volume = db.volume_get(self.context, volume_id) + self.assertEqual(volume['status'], 'available') + finally: + # cleanup + db.volume_destroy(self.context, volume_id) + os.unlink(dst_path) + + def test_create_volume_from_exact_sized_image(self): + """Verify that an image exactly the same size as the volume + works correctly.""" + class _FakeImageService: + def __init__(self, db_driver=None, image_service=None): + pass + + def show(self, context, image_id): + return {'size': 2 * 1024 * 1024 * 1024, + 'disk_format': 'raw', + 'container_format': 'bare'} + + image_id = '70a599e0-31e7-49b7-b260-868f441e862b' + + try: + volume_id = None + volume_api = cinder.volume.api.API( + image_service=_FakeImageService()) + volume = volume_api.create(self.context, 2, 'name', 'description', + image_id=1) + volume_id = volume['id'] + self.assertEqual(volume['status'], 'creating') + + finally: + # cleanup + db.volume_destroy(self.context, volume_id) + + def test_create_volume_from_oversized_image(self): + """Verify that an image which is too big will fail correctly.""" + class _FakeImageService: + def __init__(self, db_driver=None, image_service=None): + pass + + def show(self, context, image_id): + return {'size': 2 * 1024 * 1024 * 1024 + 1, + 'disk_format': 'raw', + 'container_format': 'bare'} + + image_id = '70a599e0-31e7-49b7-b260-868f441e862b' + + volume_api = cinder.volume.api.API(image_service=_FakeImageService()) + + self.assertRaises(exception.InvalidInput, + volume_api.create, + self.context, 2, + 'name', 'description', image_id=1) + + def test_create_volume_with_mindisk_error(self): + """Verify that volumes smaller than the image's min_disk cause an + error.""" + class _FakeImageService: + def __init__(self, db_driver=None, image_service=None): + pass + + def show(self, context, image_id): + return {'size': 2 * 1024 * 1024 * 1024, + 'disk_format': 'raw', + 'container_format': 'bare', + 'min_disk': 5} + + image_id = '70a599e0-31e7-49b7-b260-868f441e862b' + + volume_api = cinder.volume.api.API(image_service=_FakeImageService()) + + self.assertRaises(exception.InvalidInput, + volume_api.create, + self.context, 2, + 'name', 'description', image_id=1) + + def _do_test_create_volume_with_size(self, size): + def fake_reserve(context, expire=None, project_id=None, **deltas): + return ["RESERVATION"] + + def fake_commit(context, reservations, project_id=None): + pass + + def fake_rollback(context, reservations, project_id=None): + pass + + self.stubs.Set(QUOTAS, "reserve", fake_reserve) + self.stubs.Set(QUOTAS, "commit", fake_commit) + self.stubs.Set(QUOTAS, "rollback", fake_rollback) + + volume_api = cinder.volume.api.API() + + volume = volume_api.create(self.context, + size, + 'name', + 'description') + self.assertEquals(volume['size'], int(size)) + + def test_create_volume_int_size(self): + """Test volume creation with int size.""" + self._do_test_create_volume_with_size(2) + + def test_create_volume_string_size(self): + """Test volume creation with string size.""" + self._do_test_create_volume_with_size('2') + + def test_create_volume_with_bad_size(self): + def fake_reserve(context, expire=None, project_id=None, **deltas): + return ["RESERVATION"] + + def fake_commit(context, reservations, project_id=None): + pass + + def fake_rollback(context, reservations, project_id=None): + pass + + self.stubs.Set(QUOTAS, 
"reserve", fake_reserve) + self.stubs.Set(QUOTAS, "commit", fake_commit) + self.stubs.Set(QUOTAS, "rollback", fake_rollback) + + volume_api = cinder.volume.api.API() + + self.assertRaises(exception.InvalidInput, + volume_api.create, + self.context, + '2Gb', + 'name', + 'description') + + def test_begin_roll_detaching_volume(self): + """Test begin_detaching and roll_detaching functions.""" + volume = self._create_volume() + volume_api = cinder.volume.api.API() + volume_api.begin_detaching(self.context, volume) + volume = db.volume_get(self.context, volume['id']) + self.assertEqual(volume['status'], "detaching") + volume_api.roll_detaching(self.context, volume) + volume = db.volume_get(self.context, volume['id']) + self.assertEqual(volume['status'], "in-use") + + def test_volume_api_update(self): + # create a raw vol + volume = self._create_volume() + # use volume.api to update name + volume_api = cinder.volume.api.API() + update_dict = {'display_name': 'test update name'} + volume_api.update(self.context, volume, update_dict) + # read changes from db + vol = db.volume_get(context.get_admin_context(), volume['id']) + self.assertEquals(vol['display_name'], 'test update name') + + def test_volume_api_update_snapshot(self): + # create raw snapshot + volume = self._create_volume() + snapshot = self._create_snapshot(volume['id']) + self.assertEquals(snapshot['display_name'], None) + # use volume.api to update name + volume_api = cinder.volume.api.API() + update_dict = {'display_name': 'test update name'} + volume_api.update_snapshot(self.context, snapshot, update_dict) + # read changes from db + snap = db.snapshot_get(context.get_admin_context(), snapshot['id']) + self.assertEquals(snap['display_name'], 'test update name') + + def test_volume_get_active_by_window(self): + # Find all all volumes valid within a timeframe window. + try: # Not in window + db.volume_create( + self.context, + { + 'id': 1, + 'host': 'devstack', + 'created_at': datetime.datetime(1, 1, 1, 1, 1, 1), + 'deleted': True, 'status': 'deleted', + 'deleted_at': datetime.datetime(1, 2, 1, 1, 1, 1), + } + ) + except exception.VolumeNotFound: + pass + + try: # In - deleted in window + db.volume_create( + self.context, + { + 'id': 2, + 'host': 'devstack', + 'created_at': datetime.datetime(1, 1, 1, 1, 1, 1), + 'deleted': True, 'status': 'deleted', + 'deleted_at': datetime.datetime(1, 3, 10, 1, 1, 1), + } + ) + except exception.VolumeNotFound: + pass + + try: # In - deleted after window + db.volume_create( + self.context, + { + 'id': 3, + 'host': 'devstack', + 'created_at': datetime.datetime(1, 1, 1, 1, 1, 1), + 'deleted': True, 'status': 'deleted', + 'deleted_at': datetime.datetime(1, 5, 1, 1, 1, 1), + } + ) + except exception.VolumeNotFound: + pass + + # In - created in window + db.volume_create( + self.context, + { + 'id': 4, + 'host': 'devstack', + 'created_at': datetime.datetime(1, 3, 10, 1, 1, 1), + } + ) + + # Not of window. + db.volume_create( + self.context, + { + 'id': 5, + 'host': 'devstack', + 'created_at': datetime.datetime(1, 5, 1, 1, 1, 1), + } + ) + + volumes = db.volume_get_active_by_window( + self.context, + datetime.datetime(1, 3, 1, 1, 1, 1), + datetime.datetime(1, 4, 1, 1, 1, 1)) + self.assertEqual(len(volumes), 3) + self.assertEqual(volumes[0].id, u'2') + self.assertEqual(volumes[1].id, u'3') + self.assertEqual(volumes[2].id, u'4') + + def test_snapshot_get_active_by_window(self): + # Find all all snapshots valid within a timeframe window. 
+ vol = db.volume_create(self.context, {'id': 1}) + + try: # Not in window + db.snapshot_create( + self.context, + { + 'id': 1, + 'host': 'devstack', + 'volume_id': 1, + 'created_at': datetime.datetime(1, 1, 1, 1, 1, 1), + 'deleted': True, 'status': 'deleted', + 'deleted_at': datetime.datetime(1, 2, 1, 1, 1, 1), + } + ) + except exception.SnapshotNotFound: + pass + + try: # In - deleted in window + db.snapshot_create( + self.context, + { + 'id': 2, + 'host': 'devstack', + 'volume_id': 1, + 'created_at': datetime.datetime(1, 1, 1, 1, 1, 1), + 'deleted': True, 'status': 'deleted', + 'deleted_at': datetime.datetime(1, 3, 10, 1, 1, 1), + } + ) + except exception.SnapshotNotFound: + pass + + try: # In - deleted after window + db.snapshot_create( + self.context, + { + 'id': 3, + 'host': 'devstack', + 'volume_id': 1, + 'created_at': datetime.datetime(1, 1, 1, 1, 1, 1), + 'deleted': True, 'status': 'deleted', + 'deleted_at': datetime.datetime(1, 5, 1, 1, 1, 1), + } + ) + except exception.SnapshotNotFound: + pass + + # In - created in window + db.snapshot_create( + self.context, + { + 'id': 4, + 'host': 'devstack', + 'volume_id': 1, + 'created_at': datetime.datetime(1, 3, 10, 1, 1, 1), + } + ) + + # Not in window - created after the window closes. + db.snapshot_create( + self.context, + { + 'id': 5, + 'host': 'devstack', + 'volume_id': 1, + 'created_at': datetime.datetime(1, 5, 1, 1, 1, 1), + } + ) + + snapshots = db.snapshot_get_active_by_window( + self.context, + datetime.datetime(1, 3, 1, 1, 1, 1), + datetime.datetime(1, 4, 1, 1, 1, 1)) + self.assertEqual(len(snapshots), 3) + self.assertEqual(snapshots[0].id, u'2') + self.assertEqual(snapshots[1].id, u'3') + self.assertEqual(snapshots[2].id, u'4') + + +class DriverTestCase(test.TestCase): + """Base Test class for Drivers.""" + driver_name = "cinder.volume.driver.FakeBaseDriver" + + def setUp(self): + super(DriverTestCase, self).setUp() + vol_tmpdir = tempfile.mkdtemp() + self.flags(volume_driver=self.driver_name, + volumes_dir=vol_tmpdir) + self.volume = importutils.import_object(FLAGS.volume_manager) + self.context = context.get_admin_context() + self.output = "" + self.stubs.Set(iscsi.TgtAdm, '_get_target', self.fake_get_target) + + def _fake_execute(_command, *_args, **_kwargs): + """Fake _execute.""" + return self.output, None + self.volume.driver.set_execute(_fake_execute) + + def tearDown(self): + try: + shutil.rmtree(FLAGS.volumes_dir) + except OSError: + pass + super(DriverTestCase, self).tearDown() + + def fake_get_target(obj, iqn): + return 1 + + def _attach_volume(self): + """Attach volumes to an instance. """ + return [] + + def _detach_volume(self, volume_id_list): + """Detach volumes from an instance.""" + for volume_id in volume_id_list: + db.volume_detached(self.context, volume_id) + self.volume.delete_volume(self.context, volume_id) + + +class VolumeDriverTestCase(DriverTestCase): + """Test case for VolumeDriver""" + driver_name = "cinder.volume.drivers.lvm.LVMVolumeDriver" + + def test_delete_busy_volume(self): + """Test deleting a busy volume.""" + self.stubs.Set(self.volume.driver, '_volume_not_present', + lambda x: False) + self.stubs.Set(self.volume.driver, '_delete_volume', + lambda x, y: False) + # Want DriverTestCase._fake_execute to return 'o' so that + # volume.driver.delete_volume() raises the VolumeIsBusy exception. 
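+ # (The stubbed output stands in for the driver's LVM attribute listing; + # an 'o' flag is assumed here to mark the logical volume as open, i.e. + # still in use.)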
+ self.output = 'o' + self.assertRaises(exception.VolumeIsBusy, + self.volume.driver.delete_volume, + {'name': 'test1', 'size': 1024}) + # when DriverTestCase._fake_execute returns something other than + # 'o' volume.driver.delete_volume() does not raise an exception. + self.output = 'x' + self.volume.driver.delete_volume({'name': 'test1', 'size': 1024}) + + +class ISCSITestCase(DriverTestCase): + """Test Case for ISCSIDriver""" + driver_name = "cinder.volume.drivers.lvm.LVMISCSIDriver" + + def _attach_volume(self): + """Attach volumes to an instance. """ + volume_id_list = [] + for index in xrange(3): + vol = {} + vol['size'] = 0 + vol_ref = db.volume_create(self.context, vol) + self.volume.create_volume(self.context, vol_ref['id']) + vol_ref = db.volume_get(self.context, vol_ref['id']) + + # each volume has a different mountpoint + mountpoint = "/dev/sd" + chr((ord('b') + index)) + instance_uuid = '12345678-1234-5678-1234-567812345678' + db.volume_attached(self.context, vol_ref['id'], instance_uuid, + mountpoint) + volume_id_list.append(vol_ref['id']) + + return volume_id_list + + def test_do_iscsi_discovery(self): + configuration = mox.MockObject(conf.Configuration) + configuration.iscsi_ip_address = '0.0.0.0' + configuration.append_config_values(mox.IgnoreArg()) + + iscsi_driver = driver.ISCSIDriver(configuration=configuration) + iscsi_driver._execute = lambda *a, **kw: \ + ("%s dummy" % FLAGS.iscsi_ip_address, '') + volume = {"name": "dummy", + "host": "0.0.0.0"} + iscsi_driver._do_iscsi_discovery(volume) + + def test_get_iscsi_properties(self): + volume = {"provider_location": '', + "id": "0", + "provider_auth": "a b c"} + iscsi_driver = driver.ISCSIDriver() + iscsi_driver._do_iscsi_discovery = lambda v: "0.0.0.0:0000,0 iqn:iqn 0" + result = iscsi_driver._get_iscsi_properties(volume) + self.assertEquals(result["target_portal"], "0.0.0.0:0000") + self.assertEquals(result["target_iqn"], "iqn:iqn") + self.assertEquals(result["target_lun"], 0) + + def test_get_volume_stats(self): + def _emulate_vgs_execute(_command, *_args, **_kwargs): + out = " test1-volumes 5,52 0,52" + out += " test2-volumes 5.52 0.52" + return out, None + + self.volume.driver.set_execute(_emulate_vgs_execute) + + self.volume.driver._update_volume_status() + + stats = self.volume.driver._stats + + self.assertEquals(stats['total_capacity_gb'], float('5.52')) + self.assertEquals(stats['free_capacity_gb'], float('0.52')) + + +class FibreChannelTestCase(DriverTestCase): + """Test Case for FibreChannelDriver""" + driver_name = "cinder.volume.driver.FibreChannelDriver" + + def test_initialize_connection(self): + self.driver = driver.FibreChannelDriver() + self.driver.do_setup(None) + self.assertRaises(NotImplementedError, + self.driver.initialize_connection, {}, {}) + + +class VolumePolicyTestCase(test.TestCase): + + def setUp(self): + super(VolumePolicyTestCase, self).setUp() + + cinder.policy.reset() + cinder.policy.init() + + self.context = context.get_admin_context() + + def tearDown(self): + super(VolumePolicyTestCase, self).tearDown() + cinder.policy.reset() + + def _set_rules(self, rules): + cinder.common.policy.set_brain(cinder.common.policy.HttpBrain(rules)) + + def test_check_policy(self): + self.mox.StubOutWithMock(cinder.policy, 'enforce') + target = { + 'project_id': self.context.project_id, + 'user_id': self.context.user_id, + } + cinder.policy.enforce(self.context, 'volume:attach', target) + self.mox.ReplayAll() + cinder.volume.api.check_policy(self.context, 'attach') + + def 
test_check_policy_with_target(self): + self.mox.StubOutWithMock(cinder.policy, 'enforce') + target = { + 'project_id': self.context.project_id, + 'user_id': self.context.user_id, + 'id': 2, + } + cinder.policy.enforce(self.context, 'volume:attach', target) + self.mox.ReplayAll() + cinder.volume.api.check_policy(self.context, 'attach', {'id': 2}) diff --git a/cinder/tests/test_volume_configuration.py b/cinder/tests/test_volume_configuration.py new file mode 100644 index 0000000000..d2751c1d98 --- /dev/null +++ b/cinder/tests/test_volume_configuration.py @@ -0,0 +1,72 @@ +#!/usr/bin/env python +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright (c) 2012 Rackspace Hosting +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Tests for the configuration wrapper in volume drivers.""" + +from oslo.config import cfg + +from cinder import flags +from cinder.openstack.common import log as logging +from cinder import test +from cinder.volume import configuration +from cinder.volume import driver + + +LOG = logging.getLogger(__name__) +FLAGS = flags.FLAGS + + +volume_opts = [ + cfg.StrOpt('str_opt', default='STR_OPT'), + cfg.BoolOpt('bool_opt', default=False) +] +more_volume_opts = [ + cfg.IntOpt('int_opt', default=1), +] + +FLAGS.register_opts(volume_opts) +FLAGS.register_opts(more_volume_opts) + + +class VolumeConfigurationTest(test.TestCase): + def setUp(self): + super(VolumeConfigurationTest, self).setUp() + + def tearDown(self): + super(VolumeConfigurationTest, self).tearDown() + + def test_group_grafts_opts(self): + c = configuration.Configuration(volume_opts, config_group='foo') + self.assertEquals(c.str_opt, FLAGS.foo.str_opt) + self.assertEquals(c.bool_opt, FLAGS.foo.bool_opt) + + def test_opts_no_group(self): + c = configuration.Configuration(volume_opts) + self.assertEquals(c.str_opt, FLAGS.str_opt) + self.assertEquals(c.bool_opt, FLAGS.bool_opt) + + def test_grafting_multiple_opts(self): + c = configuration.Configuration(volume_opts, config_group='foo') + c.append_config_values(more_volume_opts) + self.assertEquals(c.str_opt, FLAGS.foo.str_opt) + self.assertEquals(c.bool_opt, FLAGS.foo.bool_opt) + self.assertEquals(c.int_opt, FLAGS.foo.int_opt) + + def test_safe_get(self): + c = configuration.Configuration(volume_opts, config_group='foo') + self.assertEquals(c.safe_get('none_opt'), None) diff --git a/cinder/tests/test_volume_glance_metadata.py b/cinder/tests/test_volume_glance_metadata.py new file mode 100644 index 0000000000..5aa42d8b1c --- /dev/null +++ b/cinder/tests/test_volume_glance_metadata.py @@ -0,0 +1,131 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 Zadara Storage Inc. +# Copyright (c) 2011 OpenStack LLC. +# Copyright 2011 University of Southern California +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain + # a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + # License for the specific language governing permissions and limitations + # under the License. +""" +Unit Tests for volume Glance metadata code +""" + +from cinder import context +from cinder import db +from cinder import exception +from cinder import test + + +class VolumeGlanceMetadataTestCase(test.TestCase): + + def setUp(self): + super(VolumeGlanceMetadataTestCase, self).setUp() + self.context = context.get_admin_context() + + def tearDown(self): + super(VolumeGlanceMetadataTestCase, self).tearDown() + + def test_vol_glance_metadata_bad_vol_id(self): + ctxt = context.get_admin_context() + self.assertRaises(exception.VolumeNotFound, + db.volume_glance_metadata_create, + ctxt, 1, 'key1', 'value1') + self.assertRaises(exception.VolumeNotFound, + db.volume_glance_metadata_get, ctxt, 1) + db.volume_glance_metadata_delete_by_volume(ctxt, 10) + + def test_vol_update_glance_metadata(self): + ctxt = context.get_admin_context() + db.volume_create(ctxt, {'id': 1}) + db.volume_create(ctxt, {'id': 2}) + vol_metadata = db.volume_glance_metadata_create(ctxt, 1, 'key1', + 'value1') + vol_metadata = db.volume_glance_metadata_create(ctxt, 2, 'key1', + 'value1') + vol_metadata = db.volume_glance_metadata_create(ctxt, 2, + 'key2', + 'value2') + + expected_metadata_1 = {'volume_id': '1', + 'key': 'key1', + 'value': 'value1'} + + metadata = db.volume_glance_metadata_get(ctxt, 1) + self.assertEqual(len(metadata), 1) + for key, value in expected_metadata_1.items(): + self.assertEqual(metadata[0][key], value) + + expected_metadata_2 = ({'volume_id': '2', + 'key': 'key1', + 'value': 'value1'}, + {'volume_id': '2', + 'key': 'key2', + 'value': 'value2'}) + + metadata = db.volume_glance_metadata_get(ctxt, 2) + self.assertEqual(len(metadata), 2) + for expected, meta in zip(expected_metadata_2, metadata): + for key, value in expected.iteritems(): + self.assertEqual(meta[key], value) + + self.assertRaises(exception.GlanceMetadataExists, + db.volume_glance_metadata_create, + ctxt, 1, 'key1', 'value1a') + + metadata = db.volume_glance_metadata_get(ctxt, 1) + self.assertEqual(len(metadata), 1) + for key, value in expected_metadata_1.items(): + self.assertEqual(metadata[0][key], value) + + def test_vol_delete_glance_metadata(self): + ctxt = context.get_admin_context() + db.volume_create(ctxt, {'id': 1}) + db.volume_glance_metadata_delete_by_volume(ctxt, 1) + vol_metadata = db.volume_glance_metadata_create(ctxt, 1, 'key1', + 'value1') + db.volume_glance_metadata_delete_by_volume(ctxt, 1) + metadata = db.volume_glance_metadata_get(ctxt, 1) + self.assertEqual(len(metadata), 0) + db.volume_glance_metadata_delete_by_volume(ctxt, 1) + metadata = db.volume_glance_metadata_get(ctxt, 1) + self.assertEqual(len(metadata), 0) + + def test_vol_glance_metadata_copy_to_snapshot(self): + ctxt = context.get_admin_context() + db.volume_create(ctxt, {'id': 1}) + db.snapshot_create(ctxt, {'id': 100, 'volume_id': 1}) + vol_meta = db.volume_glance_metadata_create(ctxt, 1, 'key1', + 'value1') + db.volume_glance_metadata_copy_to_snapshot(ctxt, 100, 1) + + expected_meta = {'snapshot_id': '100', + 'key': 'key1', + 'value': 'value1'} + + for meta in db.volume_snapshot_glance_metadata_get(ctxt, 100): + for (key, value) in 
expected_meta.items(): + self.assertEquals(meta[key], value) + + def test_vol_glance_metadata_copy_to_volume(self): + ctxt = context.get_admin_context() + db.volume_create(ctxt, {'id': 1}) + db.volume_create(ctxt, {'id': 100, 'source_volid': 1}) + vol_meta = db.volume_glance_metadata_create(ctxt, 1, 'key1', + 'value1') + db.volume_glance_metadata_copy_from_volume_to_volume(ctxt, 100, 1) + + expected_meta = {'id': '100', + 'key': 'key1', + 'value': 'value1'} + + for meta in db.volume_glance_metadata_get(ctxt, 100): + for (key, value) in expected_meta.items(): + self.assertEquals(meta[key], value) diff --git a/cinder/tests/test_volume_rpcapi.py b/cinder/tests/test_volume_rpcapi.py new file mode 100644 index 0000000000..7a75224b81 --- /dev/null +++ b/cinder/tests/test_volume_rpcapi.py @@ -0,0 +1,172 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012, Intel, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Unit Tests for cinder.volume.rpcapi +""" + + +from cinder import context +from cinder import db +from cinder import flags +from cinder.openstack.common import jsonutils +from cinder.openstack.common import rpc +from cinder import test +from cinder.volume import rpcapi as volume_rpcapi + + +FLAGS = flags.FLAGS + + +class VolumeRpcAPITestCase(test.TestCase): + + def setUp(self): + self.context = context.get_admin_context() + vol = {} + vol['host'] = 'fake_host' + vol['availability_zone'] = FLAGS.storage_availability_zone + vol['status'] = "available" + vol['attach_status'] = "detached" + volume = db.volume_create(self.context, vol) + + snapshot_data = { + 'volume_id': 'fake_id', + 'status': "creating", + 'progress': '0%', + 'volume_size': 0, + 'display_name': 'fake_name', + 'display_description': 'fake_description'} + snapshot = db.snapshot_create(self.context, snapshot_data) + self.fake_volume = jsonutils.to_primitive(volume) + self.fake_snapshot = jsonutils.to_primitive(snapshot) + super(VolumeRpcAPITestCase, self).setUp() + + def test_serialized_volume_has_id(self): + self.assertTrue('id' in self.fake_volume) + + def _test_volume_api(self, method, rpc_method, **kwargs): + ctxt = context.RequestContext('fake_user', 'fake_project') + + if 'rpcapi_class' in kwargs: + rpcapi_class = kwargs['rpcapi_class'] + del kwargs['rpcapi_class'] + else: + rpcapi_class = volume_rpcapi.VolumeAPI + rpcapi = rpcapi_class() + expected_retval = 'foo' if method == 'call' else None + + expected_version = kwargs.pop('version', rpcapi.BASE_RPC_API_VERSION) + expected_msg = rpcapi.make_msg(method, **kwargs) + if 'volume' in expected_msg['args']: + volume = expected_msg['args']['volume'] + del expected_msg['args']['volume'] + expected_msg['args']['volume_id'] = volume['id'] + if 'snapshot' in expected_msg['args']: + snapshot = expected_msg['args']['snapshot'] + del expected_msg['args']['snapshot'] + expected_msg['args']['snapshot_id'] = snapshot['id'] + if 'host' in expected_msg['args']: + del expected_msg['args']['host'] + + expected_msg['version'] = expected_version + + if 'host' in kwargs: + host = 
kwargs['host'] + else: + host = kwargs['volume']['host'] + expected_topic = '%s.%s' % (FLAGS.volume_topic, host) + + self.fake_args = None + self.fake_kwargs = None + + def _fake_rpc_method(*args, **kwargs): + self.fake_args = args + self.fake_kwargs = kwargs + if expected_retval: + return expected_retval + + self.stubs.Set(rpc, rpc_method, _fake_rpc_method) + + retval = getattr(rpcapi, method)(ctxt, **kwargs) + + self.assertEqual(retval, expected_retval) + expected_args = [ctxt, expected_topic, expected_msg] + for arg, expected_arg in zip(self.fake_args, expected_args): + self.assertEqual(arg, expected_arg) + + def test_create_volume(self): + self._test_volume_api('create_volume', + rpc_method='cast', + volume=self.fake_volume, + host='fake_host1', + request_spec='fake_request_spec', + filter_properties='fake_properties', + allow_reschedule=True, + snapshot_id='fake_snapshot_id', + image_id='fake_image_id', + source_volid='fake_src_id', + version='1.4') + + def test_delete_volume(self): + self._test_volume_api('delete_volume', + rpc_method='cast', + volume=self.fake_volume) + + def test_create_snapshot(self): + self._test_volume_api('create_snapshot', + rpc_method='cast', + volume=self.fake_volume, + snapshot=self.fake_snapshot) + + def test_delete_snapshot(self): + self._test_volume_api('delete_snapshot', + rpc_method='cast', + snapshot=self.fake_snapshot, + host='fake_host') + + def test_attach_volume(self): + self._test_volume_api('attach_volume', + rpc_method='call', + volume=self.fake_volume, + instance_uuid='fake_uuid', + mountpoint='fake_mountpoint') + + def test_detach_volume(self): + self._test_volume_api('detach_volume', + rpc_method='call', + volume=self.fake_volume) + + def test_copy_volume_to_image(self): + self._test_volume_api('copy_volume_to_image', + rpc_method='cast', + volume=self.fake_volume, + image_meta={'id': 'fake_image_id', + 'container_format': 'fake_type', + 'disk_format': 'fake_type'}, + version='1.3') + + def test_initialize_connection(self): + self._test_volume_api('initialize_connection', + rpc_method='call', + volume=self.fake_volume, + connector='fake_connector') + + def test_terminate_connection(self): + self._test_volume_api('terminate_connection', + rpc_method='call', + volume=self.fake_volume, + connector='fake_connector', + force=False) diff --git a/cinder/tests/test_volume_types.py b/cinder/tests/test_volume_types.py new file mode 100644 index 0000000000..8b639690ee --- /dev/null +++ b/cinder/tests/test_volume_types.py @@ -0,0 +1,187 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 Zadara Storage Inc. +# Copyright (c) 2011 OpenStack LLC. +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
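+ # These tests exercise the volume_types helper layer: type create and + # destroy, lookup by id and name, default-type resolution and + # extra-spec based search.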
+""" +Unit Tests for volume types code +""" +import time + +from cinder import context +from cinder.db.sqlalchemy import models +from cinder.db.sqlalchemy import session as sql_session +from cinder import exception +from cinder import flags +from cinder.openstack.common import log as logging +from cinder import test +from cinder.tests import fake_flags +from cinder.volume import volume_types + +FLAGS = flags.FLAGS +LOG = logging.getLogger(__name__) + + +class VolumeTypeTestCase(test.TestCase): + """Test cases for volume type code.""" + def setUp(self): + super(VolumeTypeTestCase, self).setUp() + + self.ctxt = context.get_admin_context() + self.vol_type1_name = str(int(time.time())) + self.vol_type1_specs = dict(type="physical drive", + drive_type="SAS", + size="300", + rpm="7200", + visible="True") + + def test_volume_type_create_then_destroy(self): + """Ensure volume types can be created and deleted.""" + prev_all_vtypes = volume_types.get_all_types(self.ctxt) + + type_ref = volume_types.create(self.ctxt, + self.vol_type1_name, + self.vol_type1_specs) + new = volume_types.get_volume_type_by_name(self.ctxt, + self.vol_type1_name) + + LOG.info(_("Given data: %s"), self.vol_type1_specs) + LOG.info(_("Result data: %s"), new) + + for k, v in self.vol_type1_specs.iteritems(): + self.assertEqual(v, new['extra_specs'][k], + 'one of fields doesnt match') + + new_all_vtypes = volume_types.get_all_types(self.ctxt) + self.assertEqual(len(prev_all_vtypes) + 1, + len(new_all_vtypes), + 'drive type was not created') + + volume_types.destroy(self.ctxt, type_ref['id']) + new_all_vtypes = volume_types.get_all_types(self.ctxt) + self.assertEqual(prev_all_vtypes, + new_all_vtypes, + 'drive type was not deleted') + + def test_get_all_volume_types(self): + """Ensures that all volume types can be retrieved.""" + session = sql_session.get_session() + total_volume_types = session.query(models.VolumeTypes).count() + vol_types = volume_types.get_all_types(self.ctxt) + self.assertEqual(total_volume_types, len(vol_types)) + + def test_get_default_volume_type(self): + """Ensures default volume type can be retrieved.""" + type_ref = volume_types.create(self.ctxt, + fake_flags.def_vol_type, + {}) + default_vol_type = volume_types.get_default_volume_type() + self.assertEqual(default_vol_type.get('name'), + fake_flags.def_vol_type) + + def test_default_volume_type_missing_in_db(self): + """Ensures proper exception raised if default volume type + is not in database.""" + session = sql_session.get_session() + default_vol_type = volume_types.get_default_volume_type() + self.assertEqual(default_vol_type, {}) + + def test_non_existent_vol_type_shouldnt_delete(self): + """Ensures that volume type creation fails with invalid args.""" + self.assertRaises(exception.VolumeTypeNotFound, + volume_types.destroy, self.ctxt, "sfsfsdfdfs") + + def test_repeated_vol_types_shouldnt_raise(self): + """Ensures that volume duplicates don't raise.""" + new_name = self.vol_type1_name + "dup" + type_ref = volume_types.create(self.ctxt, new_name) + volume_types.destroy(self.ctxt, type_ref['id']) + type_ref = volume_types.create(self.ctxt, new_name) + + def test_invalid_volume_types_params(self): + """Ensures that volume type creation fails with invalid args.""" + self.assertRaises(exception.InvalidVolumeType, + volume_types.destroy, self.ctxt, None) + self.assertRaises(exception.InvalidVolumeType, + volume_types.get_volume_type, self.ctxt, None) + self.assertRaises(exception.InvalidVolumeType, + volume_types.get_volume_type_by_name, + self.ctxt, 
None) + + def test_volume_type_get_by_id_and_name(self): + """Ensure volume types get returns same entry.""" + volume_types.create(self.ctxt, + self.vol_type1_name, + self.vol_type1_specs) + new = volume_types.get_volume_type_by_name(self.ctxt, + self.vol_type1_name) + + new2 = volume_types.get_volume_type(self.ctxt, new['id']) + self.assertEqual(new, new2) + + def test_volume_type_search_by_extra_spec(self): + """Ensure volume types get by extra spec returns correct type.""" + volume_types.create(self.ctxt, "type1", {"key1": "val1", + "key2": "val2"}) + volume_types.create(self.ctxt, "type2", {"key2": "val2", + "key3": "val3"}) + volume_types.create(self.ctxt, "type3", {"key3": "another_value", + "key4": "val4"}) + + vol_types = volume_types.get_all_types( + self.ctxt, + search_opts={'extra_specs': {"key1": "val1"}}) + LOG.info("vol_types: %s" % vol_types) + self.assertEqual(len(vol_types), 1) + self.assertTrue("type1" in vol_types.keys()) + self.assertEqual(vol_types['type1']['extra_specs'], + {"key1": "val1", "key2": "val2"}) + + vol_types = volume_types.get_all_types( + self.ctxt, + search_opts={'extra_specs': {"key2": "val2"}}) + LOG.info("vol_types: %s" % vol_types) + self.assertEqual(len(vol_types), 2) + self.assertTrue("type1" in vol_types.keys()) + self.assertTrue("type2" in vol_types.keys()) + + vol_types = volume_types.get_all_types( + self.ctxt, + search_opts={'extra_specs': {"key3": "val3"}}) + LOG.info("vol_types: %s" % vol_types) + self.assertEqual(len(vol_types), 1) + self.assertTrue("type2" in vol_types.keys()) + + def test_volume_type_search_by_extra_spec_multiple(self): + """Ensure volume types get by extra spec returns correct type.""" + volume_types.create(self.ctxt, "type1", {"key1": "val1", + "key2": "val2", + "key3": "val3"}) + volume_types.create(self.ctxt, "type2", {"key2": "val2", + "key3": "val3"}) + volume_types.create(self.ctxt, "type3", {"key1": "val1", + "key3": "val3", + "key4": "val4"}) + + vol_types = volume_types.get_all_types( + self.ctxt, + search_opts={'extra_specs': {"key1": "val1", + "key3": "val3"}}) + LOG.info("vol_types: %s" % vol_types) + self.assertEqual(len(vol_types), 2) + self.assertTrue("type1" in vol_types.keys()) + self.assertTrue("type3" in vol_types.keys()) + self.assertEqual(vol_types['type1']['extra_specs'], + {"key1": "val1", "key2": "val2", "key3": "val3"}) + self.assertEqual(vol_types['type3']['extra_specs'], + {"key1": "val1", "key3": "val3", "key4": "val4"}) diff --git a/cinder/tests/test_volume_types_extra_specs.py b/cinder/tests/test_volume_types_extra_specs.py new file mode 100644 index 0000000000..f8d4fac998 --- /dev/null +++ b/cinder/tests/test_volume_types_extra_specs.py @@ -0,0 +1,130 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 Zadara Storage Inc. +# Copyright (c) 2011 OpenStack LLC. +# Copyright 2011 University of Southern California +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
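+ # Covers the extra-specs db API: get, delete, update_or_create, and the + # extra_specs join performed by the volume_type_get* calls below.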
+""" +Unit Tests for volume types extra specs code +""" + +from cinder import context +from cinder import db +from cinder import test + + +class VolumeTypeExtraSpecsTestCase(test.TestCase): + + def setUp(self): + super(VolumeTypeExtraSpecsTestCase, self).setUp() + self.context = context.get_admin_context() + self.vol_type1 = dict(name="TEST: Regular volume test") + self.vol_type1_specs = dict(vol_extra1="value1", + vol_extra2="value2", + vol_extra3=3) + self.vol_type1['extra_specs'] = self.vol_type1_specs + ref = db.volume_type_create(self.context, self.vol_type1) + self.volume_type1_id = ref.id + for k, v in self.vol_type1_specs.iteritems(): + self.vol_type1_specs[k] = str(v) + + self.vol_type2_noextra = dict(name="TEST: Volume type without extra") + ref = db.volume_type_create(self.context, self.vol_type2_noextra) + self.vol_type2_id = ref.id + + def tearDown(self): + # Remove the volume type from the database + db.volume_type_destroy(context.get_admin_context(), + self.vol_type1['id']) + db.volume_type_destroy(context.get_admin_context(), + self.vol_type2_noextra['id']) + super(VolumeTypeExtraSpecsTestCase, self).tearDown() + + def test_volume_type_specs_get(self): + expected_specs = self.vol_type1_specs.copy() + actual_specs = db.volume_type_extra_specs_get( + context.get_admin_context(), + self.volume_type1_id) + self.assertEquals(expected_specs, actual_specs) + + def test_volume_type_extra_specs_delete(self): + expected_specs = self.vol_type1_specs.copy() + del expected_specs['vol_extra2'] + db.volume_type_extra_specs_delete(context.get_admin_context(), + self.volume_type1_id, + 'vol_extra2') + actual_specs = db.volume_type_extra_specs_get( + context.get_admin_context(), + self.volume_type1_id) + self.assertEquals(expected_specs, actual_specs) + + def test_volume_type_extra_specs_update(self): + expected_specs = self.vol_type1_specs.copy() + expected_specs['vol_extra3'] = "4" + db.volume_type_extra_specs_update_or_create( + context.get_admin_context(), + self.volume_type1_id, + dict(vol_extra3=4)) + actual_specs = db.volume_type_extra_specs_get( + context.get_admin_context(), + self.volume_type1_id) + self.assertEquals(expected_specs, actual_specs) + + def test_volume_type_extra_specs_create(self): + expected_specs = self.vol_type1_specs.copy() + expected_specs['vol_extra4'] = 'value4' + expected_specs['vol_extra5'] = 'value5' + db.volume_type_extra_specs_update_or_create( + context.get_admin_context(), + self.volume_type1_id, + dict(vol_extra4="value4", + vol_extra5="value5")) + actual_specs = db.volume_type_extra_specs_get( + context.get_admin_context(), + self.volume_type1_id) + self.assertEquals(expected_specs, actual_specs) + + def test_volume_type_get_with_extra_specs(self): + volume_type = db.volume_type_get( + context.get_admin_context(), + self.volume_type1_id) + self.assertEquals(volume_type['extra_specs'], + self.vol_type1_specs) + + volume_type = db.volume_type_get( + context.get_admin_context(), + self.vol_type2_id) + self.assertEquals(volume_type['extra_specs'], {}) + + def test_volume_type_get_by_name_with_extra_specs(self): + volume_type = db.volume_type_get_by_name( + context.get_admin_context(), + self.vol_type1['name']) + self.assertEquals(volume_type['extra_specs'], + self.vol_type1_specs) + + volume_type = db.volume_type_get_by_name( + context.get_admin_context(), + self.vol_type2_noextra['name']) + self.assertEquals(volume_type['extra_specs'], {}) + + def test_volume_type_get_all(self): + expected_specs = self.vol_type1_specs.copy() + + types = 
db.volume_type_get_all(context.get_admin_context()) + + self.assertEquals( + types[self.vol_type1['name']]['extra_specs'], expected_specs) + + self.assertEquals( + types[self.vol_type2_noextra['name']]['extra_specs'], {}) diff --git a/cinder/tests/test_volume_utils.py b/cinder/tests/test_volume_utils.py new file mode 100644 index 0000000000..1521149ac0 --- /dev/null +++ b/cinder/tests/test_volume_utils.py @@ -0,0 +1,117 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Tests For miscellaneous util methods used with volume.""" + +from cinder import context +from cinder import db +from cinder import flags +from cinder.openstack.common import importutils +from cinder.openstack.common import log as logging +from cinder.openstack.common.notifier import api as notifier_api +from cinder.openstack.common.notifier import test_notifier +from cinder import test +from cinder.volume import utils as volume_utils + + +LOG = logging.getLogger(__name__) +FLAGS = flags.FLAGS + + +class UsageInfoTestCase(test.TestCase): + + QUEUE_NAME = 'cinder-volume' + HOSTNAME = 'my-host.com' + HOSTIP = '10.0.0.1' + BACKEND = 'test_backend' + MULTI_AT_BACKEND = 'test_b@ckend' + + def setUp(self): + super(UsageInfoTestCase, self).setUp() + self.flags(connection_type='fake', + host='fake', + notification_driver=[test_notifier.__name__]) + self.volume = importutils.import_object(FLAGS.volume_manager) + self.user_id = 'fake' + self.project_id = 'fake' + self.snapshot_id = 'fake' + self.volume_size = 0 + self.context = context.RequestContext(self.user_id, self.project_id) + test_notifier.NOTIFICATIONS = [] + + def tearDown(self): + notifier_api._reset_drivers() + super(UsageInfoTestCase, self).tearDown() + + def _create_volume(self, params={}): + """Create a test volume.""" + vol = {} + vol['snapshot_id'] = self.snapshot_id + vol['user_id'] = self.user_id + vol['project_id'] = self.project_id + vol['host'] = FLAGS.host + vol['availability_zone'] = FLAGS.storage_availability_zone + vol['status'] = "creating" + vol['attach_status'] = "detached" + vol['size'] = self.volume_size + vol.update(params) + return db.volume_create(self.context, vol)['id'] + + def test_notify_usage_exists(self): + """Ensure 'exists' notification generates appropriate usage data.""" + volume_id = self._create_volume() + volume = db.volume_get(self.context, volume_id) + volume_utils.notify_usage_exists(self.context, volume) + LOG.info("%r" % test_notifier.NOTIFICATIONS) + self.assertEquals(len(test_notifier.NOTIFICATIONS), 1) + msg = test_notifier.NOTIFICATIONS[0] + self.assertEquals(msg['priority'], 'INFO') + self.assertEquals(msg['event_type'], 'volume.exists') + payload = msg['payload'] + self.assertEquals(payload['tenant_id'], self.project_id) + self.assertEquals(payload['user_id'], self.user_id) + self.assertEquals(payload['snapshot_id'], self.snapshot_id) + self.assertEquals(payload['volume_id'], volume.id) + self.assertEquals(payload['size'], 
self.volume_size) + for attr in ('display_name', 'created_at', 'launched_at', + 'status', 'audit_period_beginning', + 'audit_period_ending'): + self.assertTrue(attr in payload, + msg="Key %s not in payload" % attr) + db.volume_destroy(context.get_admin_context(), volume['id']) + + def test_get_host_from_queue_simple(self): + fullname = "%s.%s@%s" % (self.QUEUE_NAME, self.HOSTNAME, self.BACKEND) + self.assertEquals(volume_utils.get_host_from_queue(fullname), + self.HOSTNAME) + + def test_get_host_from_queue_ip(self): + fullname = "%s.%s@%s" % (self.QUEUE_NAME, self.HOSTIP, self.BACKEND) + self.assertEquals(volume_utils.get_host_from_queue(fullname), + self.HOSTIP) + + def test_get_host_from_queue_multi_at_symbol(self): + fullname = "%s.%s@%s" % (self.QUEUE_NAME, self.HOSTNAME, + self.MULTI_AT_BACKEND) + self.assertEquals(volume_utils.get_host_from_queue(fullname), + self.HOSTNAME) + + def test_get_host_from_queue_ip_multi_at_symbol(self): + fullname = "%s.%s@%s" % (self.QUEUE_NAME, self.HOSTIP, + self.MULTI_AT_BACKEND) + self.assertEquals(volume_utils.get_host_from_queue(fullname), + self.HOSTIP) diff --git a/cinder/tests/test_windows.py b/cinder/tests/test_windows.py new file mode 100644 index 0000000000..2ee09b73db --- /dev/null +++ b/cinder/tests/test_windows.py @@ -0,0 +1,220 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 Pedro Navarro Perez +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
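+ # These tests run entirely against mocked wmi/os/subprocess/multiprocessing + # modules (see _setup_stubs below), so no real Windows iSCSI target is + # required.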
+ +""" +Unit tests for Windows Server 2012 OpenStack Cinder volume driver +""" +import sys + +import cinder.flags +from cinder.tests.windows import basetestcase +from cinder.tests.windows import db_fakes +from cinder.tests.windows import windowsutils +from cinder.volume.drivers import windows + +FLAGS = cinder.flags.FLAGS + + +class TestWindowsDriver(basetestcase.BaseTestCase): + + def __init__(self, method): + super(TestWindowsDriver, self).__init__(method) + + def setUp(self): + super(TestWindowsDriver, self).setUp() + self.flags( + windows_iscsi_lun_path=r'C:\iSCSIVirtualDisks', + ) + self._volume_data = None + self._volume_data_2 = None + self._snapshot_data = None + self._connector_data = None + self._volume_id = '10958016-e196-42e3-9e7f-5d8927ae3099' + self._volume_id_2 = '20958016-e196-42e3-9e7f-5d8927ae3098' + self._snapshot_id = '30958016-e196-42e3-9e7f-5d8927ae3097' + self._iqn = "iqn.1991-05.com.microsoft:dell1160dsy" + + self._setup_stubs() + + self._drv = windows.WindowsDriver() + self._drv.do_setup({}) + self._wutils = windowsutils.WindowsUtils() + + def _setup_stubs(self): + + # Modules to mock + modules_to_mock = [ + 'wmi', + 'os', + 'subprocess', + 'multiprocessing' + ] + + modules_to_test = [ + windows, + windowsutils, + sys.modules[__name__] + ] + + self._inject_mocks_in_modules(modules_to_mock, modules_to_test) + + def tearDown(self): + try: + if (self._volume_data_2 and + self._wutils.volume_exists(self._volume_data_2['name'])): + self._wutils.delete_volume(self._volume_data_2['name']) + + if (self._volume_data and + self._wutils.volume_exists( + self._volume_data['name'])): + self._wutils.delete_volume(self._volume_data['name']) + if (self._snapshot_data and + self._wutils.snapshot_exists( + self._snapshot_data['name'])): + self._wutils.delete_snapshot(self._snapshot_data['name']) + if (self._connector_data and + self._wutils.initiator_id_exists( + "%s%s" % (FLAGS.iscsi_target_prefix, + self._volume_data['name']), + self._connector_data['initiator'])): + target_name = "%s%s" % (FLAGS.iscsi_target_prefix, + self._volume_data['name']) + initiator_name = self._connector_data['initiator'] + self._wutils.delete_initiator_id(target_name, initiator_name) + if (self._volume_data and + self._wutils.export_exists("%s%s" % + (FLAGS.iscsi_target_prefix, + self._volume_data['name']))): + self._wutils.delete_export( + "%s%s" % (FLAGS.iscsi_target_prefix, + self._volume_data['name'])) + + finally: + super(TestWindowsDriver, self).tearDown() + + def test_check_for_setup_errors(self): + self._drv.check_for_setup_error() + + def test_create_volume(self): + self._create_volume() + + wt_disks = self._wutils.find_vhd_by_name(self._volume_data['name']) + self.assertEquals(len(wt_disks), 1) + + def _create_volume(self): + self._volume_data = db_fakes.get_fake_volume_info(self._volume_id) + self._drv.create_volume(self._volume_data) + + def test_delete_volume(self): + self._create_volume() + + self._drv.delete_volume(self._volume_data) + + wt_disks = self._wutils.find_vhd_by_name(self._volume_data['name']) + self.assertEquals(len(wt_disks), 0) + + def test_create_snapshot(self): + # Create a volume + self._create_volume() + + wt_disks = self._wutils.find_vhd_by_name(self._volume_data['name']) + self.assertEquals(len(wt_disks), 1) + # Create a snapshot from the previous volume + self._create_snapshot() + + snapshot_name = self._snapshot_data['name'] + wt_snapshots = self._wutils.find_snapshot_by_name(snapshot_name) + self.assertEquals(len(wt_snapshots), 1) + + def _create_snapshot(self): 
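+ # Build fake snapshot metadata for the current volume and hand it to + # the driver.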
volume_name = self._volume_data['name'] + snapshot_id = self._snapshot_id + self._snapshot_data = db_fakes.get_fake_snapshot_info(volume_name, + snapshot_id) + self._drv.create_snapshot(self._snapshot_data) + + def test_create_volume_from_snapshot(self): + # Create a volume + self._create_volume() + # Create a snapshot from the previous volume + self._create_snapshot() + + self._volume_data_2 = db_fakes.get_fake_volume_info(self._volume_id_2) + + self._drv.create_volume_from_snapshot(self._volume_data_2, + self._snapshot_data) + + wt_disks = self._wutils.find_vhd_by_name(self._volume_data_2['name']) + self.assertEquals(len(wt_disks), 1) + + def test_delete_snapshot(self): + # Create a volume + self._create_volume() + # Create a snapshot from the previous volume + self._create_snapshot() + + self._drv.delete_snapshot(self._snapshot_data) + + snapshot_name = self._snapshot_data['name'] + wt_snapshots = self._wutils.find_snapshot_by_name(snapshot_name) + self.assertEquals(len(wt_snapshots), 0) + + def test_create_export(self): + # Create a volume + self._create_volume() + + retval = self._drv.create_export({}, self._volume_data) + + volume_name = self._volume_data['name'] + self.assertEquals( + retval, + {'provider_location': "%s%s" % (FLAGS.iscsi_target_prefix, + volume_name)}) + + def test_initialize_connection(self): + # Create a volume + self._create_volume() + + self._drv.create_export({}, self._volume_data) + + self._connector_data = db_fakes.get_fake_connector_info(self._iqn) + + init_data = self._drv.initialize_connection(self._volume_data, + self._connector_data) + target_name = self._volume_data['provider_location'] + initiator_name = self._connector_data['initiator'] + + wt_initiator_ids = self._wutils.find_initiator_ids(target_name, + initiator_name) + self.assertEquals(len(wt_initiator_ids), 1) + + properties = init_data['data'] + self.assertNotEqual(properties['target_iqn'], None) + + def test_ensure_export(self): + # Create a volume + self._create_volume() + + self._drv.ensure_export({}, self._volume_data) + + def test_remove_export(self): + # Create a volume + self._create_volume() + + self._drv.create_export({}, self._volume_data) + + self._drv.remove_export({}, self._volume_data) diff --git a/cinder/tests/test_wsgi.py b/cinder/tests/test_wsgi.py new file mode 100644 index 0000000000..d5a80b0d94 --- /dev/null +++ b/cinder/tests/test_wsgi.py @@ -0,0 +1,264 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
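+ # Exercises cinder.wsgi.Loader (paste config handling), cinder.wsgi.Server + # (plain, SSL and IPv6 startup) and the fault middleware's exception + # mapping.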
+ +"""Unit tests for `cinder.wsgi`.""" + +import os.path +import ssl +import tempfile +import unittest +import urllib2 + +from oslo.config import cfg +import webob +import webob.dec + +from cinder.api.middleware import fault +from cinder import exception +from cinder import test +from cinder import utils +import cinder.wsgi + +CONF = cfg.CONF + +TEST_VAR_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), + 'var')) + + +class TestLoaderNothingExists(test.TestCase): + """Loader tests where os.path.exists always returns False.""" + + def setUp(self): + super(TestLoaderNothingExists, self).setUp() + self.stubs.Set(os.path, 'exists', lambda _: False) + + def test_config_not_found(self): + self.assertRaises( + cinder.exception.ConfigNotFound, + cinder.wsgi.Loader, + ) + + +class TestLoaderNormalFilesystem(unittest.TestCase): + """Loader tests with normal filesystem (unmodified os.path module).""" + + _paste_config = """ +[app:test_app] +use = egg:Paste#static +document_root = /tmp + """ + + def setUp(self): + self.config = tempfile.NamedTemporaryFile(mode="w+t") + self.config.write(self._paste_config.lstrip()) + self.config.seek(0) + self.config.flush() + self.loader = cinder.wsgi.Loader(self.config.name) + + def test_config_found(self): + self.assertEquals(self.config.name, self.loader.config_path) + + def test_app_not_found(self): + self.assertRaises( + cinder.exception.PasteAppNotFound, + self.loader.load_app, + "non-existent app", + ) + + def test_app_found(self): + url_parser = self.loader.load_app("test_app") + self.assertEquals("/tmp", url_parser.directory) + + def tearDown(self): + self.config.close() + + +class TestWSGIServer(unittest.TestCase): + """WSGI server tests.""" + def _ipv6_configured(): + try: + out, err = utils.execute('cat', '/proc/net/if_inet6') + except exception.ProcessExecutionError: + return False + + if not out: + return False + return True + + def test_no_app(self): + server = cinder.wsgi.Server("test_app", None) + self.assertEquals("test_app", server.name) + + def test_start_random_port(self): + server = cinder.wsgi.Server("test_random_port", None, host="127.0.0.1") + self.assertEqual(0, server.port) + server.start() + self.assertNotEqual(0, server.port) + server.stop() + server.wait() + + @test.skip_if(not _ipv6_configured(), + "Test requires an IPV6 configured interface") + def test_start_random_port_with_ipv6(self): + server = cinder.wsgi.Server("test_random_port", + None, + host="::1") + server.start() + self.assertEqual("::1", server.host) + self.assertNotEqual(0, server.port) + server.stop() + server.wait() + + def test_app(self): + greetings = 'Hello, World!!!' + + def hello_world(env, start_response): + if env['PATH_INFO'] != '/': + start_response('404 Not Found', + [('Content-Type', 'text/plain')]) + return ['Not Found\r\n'] + start_response('200 OK', [('Content-Type', 'text/plain')]) + return [greetings] + + server = cinder.wsgi.Server("test_app", hello_world) + server.start() + + response = urllib2.urlopen('http://127.0.0.1:%d/' % server.port) + self.assertEquals(greetings, response.read()) + + server.stop() + + def test_app_using_ssl(self): + CONF.set_default("ssl_cert_file", + os.path.join(TEST_VAR_DIR, 'certificate.crt')) + CONF.set_default("ssl_key_file", + os.path.join(TEST_VAR_DIR, 'privatekey.key')) + + greetings = 'Hello, World!!!' 
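+ # With ssl_cert_file/ssl_key_file set above, the server is expected to + # come up speaking HTTPS, hence the https:// URL below.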
+ + @webob.dec.wsgify + def hello_world(req): + return greetings + + server = cinder.wsgi.Server("test_app", hello_world) + server.start() + + response = urllib2.urlopen('https://127.0.0.1:%d/' % server.port) + self.assertEquals(greetings, response.read()) + + server.stop() + + @test.skip_if(not _ipv6_configured(), + "Test requires an IPV6 configured interface") + def test_app_using_ipv6_and_ssl(self): + CONF.set_default("ssl_cert_file", + os.path.join(TEST_VAR_DIR, 'certificate.crt')) + CONF.set_default("ssl_key_file", + os.path.join(TEST_VAR_DIR, 'privatekey.key')) + + greetings = 'Hello, World!!!' + + @webob.dec.wsgify + def hello_world(req): + return greetings + + server = cinder.wsgi.Server("test_app", + hello_world, + host="::1", + port=0) + server.start() + + response = urllib2.urlopen('https://[::1]:%d/' % server.port) + self.assertEquals(greetings, response.read()) + + server.stop() + + +class ExceptionTest(test.TestCase): + + def _wsgi_app(self, inner_app): + return fault.FaultWrapper(inner_app) + + def _do_test_exception_safety_reflected_in_faults(self, expose): + class ExceptionWithSafety(exception.CinderException): + safe = expose + + @webob.dec.wsgify + def fail(req): + raise ExceptionWithSafety('some explanation') + + api = self._wsgi_app(fail) + resp = webob.Request.blank('/').get_response(api) + self.assertTrue('{"computeFault' in resp.body, resp.body) + expected = ('ExceptionWithSafety: some explanation' if expose else + 'The server has either erred or is incapable ' + 'of performing the requested operation.') + self.assertTrue(expected in resp.body, resp.body) + self.assertEqual(resp.status_int, 500, resp.body) + + def test_safe_exceptions_are_described_in_faults(self): + self._do_test_exception_safety_reflected_in_faults(True) + + def test_unsafe_exceptions_are_not_described_in_faults(self): + self._do_test_exception_safety_reflected_in_faults(False) + + def _do_test_exception_mapping(self, exception_type, msg): + @webob.dec.wsgify + def fail(req): + raise exception_type(msg) + + api = self._wsgi_app(fail) + resp = webob.Request.blank('/').get_response(api) + self.assertTrue(msg in resp.body, resp.body) + self.assertEqual(resp.status_int, exception_type.code, resp.body) + + if hasattr(exception_type, 'headers'): + for (key, value) in exception_type.headers.iteritems(): + self.assertTrue(key in resp.headers) + self.assertEquals(resp.headers[key], value) + + def test_quota_error_mapping(self): + self._do_test_exception_mapping(exception.QuotaError, 'too many used') + + def test_non_cinder_notfound_exception_mapping(self): + class ExceptionWithCode(Exception): + code = 404 + + self._do_test_exception_mapping(ExceptionWithCode, + 'NotFound') + + def test_non_cinder_exception_mapping(self): + class ExceptionWithCode(Exception): + code = 417 + + self._do_test_exception_mapping(ExceptionWithCode, + 'Expectation failed') + + def test_exception_with_none_code_throws_500(self): + class ExceptionWithNoneCode(Exception): + code = None + + msg = 'Internal Server Error' + + @webob.dec.wsgify + def fail(req): + raise ExceptionWithNoneCode() + + api = self._wsgi_app(fail) + resp = webob.Request.blank('/').get_response(api) + self.assertEqual(500, resp.status_int) diff --git a/cinder/tests/test_xenapi_sm.py b/cinder/tests/test_xenapi_sm.py new file mode 100644 index 0000000000..fecf133fae --- /dev/null +++ b/cinder/tests/test_xenapi_sm.py @@ -0,0 +1,509 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of 
the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import contextlib +import StringIO +import unittest + +import mock +import mox +from oslo.config import cfg + +from cinder.db import api as db_api +from cinder import exception +from cinder.volume import configuration as conf +from cinder.volume import driver as parent_driver +from cinder.volume.drivers.xenapi import lib +from cinder.volume.drivers.xenapi import sm as driver +from cinder.volume.drivers.xenapi import tools + + +class MockContext(object): + def __init__(ctxt, auth_token): + ctxt.auth_token = auth_token + + +@contextlib.contextmanager +def simple_context(value): + yield value + + +def get_configured_driver(server='ignore_server', path='ignore_path'): + configuration = mox.MockObject(conf.Configuration) + configuration.xenapi_nfs_server = server + configuration.xenapi_nfs_serverpath = path + configuration.append_config_values(mox.IgnoreArg()) + return driver.XenAPINFSDriver(configuration=configuration) + + +class DriverTestCase(unittest.TestCase): + + def assert_flag(self, flagname): + self.assertTrue(hasattr(driver.FLAGS, flagname)) + + def test_config_options(self): + self.assert_flag('xenapi_connection_url') + self.assert_flag('xenapi_connection_username') + self.assert_flag('xenapi_connection_password') + self.assert_flag('xenapi_nfs_server') + self.assert_flag('xenapi_nfs_serverpath') + self.assert_flag('xenapi_sr_base_path') + + def test_do_setup(self): + mock = mox.Mox() + mock.StubOutWithMock(driver, 'xenapi_lib') + mock.StubOutWithMock(driver, 'xenapi_opts') + + configuration = mox.MockObject(conf.Configuration) + configuration.xenapi_connection_url = 'url' + configuration.xenapi_connection_username = 'user' + configuration.xenapi_connection_password = 'pass' + configuration.append_config_values(mox.IgnoreArg()) + + session_factory = object() + nfsops = object() + + driver.xenapi_lib.SessionFactory('url', 'user', 'pass').AndReturn( + session_factory) + + driver.xenapi_lib.NFSBasedVolumeOperations( + session_factory).AndReturn(nfsops) + + drv = driver.XenAPINFSDriver(configuration=configuration) + + mock.ReplayAll() + drv.do_setup('context') + mock.VerifyAll() + + self.assertEquals(nfsops, drv.nfs_ops) + + def test_create_volume(self): + mock = mox.Mox() + + ops = mock.CreateMock(lib.NFSBasedVolumeOperations) + drv = get_configured_driver('server', 'path') + drv.nfs_ops = ops + + volume_details = dict( + sr_uuid='sr_uuid', + vdi_uuid='vdi_uuid' + ) + ops.create_volume( + 'server', 'path', 1, 'name', 'desc').AndReturn(volume_details) + + mock.ReplayAll() + result = drv.create_volume(dict( + size=1, display_name='name', display_description='desc')) + mock.VerifyAll() + + self.assertEquals(dict(provider_location='sr_uuid/vdi_uuid'), result) + + def test_delete_volume(self): + mock = mox.Mox() + + ops = mock.CreateMock(lib.NFSBasedVolumeOperations) + drv = get_configured_driver('server', 'path') + drv.nfs_ops = ops + + ops.delete_volume('server', 
'path', 'sr_uuid', 'vdi_uuid') + + mock.ReplayAll() + result = drv.delete_volume(dict( + provider_location='sr_uuid/vdi_uuid')) + mock.VerifyAll() + + def test_create_export_does_not_raise_exception(self): + configuration = conf.Configuration([]) + drv = driver.XenAPINFSDriver(configuration=configuration) + drv.create_export('context', 'volume') + + def test_remove_export_does_not_raise_exception(self): + configuration = conf.Configuration([]) + drv = driver.XenAPINFSDriver(configuration=configuration) + drv.remove_export('context', 'volume') + + def test_initialize_connection(self): + mock = mox.Mox() + + drv = get_configured_driver('server', 'path') + + mock.ReplayAll() + result = drv.initialize_connection( + dict( + display_name='name', + display_description='desc', + provider_location='sr_uuid/vdi_uuid'), + 'connector' + ) + mock.VerifyAll() + + self.assertEquals( + dict( + driver_volume_type='xensm', + data=dict( + name_label='name', + name_description='desc', + sr_uuid='sr_uuid', + vdi_uuid='vdi_uuid', + sr_type='nfs', + server='server', + serverpath='path', + introduce_sr_keys=['sr_type', 'server', 'serverpath'] + ) + ), + result + ) + + def test_initialize_connection_null_values(self): + mock = mox.Mox() + + drv = get_configured_driver('server', 'path') + + mock.ReplayAll() + result = drv.initialize_connection( + dict( + display_name=None, + display_description=None, + provider_location='sr_uuid/vdi_uuid'), + 'connector' + ) + mock.VerifyAll() + + self.assertEquals( + dict( + driver_volume_type='xensm', + data=dict( + name_label='', + name_description='', + sr_uuid='sr_uuid', + vdi_uuid='vdi_uuid', + sr_type='nfs', + server='server', + serverpath='path', + introduce_sr_keys=['sr_type', 'server', 'serverpath'] + ) + ), + result + ) + + def _setup_mock_driver(self, server, serverpath, sr_base_path="_srbp"): + mock = mox.Mox() + + drv = get_configured_driver(server, serverpath) + ops = mock.CreateMock(lib.NFSBasedVolumeOperations) + db = mock.CreateMock(db_api) + drv.nfs_ops = ops + drv.db = db + + mock.StubOutWithMock(driver, 'FLAGS') + driver.FLAGS.xenapi_nfs_server = server + driver.FLAGS.xenapi_nfs_serverpath = serverpath + driver.FLAGS.xenapi_sr_base_path = sr_base_path + + return mock, drv + + def test_create_snapshot(self): + mock, drv = self._setup_mock_driver('server', 'serverpath') + + snapshot = dict( + volume_id="volume-id", + display_name="snapshot-name", + display_description="snapshot-desc", + volume=dict(provider_location="sr-uuid/vdi-uuid")) + + drv.nfs_ops.copy_volume( + "server", "serverpath", "sr-uuid", "vdi-uuid", + "snapshot-name", "snapshot-desc" + ).AndReturn(dict(sr_uuid="copied-sr", vdi_uuid="copied-vdi")) + + mock.ReplayAll() + result = drv.create_snapshot(snapshot) + mock.VerifyAll() + self.assertEquals( + dict(provider_location="copied-sr/copied-vdi"), + result) + + def test_create_volume_from_snapshot(self): + mock, drv = self._setup_mock_driver('server', 'serverpath') + + snapshot = dict( + provider_location='src-sr-uuid/src-vdi-uuid') + volume = dict( + display_name='tgt-name', name_description='tgt-desc') + + drv.nfs_ops.copy_volume( + "server", "serverpath", "src-sr-uuid", "src-vdi-uuid", + "tgt-name", "tgt-desc" + ).AndReturn(dict(sr_uuid="copied-sr", vdi_uuid="copied-vdi")) + + mock.ReplayAll() + result = drv.create_volume_from_snapshot(volume, snapshot) + mock.VerifyAll() + + self.assertEquals( + dict(provider_location='copied-sr/copied-vdi'), result) + + def test_delete_snapshot(self): + mock, drv = self._setup_mock_driver('server', 'serverpath') 
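+        # _setup_mock_driver() (defined above) stubs driver.FLAGS and wires
+        # mox mocks for NFSBasedVolumeOperations and the db API into the
+        # driver under test, so each test only records the calls it expects.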
+ + snapshot = dict( + provider_location='src-sr-uuid/src-vdi-uuid') + + drv.nfs_ops.delete_volume( + "server", "serverpath", "src-sr-uuid", "src-vdi-uuid") + + mock.ReplayAll() + drv.delete_snapshot(snapshot) + mock.VerifyAll() + + def test_copy_volume_to_image_xenserver_case(self): + mock, drv = self._setup_mock_driver( + 'server', 'serverpath', '/var/run/sr-mount') + + mock.StubOutWithMock(drv, '_use_glance_plugin_to_upload_volume') + mock.StubOutWithMock(driver, 'is_xenserver_format') + context = MockContext('token') + + driver.is_xenserver_format('image_meta').AndReturn(True) + + drv._use_glance_plugin_to_upload_volume( + context, 'volume', 'image_service', 'image_meta').AndReturn( + 'result') + mock.ReplayAll() + + result = drv.copy_volume_to_image( + context, "volume", "image_service", "image_meta") + self.assertEquals('result', result) + + mock.VerifyAll() + + def test_copy_volume_to_image_non_xenserver_case(self): + mock, drv = self._setup_mock_driver( + 'server', 'serverpath', '/var/run/sr-mount') + + mock.StubOutWithMock(drv, '_use_image_utils_to_upload_volume') + mock.StubOutWithMock(driver, 'is_xenserver_format') + context = MockContext('token') + + driver.is_xenserver_format('image_meta').AndReturn(False) + + drv._use_image_utils_to_upload_volume( + context, 'volume', 'image_service', 'image_meta').AndReturn( + 'result') + mock.ReplayAll() + + result = drv.copy_volume_to_image( + context, "volume", "image_service", "image_meta") + self.assertEquals('result', result) + + mock.VerifyAll() + + def test_use_image_utils_to_upload_volume(self): + mock, drv = self._setup_mock_driver( + 'server', 'serverpath', '/var/run/sr-mount') + + volume = dict(provider_location='sr-uuid/vdi-uuid') + context = MockContext('token') + + mock.StubOutWithMock(driver.image_utils, 'upload_volume') + + drv.nfs_ops.volume_attached_here( + 'server', 'serverpath', 'sr-uuid', 'vdi-uuid', True).AndReturn( + simple_context('device')) + + driver.image_utils.upload_volume( + context, 'image_service', 'image_meta', 'device') + + mock.ReplayAll() + drv._use_image_utils_to_upload_volume( + context, volume, "image_service", "image_meta") + mock.VerifyAll() + + def test_use_glance_plugin_to_upload_volume(self): + mock, drv = self._setup_mock_driver( + 'server', 'serverpath', '/var/run/sr-mount') + + volume = dict(provider_location='sr-uuid/vdi-uuid') + context = MockContext('token') + + mock.StubOutWithMock(driver.glance, 'get_api_servers') + + driver.glance.get_api_servers().AndReturn((x for x in ['glancesrv'])) + + drv.nfs_ops.use_glance_plugin_to_upload_volume( + 'server', 'serverpath', 'sr-uuid', 'vdi-uuid', 'glancesrv', + 'image-id', 'token', '/var/run/sr-mount') + + mock.ReplayAll() + drv._use_glance_plugin_to_upload_volume( + context, volume, "image_service", {"id": "image-id"}) + mock.VerifyAll() + + def test_copy_image_to_volume_xenserver_case(self): + mock, drv = self._setup_mock_driver( + 'server', 'serverpath', '/var/run/sr-mount') + + mock.StubOutWithMock(drv, '_use_glance_plugin_to_copy_image_to_volume') + mock.StubOutWithMock(driver, 'is_xenserver_image') + context = MockContext('token') + + driver.is_xenserver_image( + context, 'image_service', 'image_id').AndReturn(True) + drv._use_glance_plugin_to_copy_image_to_volume( + context, 'volume', 'image_service', 'image_id').AndReturn('result') + mock.ReplayAll() + result = drv.copy_image_to_volume( + context, "volume", "image_service", "image_id") + self.assertEquals('result', result) + mock.VerifyAll() + + def 
test_copy_image_to_volume_non_xenserver_case(self): + mock, drv = self._setup_mock_driver( + 'server', 'serverpath', '/var/run/sr-mount') + + mock.StubOutWithMock(drv, '_use_image_utils_to_pipe_bytes_to_volume') + mock.StubOutWithMock(driver, 'is_xenserver_image') + context = MockContext('token') + + driver.is_xenserver_image( + context, 'image_service', 'image_id').AndReturn(False) + drv._use_image_utils_to_pipe_bytes_to_volume( + context, 'volume', 'image_service', 'image_id').AndReturn(True) + mock.ReplayAll() + drv.copy_image_to_volume( + context, "volume", "image_service", "image_id") + mock.VerifyAll() + + def test_use_image_utils_to_pipe_bytes_to_volume(self): + mock, drv = self._setup_mock_driver( + 'server', 'serverpath', '/var/run/sr-mount') + + volume = dict(provider_location='sr-uuid/vdi-uuid') + context = MockContext('token') + + mock.StubOutWithMock(driver.image_utils, 'fetch_to_raw') + + drv.nfs_ops.volume_attached_here( + 'server', 'serverpath', 'sr-uuid', 'vdi-uuid', False).AndReturn( + simple_context('device')) + + driver.image_utils.fetch_to_raw( + context, 'image_service', 'image_id', 'device') + + mock.ReplayAll() + drv._use_image_utils_to_pipe_bytes_to_volume( + context, volume, "image_service", "image_id") + mock.VerifyAll() + + def test_use_glance_plugin_to_copy_image_to_volume_success(self): + mock, drv = self._setup_mock_driver( + 'server', 'serverpath', '/var/run/sr-mount') + + volume = dict( + provider_location='sr-uuid/vdi-uuid', + size=2) + + mock.StubOutWithMock(driver.glance, 'get_api_servers') + + driver.glance.get_api_servers().AndReturn((x for x in ['glancesrv'])) + + drv.nfs_ops.use_glance_plugin_to_overwrite_volume( + 'server', 'serverpath', 'sr-uuid', 'vdi-uuid', 'glancesrv', + 'image_id', 'token', '/var/run/sr-mount').AndReturn(True) + + drv.nfs_ops.resize_volume( + 'server', 'serverpath', 'sr-uuid', 'vdi-uuid', 2) + + mock.ReplayAll() + drv._use_glance_plugin_to_copy_image_to_volume( + MockContext('token'), volume, "ignore", "image_id") + mock.VerifyAll() + + def test_use_glance_plugin_to_copy_image_to_volume_fail(self): + mock, drv = self._setup_mock_driver( + 'server', 'serverpath', '/var/run/sr-mount') + + volume = dict( + provider_location='sr-uuid/vdi-uuid', + size=2) + + mock.StubOutWithMock(driver.glance, 'get_api_servers') + + driver.glance.get_api_servers().AndReturn((x for x in ['glancesrv'])) + + drv.nfs_ops.use_glance_plugin_to_overwrite_volume( + 'server', 'serverpath', 'sr-uuid', 'vdi-uuid', 'glancesrv', + 'image_id', 'token', '/var/run/sr-mount').AndReturn(False) + + mock.ReplayAll() + + self.assertRaises( + exception.ImageCopyFailure, + lambda: drv._use_glance_plugin_to_copy_image_to_volume( + MockContext('token'), volume, "ignore", "image_id")) + + mock.VerifyAll() + + def test_get_volume_stats_reports_required_keys(self): + drv = get_configured_driver() + + stats = drv.get_volume_stats() + + required_metrics = [ + 'volume_backend_name', 'vendor_name', 'driver_version', + 'storage_protocol', 'total_capacity_gb', 'free_capacity_gb', + 'reserved_percentage' + ] + + for metric in required_metrics: + self.assertTrue(metric in stats) + + def test_get_volume_stats_reports_unknown_cap(self): + drv = get_configured_driver() + + stats = drv.get_volume_stats() + + self.assertEquals('unknown', stats['free_capacity_gb']) + + def test_reported_driver_type(self): + drv = get_configured_driver() + + stats = drv.get_volume_stats() + + self.assertEquals('xensm', stats['storage_protocol']) + + +class ToolsTest(unittest.TestCase): + 
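+    # tools.get_this_vm_uuid() is expected to return the first line of
+    # /sys/hypervisor/uuid, stripped of surrounding whitespace by the
+    # _stripped_first_line_of() helper; both behaviours are pinned down below.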
@mock.patch('cinder.volume.drivers.xenapi.tools._stripped_first_line_of')
+    def test_get_this_vm_uuid(self, mock_read_first_line):
+        mock_read_first_line.return_value = 'someuuid'
+        self.assertEquals('someuuid', tools.get_this_vm_uuid())
+        mock_read_first_line.assert_called_once_with('/sys/hypervisor/uuid')
+
+    def test_stripped_first_line_of(self):
+        mock_context_manager = mock.Mock()
+        mock_context_manager.__enter__ = mock.Mock(
+            return_value=StringIO.StringIO(' blah \n second line \n'))
+        mock_context_manager.__exit__ = mock.Mock(return_value=False)
+        mock_open = mock.Mock(return_value=mock_context_manager)
+
+        with mock.patch('__builtin__.open', mock_open):
+            self.assertEquals(
+                'blah', tools._stripped_first_line_of('/somefile'))
+
+        mock_open.assert_called_once_with('/somefile', 'rb')
diff --git a/cinder/tests/test_xiv.py b/cinder/tests/test_xiv.py
new file mode 100644
index 0000000000..8db11a0cad
--- /dev/null
+++ b/cinder/tests/test_xiv.py
@@ -0,0 +1,245 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 IBM Corp.
+# Copyright (c) 2012 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# Authors:
+# Erik Zaadi
+# Avishay Traeger
+
+import mox
+
+from cinder import exception
+from cinder import flags
+from cinder import test
+from cinder.volume import configuration as conf
+from cinder.volume.drivers import xiv
+
+
+FLAGS = flags.FLAGS
+
+FAKE = "fake"
+VOLUME = {'size': 16,
+          'name': FAKE,
+          'id': 1}
+
+CONNECTOR = {'initiator': "iqn.2012-07.org.fake:01:948f189c4695", }
+
+
+class XIVFakeProxyDriver(object):
+    """Fake XIV Proxy Driver."""
+
+    def __init__(self, xiv_info, logger, expt):
+        """
+        Initialize Proxy
+        """
+
+        self.xiv_info = xiv_info
+        self.logger = logger
+        self.exception = expt
+        self.xiv_portal = \
+            self.xiv_iqn = FAKE
+
+        self.volumes = {}
+
+    def setup(self, context):
+        if self.xiv_info['xiv_user'] != FLAGS.san_login:
+            raise self.exception.NotAuthorized()
+
+        if self.xiv_info['xiv_address'] != FLAGS.san_ip:
+            raise self.exception.HostNotFound(host='fake')
+
+    def create_volume(self, volume):
+        if volume['size'] > 100:
+            raise self.exception.VolumeBackendAPIException(data='blah')
+        self.volumes[volume['name']] = volume
+
+    def volume_exists(self, volume):
+        return self.volumes.get(volume['name'], None) is not None
+
+    def delete_volume(self, volume):
+        if self.volumes.get(volume['name'], None) is not None:
+            del self.volumes[volume['name']]
+
+    def initialize_connection(self, volume, connector):
+        if not self.volume_exists(volume):
+            raise self.exception.VolumeNotFound(volume_id=volume['id'])
+        lun_id = volume['id']
+
+        self.volumes[volume['name']]['attached'] = connector
+
+        return {'driver_volume_type': 'iscsi',
+                'data': {'target_discovered': True,
+                         'target_portal': self.xiv_portal,
+                         'target_iqn': self.xiv_iqn,
+                         'target_lun': lun_id,
+                         'volume_id': volume['id'],
+                         'multipath': True,
+                         'provider_location': "%s,1 %s %s" % (
+                             self.xiv_portal,
+                             self.xiv_iqn,
+                             lun_id), },
+                }
+
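+    # Illustrative note: the dict returned above has the shape Cinder expects
+    # from an iSCSI driver's initialize_connection(), e.g. (sketch only):
+    #     props = proxy.initialize_connection(VOLUME, CONNECTOR)
+    #     props['driver_volume_type']    # 'iscsi'
+    #     props['data']['target_lun']    # == VOLUME['id']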
def terminate_connection(self, volume, connector): + if not self.volume_exists(volume): + raise self.exception.VolumeNotFound(volume_id=volume['id']) + if not self.is_volume_attached(volume, connector): + raise self.exception.VolumeNotFoundForInstance(instance_id='fake') + del self.volumes[volume['name']]['attached'] + + def is_volume_attached(self, volume, connector): + if not self.volume_exists(volume): + raise self.exception.VolumeNotFound(volume_id=volume['id']) + + return (self.volumes[volume['name']].get('attached', None) + == connector) + + +class XIVVolumeDriverTest(test.TestCase): + """Test IBM XIV volume driver.""" + + def setUp(self): + """Initialize IVM XIV Driver.""" + super(XIVVolumeDriverTest, self).setUp() + + configuration = mox.MockObject(conf.Configuration) + configuration.san_is_local = False + configuration.append_config_values(mox.IgnoreArg()) + + self.driver = xiv.XIVDriver(configuration=configuration) + + def test_initialized_should_set_xiv_info(self): + """Test that the san flags are passed to the XIV proxy.""" + + self.assertEquals(self.driver.xiv_proxy.xiv_info['xiv_user'], + FLAGS.san_login) + self.assertEquals(self.driver.xiv_proxy.xiv_info['xiv_pass'], + FLAGS.san_password) + self.assertEquals(self.driver.xiv_proxy.xiv_info['xiv_address'], + FLAGS.san_ip) + self.assertEquals(self.driver.xiv_proxy.xiv_info['xiv_vol_pool'], + FLAGS.san_clustername) + + def test_setup_should_fail_if_credentials_are_invalid(self): + """Test that the xiv_proxy validates credentials.""" + + self.driver.xiv_proxy.xiv_info['xiv_user'] = 'invalid' + self.assertRaises(exception.NotAuthorized, self.driver.do_setup, None) + + def test_setup_should_fail_if_connection_is_invalid(self): + """Test that the xiv_proxy validates connection.""" + + self.driver.xiv_proxy.xiv_info['xiv_address'] = 'invalid' + self.assertRaises(exception.HostNotFound, self.driver.do_setup, None) + + def test_create_volume(self): + """Test creating a volume.""" + + self.driver.do_setup(None) + self.driver.create_volume(VOLUME) + has_volume = self.driver.xiv_proxy.volume_exists(VOLUME) + self.assertTrue(has_volume) + self.driver.delete_volume(VOLUME) + + def test_volume_exists(self): + """Test the volume exist method with a volume that doesn't exist.""" + + self.driver.do_setup(None) + self.assertFalse(self.driver.xiv_proxy.volume_exists({'name': FAKE})) + + def test_delete_volume(self): + """Verify that a volume is deleted.""" + + self.driver.do_setup(None) + self.driver.create_volume(VOLUME) + self.driver.delete_volume(VOLUME) + has_volume = self.driver.xiv_proxy.volume_exists(VOLUME) + self.assertFalse(has_volume) + + def test_delete_volume_should_fail_for_not_existing_volume(self): + """Verify that deleting a non-existing volume is OK.""" + + self.driver.do_setup(None) + self.driver.delete_volume(VOLUME) + + def test_create_volume_should_fail_if_no_pool_space_left(self): + """Vertify that the xiv_proxy validates volume pool space.""" + + self.driver.do_setup(None) + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.create_volume, + {'name': FAKE, + 'id': 1, + 'size': 12000}) + + def test_initialize_connection(self): + """Test that inititialize connection attaches volume to host.""" + + self.driver.do_setup(None) + self.driver.create_volume(VOLUME) + self.driver.initialize_connection(VOLUME, CONNECTOR) + + self.assertTrue( + self.driver.xiv_proxy.is_volume_attached(VOLUME, CONNECTOR)) + + self.driver.terminate_connection(VOLUME, CONNECTOR) + self.driver.delete_volume(VOLUME) + + def 
test_initialize_connection_should_fail_for_non_existing_volume(self): + """Verify that initialize won't work for non-existing volume.""" + + self.driver.do_setup(None) + self.assertRaises(exception.VolumeNotFound, + self.driver.initialize_connection, + VOLUME, + CONNECTOR) + + def test_terminate_connection(self): + """Test terminating a connection.""" + + self.driver.do_setup(None) + self.driver.create_volume(VOLUME) + self.driver.initialize_connection(VOLUME, CONNECTOR) + self.driver.terminate_connection(VOLUME, CONNECTOR) + + self.assertFalse(self.driver.xiv_proxy.is_volume_attached(VOLUME, + CONNECTOR)) + + self.driver.delete_volume(VOLUME) + + def test_terminate_connection_should_fail_on_non_existing_volume(self): + """Test that terminate won't work for non-existing volumes.""" + + self.driver.do_setup(None) + self.assertRaises(exception.VolumeNotFound, + self.driver.terminate_connection, + VOLUME, + CONNECTOR) + + def test_terminate_connection_should_fail_on_non_attached_volume(self): + """Test that terminate won't work for volumes that are not attached.""" + + self.driver.do_setup(None) + self.driver.create_volume(VOLUME) + + self.assertRaises(exception.VolumeNotFoundForInstance, + self.driver.terminate_connection, + VOLUME, + CONNECTOR) + + self.driver.delete_volume(VOLUME) diff --git a/cinder/tests/test_zadara.py b/cinder/tests/test_zadara.py new file mode 100644 index 0000000000..88c3bb7310 --- /dev/null +++ b/cinder/tests/test_zadara.py @@ -0,0 +1,581 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2012 Zadara Storage, Inc. +# Copyright (c) 2012 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Tests for Zadara VPSA volume driver +""" + +import copy +import httplib + +from cinder import exception +from cinder.openstack.common import log as logging +from cinder import test +from cinder.volume.drivers import zadara + +from lxml import etree + +LOG = logging.getLogger("cinder.volume.driver") + +DEFAULT_RUNTIME_VARS = { + 'status': 200, + 'user': 'test', + 'password': 'test_password', + 'access_key': '0123456789ABCDEF', + 'volumes': [], + 'servers': [], + 'controllers': [('active_ctrl', {'display_name': 'test_ctrl'})], + 'counter': 1000, + + 'login': """ + + + 2012-04-30... + %s + 1 + 2012-02-21... + jsmith@example.com + jsmith + + 0 + """, + + 'good': """ + + 0 + """, + + 'bad_login': """ + + 5 + Some message... 
+ """, + + 'bad_volume': """ + + 10081 + Virtual volume xxx not found + """, + + 'bad_server': """ + + 10086 + Server xxx not found + """, + + 'server_created': """ + + %s + 0 + """, +} + +RUNTIME_VARS = None + + +class FakeRequest(object): + def __init__(self, method, url, body): + self.method = method + self.url = url + self.body = body + self.status = RUNTIME_VARS['status'] + + def read(self): + ops = {'POST': [('/api/users/login.xml', self._login), + ('/api/volumes.xml', self._create_volume), + ('/api/servers.xml', self._create_server), + ('/api/servers/*/volumes.xml', self._attach), + ('/api/volumes/*/detach.xml', self._detach)], + 'DELETE': [('/api/volumes/*', self._delete)], + 'GET': [('/api/volumes.xml', self._list_volumes), + ('/api/vcontrollers.xml', self._list_controllers), + ('/api/servers.xml', self._list_servers), + ('/api/volumes/*/servers.xml', + self._list_vol_attachments)] + } + + ops_list = ops[self.method] + modified_url = self.url.split('?')[0] + for (templ_url, func) in ops_list: + if self._compare_url(modified_url, templ_url): + result = func() + return result + + def _compare_url(self, url, template_url): + items = url.split('/') + titems = template_url.split('/') + for (i, titem) in enumerate(titems): + if titem != '*' and titem != items[i]: + return False + return True + + def _get_parameters(self, data): + items = data.split('&') + params = {} + for item in items: + if item: + (k, v) = item.split('=') + params[k] = v + return params + + def _get_counter(self): + cnt = RUNTIME_VARS['counter'] + RUNTIME_VARS['counter'] += 1 + return cnt + + def _login(self): + params = self._get_parameters(self.body) + if (params['user'] == RUNTIME_VARS['user'] and + params['password'] == RUNTIME_VARS['password']): + return RUNTIME_VARS['login'] % RUNTIME_VARS['access_key'] + else: + return RUNTIME_VARS['bad_login'] + + def _incorrect_access_key(self, params): + if params['access_key'] != RUNTIME_VARS['access_key']: + return True + else: + return False + + def _create_volume(self): + params = self._get_parameters(self.body) + if self._incorrect_access_key(params): + return RUNTIME_VARS['bad_login'] + + params['attachments'] = [] + vpsa_vol = 'volume-%07d' % self._get_counter() + RUNTIME_VARS['volumes'].append((vpsa_vol, params)) + return RUNTIME_VARS['good'] + + def _create_server(self): + params = self._get_parameters(self.body) + if self._incorrect_access_key(params): + return RUNTIME_VARS['bad_login'] + + vpsa_srv = 'srv-%07d' % self._get_counter() + RUNTIME_VARS['servers'].append((vpsa_srv, params)) + return RUNTIME_VARS['server_created'] % vpsa_srv + + def _attach(self): + params = self._get_parameters(self.body) + if self._incorrect_access_key(params): + return RUNTIME_VARS['bad_login'] + + srv = self.url.split('/')[3] + vol = params['volume_name[]'] + + for (vol_name, params) in RUNTIME_VARS['volumes']: + if vol_name == vol: + attachments = params['attachments'] + if srv in attachments: + #already attached - ok + return RUNTIME_VARS['good'] + else: + attachments.append(srv) + return RUNTIME_VARS['good'] + + return RUNTIME_VARS['bad_volume'] + + def _detach(self): + params = self._get_parameters(self.body) + if self._incorrect_access_key(params): + return RUNTIME_VARS['bad_login'] + + vol = self.url.split('/')[3] + srv = params['server_name[]'] + + for (vol_name, params) in RUNTIME_VARS['volumes']: + if vol_name == vol: + attachments = params['attachments'] + if srv not in attachments: + return RUNTIME_VARS['bad_server'] + else: + attachments.remove(srv) + return 
RUNTIME_VARS['good'] + + return RUNTIME_VARS['bad_volume'] + + def _delete(self): + vol = self.url.split('/')[3].split('.')[0] + + for (vol_name, params) in RUNTIME_VARS['volumes']: + if vol_name == vol: + if params['attachments']: + # there are attachments - should be volume busy error + return RUNTIME_VARS['bad_volume'] + else: + RUNTIME_VARS['volumes'].remove((vol_name, params)) + return RUNTIME_VARS['good'] + + return RUNTIME_VARS['bad_volume'] + + def _generate_list_resp(self, header, footer, body, lst): + resp = header + for (obj, params) in lst: + resp += body % (obj, params['display_name']) + resp += footer + return resp + + def _list_volumes(self): + header = """ + 0 + """ + footer = "" + body = """ + %s + %s + Available + 1 + 1 + r5 + write-through + 2012-01-28... + 2012-01-28... + """ + return self._generate_list_resp(header, + footer, + body, + RUNTIME_VARS['volumes']) + + def _list_controllers(self): + header = """ + 0 + """ + footer = "" + body = """ + %s + %s + active + iqn.2011-04.com.zadarastorage:vsa-xxx:1 + 1.1.1.1 + 1.1.1.1 + 0.0.09-05.1--77.7 + ok + ok + test_chap_user + test_chap_secret + """ + return self._generate_list_resp(header, + footer, + body, + RUNTIME_VARS['controllers']) + + def _list_servers(self): + header = """ + 0 + """ + footer = "" + body = """ + %s + %s + %s + Active + 2012-01-28... + 2012-01-28... + """ + + resp = header + for (obj, params) in RUNTIME_VARS['servers']: + resp += body % (obj, params['display_name'], params['iqn']) + resp += footer + return resp + + def _get_server_obj(self, name): + for (srv_name, params) in RUNTIME_VARS['servers']: + if srv_name == name: + return params + + def _list_vol_attachments(self): + vol = self.url.split('/')[3] + + header = """ + 0 + """ + footer = "" + body = """ + %s + %s + %s + iqn.2011-04.com.zadarastorage:vsa-xxx:1 + 0 + """ + + for (vol_name, params) in RUNTIME_VARS['volumes']: + if vol_name == vol: + attachments = params['attachments'] + resp = header + for server in attachments: + srv_params = self._get_server_obj(server) + resp += body % (server, + srv_params['display_name'], + srv_params['iqn']) + resp += footer + return resp + + return RUNTIME_VARS['bad_volume'] + + +class FakeHTTPConnection(object): + """A fake httplib.HTTPConnection for zadara volume driver tests.""" + def __init__(self, host, port, use_ssl=False): + LOG.debug('Enter: __init__ FakeHTTPConnection') + self.host = host + self.port = port + self.use_ssl = use_ssl + self.req = None + + def request(self, method, url, body): + LOG.debug('Enter: request') + self.req = FakeRequest(method, url, body) + + def getresponse(self): + LOG.debug('Enter: getresponse') + return self.req + + def close(self): + LOG.debug('Enter: close') + self.req = None + + +class FakeHTTPSConnection(FakeHTTPConnection): + def __init__(self, host, port): + LOG.debug('Enter: __init__ FakeHTTPSConnection') + super(FakeHTTPSConnection, self).__init__(host, port, use_ssl=True) + + +class ZadaraVPSADriverTestCase(test.TestCase): + """Test case for Zadara VPSA volume driver.""" + + def setUp(self): + LOG.debug('Enter: setUp') + super(ZadaraVPSADriverTestCase, self).setUp() + self.flags( + zadara_user='test', + zadara_password='test_password', + ) + global RUNTIME_VARS + RUNTIME_VARS = copy.deepcopy(DEFAULT_RUNTIME_VARS) + + self.driver = zadara.ZadaraVPSAISCSIDriver() + self.stubs.Set(httplib, 'HTTPConnection', FakeHTTPConnection) + self.stubs.Set(httplib, 'HTTPSConnection', FakeHTTPSConnection) + self.driver.do_setup(None) + + def tearDown(self): + 
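+        # Nothing to undo per test: setUp() re-seeds the module-level
+        # RUNTIME_VARS from DEFAULT_RUNTIME_VARS before each test runs.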
super(ZadaraVPSADriverTestCase, self).tearDown() + + def test_create_destroy(self): + """Create/Delete volume.""" + volume = {'name': 'test_volume_01', 'size': 1} + self.driver.create_volume(volume) + self.driver.delete_volume(volume) + + def test_create_destroy_multiple(self): + """Create/Delete multiple volumes.""" + self.flags(zadara_vpsa_allow_nonexistent_delete=False) + self.driver.create_volume({'name': 'test_volume_01', 'size': 1}) + self.driver.create_volume({'name': 'test_volume_02', 'size': 2}) + self.driver.create_volume({'name': 'test_volume_03', 'size': 3}) + self.driver.delete_volume({'name': 'test_volume_02'}) + self.driver.delete_volume({'name': 'test_volume_03'}) + self.driver.delete_volume({'name': 'test_volume_01'}) + + self.assertRaises(exception.VolumeNotFound, + self.driver.delete_volume, + {'name': 'test_volume_04'}) + self.flags(zadara_vpsa_allow_nonexistent_delete=True) + self.driver.delete_volume({'name': 'test_volume_04'}) + + def test_destroy_non_existent(self): + """Delete non-existent volume.""" + self.flags(zadara_vpsa_allow_nonexistent_delete=False) + volume = {'name': 'test_volume_02', 'size': 1} + self.assertRaises(exception.VolumeNotFound, + self.driver.delete_volume, + volume) + self.flags(zadara_vpsa_allow_nonexistent_delete=True) + + def test_empty_apis(self): + """Test empty func (for coverage only).""" + context = None + volume = {'name': 'test_volume_01', 'size': 1} + self.driver.create_export(context, volume) + self.driver.ensure_export(context, volume) + self.driver.remove_export(context, volume) + + self.assertRaises(NotImplementedError, + self.driver.create_volume_from_snapshot, + volume, None) + self.assertRaises(NotImplementedError, + self.driver.create_snapshot, + None) + self.assertRaises(NotImplementedError, + self.driver.delete_snapshot, + None) + self.assertRaises(NotImplementedError, + self.driver.local_path, + None) + + self.driver.check_for_setup_error() + + def test_volume_attach_detach(self): + """Test volume attachment and detach.""" + volume = {'name': 'test_volume_01', 'size': 1, 'id': 123} + connector = dict(initiator='test_iqn.1') + + self.driver.create_volume(volume) + + props = self.driver.initialize_connection(volume, connector) + self.assertEqual(props['driver_volume_type'], 'iscsi') + data = props['data'] + self.assertEqual(data['target_portal'], '1.1.1.1:3260') + self.assertEqual(data['target_iqn'], + 'iqn.2011-04.com.zadarastorage:vsa-xxx:1') + self.assertEqual(data['target_lun'], '0') + self.assertEqual(data['volume_id'], 123) + self.assertEqual(data['auth_method'], 'CHAP') + self.assertEqual(data['auth_username'], 'test_chap_user') + self.assertEqual(data['auth_password'], 'test_chap_secret') + + self.driver.terminate_connection(volume, connector) + self.driver.delete_volume(volume) + + def test_volume_attach_multiple_detach(self): + """Test multiple volume attachment and detach.""" + volume = {'name': 'test_volume_01', 'size': 1, 'id': 123} + connector1 = dict(initiator='test_iqn.1') + connector2 = dict(initiator='test_iqn.2') + connector3 = dict(initiator='test_iqn.3') + + self.driver.create_volume(volume) + props1 = self.driver.initialize_connection(volume, connector1) + props2 = self.driver.initialize_connection(volume, connector2) + props3 = self.driver.initialize_connection(volume, connector3) + + self.driver.terminate_connection(volume, connector1) + self.driver.terminate_connection(volume, connector3) + self.driver.terminate_connection(volume, connector2) + self.driver.delete_volume(volume) + + def 
test_wrong_attach_params(self): + """Test different wrong attach scenarios.""" + volume1 = {'name': 'test_volume_01', 'size': 1, 'id': 101} + volume2 = {'name': 'test_volume_02', 'size': 1, 'id': 102} + volume3 = {'name': 'test_volume_03', 'size': 1, 'id': 103} + connector1 = dict(initiator='test_iqn.1') + connector2 = dict(initiator='test_iqn.2') + connector3 = dict(initiator='test_iqn.3') + + self.assertRaises(exception.VolumeNotFound, + self.driver.initialize_connection, + volume1, connector1) + + def test_wrong_detach_params(self): + """Test different wrong detachment scenarios.""" + + volume1 = {'name': 'test_volume_01', 'size': 1, 'id': 101} + volume2 = {'name': 'test_volume_02', 'size': 1, 'id': 102} + volume3 = {'name': 'test_volume_03', 'size': 1, 'id': 103} + connector1 = dict(initiator='test_iqn.1') + connector2 = dict(initiator='test_iqn.2') + connector3 = dict(initiator='test_iqn.3') + + self.driver.create_volume(volume1) + self.driver.create_volume(volume2) + props1 = self.driver.initialize_connection(volume1, connector1) + props2 = self.driver.initialize_connection(volume2, connector2) + + self.assertRaises(exception.ZadaraServerNotFound, + self.driver.terminate_connection, + volume1, connector3) + self.assertRaises(exception.VolumeNotFound, + self.driver.terminate_connection, + volume3, connector1) + self.assertRaises(exception.FailedCmdWithDump, + self.driver.terminate_connection, + volume1, connector2) + + def test_wrong_login_reply(self): + """Test wrong login reply.""" + + RUNTIME_VARS['login'] = """ + %s + 0 + """ + self.assertRaises(exception.MalformedResponse, + self.driver.do_setup, None) + + RUNTIME_VARS['login'] = """ + + + 2012-04-30... + 1 + 2012-02-21... + jsmith@example.com + jsmith + + %s + 0 + """ + self.assertRaises(exception.MalformedResponse, + self.driver.do_setup, None) + + def test_ssl_use(self): + """Coverage test for SSL connection.""" + self.flags(zadara_vpsa_use_ssl=True) + self.driver.do_setup(None) + self.flags(zadara_vpsa_use_ssl=False) + + def test_bad_http_response(self): + """Coverage test for non-good HTTP response.""" + RUNTIME_VARS['status'] = 400 + + volume = {'name': 'test_volume_01', 'size': 1} + self.assertRaises(exception.BadHTTPResponseStatus, + self.driver.create_volume, volume) + + def test_delete_without_detach(self): + """Test volume deletion without detach.""" + + volume1 = {'name': 'test_volume_01', 'size': 1, 'id': 101} + connector1 = dict(initiator='test_iqn.1') + connector2 = dict(initiator='test_iqn.2') + connector3 = dict(initiator='test_iqn.3') + + self.driver.create_volume(volume1) + props1 = self.driver.initialize_connection(volume1, connector1) + props2 = self.driver.initialize_connection(volume1, connector2) + props3 = self.driver.initialize_connection(volume1, connector3) + + self.flags(zadara_vpsa_auto_detach_on_delete=False) + self.assertRaises(exception.VolumeAttached, + self.driver.delete_volume, volume1) + + self.flags(zadara_vpsa_auto_detach_on_delete=True) + self.driver.delete_volume(volume1) + + def test_no_active_ctrl(self): + + RUNTIME_VARS['controllers'] = [] + + volume = {'name': 'test_volume_01', 'size': 1, 'id': 123} + connector = dict(initiator='test_iqn.1') + + self.driver.create_volume(volume) + self.assertRaises(exception.ZadaraVPSANoActiveController, + self.driver.initialize_connection, + volume, connector) diff --git a/cinder/tests/utils.py b/cinder/tests/utils.py new file mode 100644 index 0000000000..221e6b097b --- /dev/null +++ b/cinder/tests/utils.py @@ -0,0 +1,32 @@ +# vim: tabstop=4 
shiftwidth=4 softtabstop=4 +# +# Copyright 2011 OpenStack LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# + +import os + +import cinder.context + +FLAGS = cinder.flags.FLAGS + + +def get_test_admin_context(): + return cinder.context.get_admin_context() + + +def is_cinder_installed(): + if os.path.exists('../../cinder.cinder.egg-info'): + return True + else: + return False diff --git a/cinder/tests/var/ca.crt b/cinder/tests/var/ca.crt new file mode 100644 index 0000000000..9d66ca6270 --- /dev/null +++ b/cinder/tests/var/ca.crt @@ -0,0 +1,35 @@ +-----BEGIN CERTIFICATE----- +MIIGDDCCA/SgAwIBAgIJAPSvwQYk4qI4MA0GCSqGSIb3DQEBBQUAMGExCzAJBgNV +BAYTAkFVMRMwEQYDVQQIEwpTb21lLVN0YXRlMRUwEwYDVQQKEwxPcGVuc3RhY2sg +Q0ExEjAQBgNVBAsTCUdsYW5jZSBDQTESMBAGA1UEAxMJR2xhbmNlIENBMB4XDTEy +MDIwOTE3MTAwMloXDTIyMDIwNjE3MTAwMlowYTELMAkGA1UEBhMCQVUxEzARBgNV +BAgTClNvbWUtU3RhdGUxFTATBgNVBAoTDE9wZW5zdGFjayBDQTESMBAGA1UECxMJ +R2xhbmNlIENBMRIwEAYDVQQDEwlHbGFuY2UgQ0EwggIiMA0GCSqGSIb3DQEBAQUA +A4ICDwAwggIKAoICAQDmf+fapWfzy1Uylus0KGalw4X/5xZ+ltPVOr+IdCPbstvi +RTC5g+O+TvXeOP32V/cnSY4ho/+f2q730za+ZA/cgWO252rcm3Q7KTJn3PoqzJvX +/l3EXe3/TCrbzgZ7lW3QLTCTEE2eEzwYG3wfDTOyoBq+F6ct6ADh+86gmpbIRfYI +N+ixB0hVyz9427PTof97fL7qxxkjAayB28OfwHrkEBl7iblNhUC0RoH+/H9r5GEl +GnWiebxfNrONEHug6PHgiaGq7/Dj+u9bwr7J3/NoS84I08ajMnhlPZxZ8bS/O8If +ceWGZv7clPozyhABT/otDfgVcNH1UdZ4zLlQwc1MuPYN7CwxrElxc8Quf94ttGjb +tfGTl4RTXkDofYdG1qBWW962PsGl2tWmbYDXV0q5JhV/IwbrE1X9f+OksJQne1/+ +dZDxMhdf2Q1V0P9hZZICu4+YhmTMs5Mc9myKVnzp4NYdX5fXoB/uNYph+G7xG5IK +WLSODKhr1wFGTTcuaa8LhOH5UREVenGDJuc6DdgX9a9PzyJGIi2ngQ03TJIkCiU/ +4J/r/vsm81ezDiYZSp2j5JbME+ixW0GBLTUWpOIxUSHgUFwH5f7lQwbXWBOgwXQk +BwpZTmdQx09MfalhBtWeu4/6BnOCOj7e/4+4J0eVxXST0AmVyv8YjJ2nz1F9oQID +AQABo4HGMIHDMB0GA1UdDgQWBBTk7Krj4bEsTjHXaWEtI2GZ5ACQyTCBkwYDVR0j +BIGLMIGIgBTk7Krj4bEsTjHXaWEtI2GZ5ACQyaFlpGMwYTELMAkGA1UEBhMCQVUx +EzARBgNVBAgTClNvbWUtU3RhdGUxFTATBgNVBAoTDE9wZW5zdGFjayBDQTESMBAG +A1UECxMJR2xhbmNlIENBMRIwEAYDVQQDEwlHbGFuY2UgQ0GCCQD0r8EGJOKiODAM +BgNVHRMEBTADAQH/MA0GCSqGSIb3DQEBBQUAA4ICAQA8Zrss/MiwFHGmDlercE0h +UvzA54n/EvKP9nP3jHM2qW/VPfKdnFw99nEPFLhb+lN553vdjOpCYFm+sW0Z5Mi4 +qsFkk4AmXIIEFOPt6zKxMioLYDQ9Sw/BUv6EZGeANWr/bhmaE+dMcKJt5le/0jJm +2ahsVB9fbFu9jBFeYb7Ba/x2aLkEGMxaDLla+6EQhj148fTnS1wjmX9G2cNzJvj/ ++C2EfKJIuDJDqw2oS2FGVpP37FA2Bz2vga0QatNneLkGKCFI3ZTenBznoN+fmurX +TL3eJE4IFNrANCcdfMpdyLAtXz4KpjcehqpZMu70er3d30zbi1l0Ajz4dU+WKz/a +NQES+vMkT2wqjXHVTjrNwodxw3oLK/EuTgwoxIHJuplx5E5Wrdx9g7Gl1PBIJL8V +xiOYS5N7CakyALvdhP7cPubA2+TPAjNInxiAcmhdASS/Vrmpvrkat6XhGn8h9liv +ysDOpMQmYQkmgZBpW8yBKK7JABGGsJADJ3E6J5MMWBX2RR4kFoqVGAzdOU3oyaTy +I0kz5sfuahaWpdYJVlkO+esc0CRXw8fLDYivabK2tOgUEWeZsZGZ9uK6aV1VxTAY +9Guu3BJ4Rv/KP/hk7mP8rIeCwotV66/2H8nq72ImQhzSVyWcxbFf2rJiFQJ3BFwA +WoRMgEwjGJWqzhJZUYpUAQ== +-----END CERTIFICATE----- diff --git a/cinder/tests/var/certificate.crt b/cinder/tests/var/certificate.crt new file mode 100644 index 0000000000..3c1aa6363b --- /dev/null +++ b/cinder/tests/var/certificate.crt @@ -0,0 +1,30 @@ +-----BEGIN CERTIFICATE----- +MIIFLjCCAxYCAQEwDQYJKoZIhvcNAQEFBQAwYTELMAkGA1UEBhMCQVUxEzARBgNV 
+BAgTClNvbWUtU3RhdGUxFTATBgNVBAoTDE9wZW5zdGFjayBDQTESMBAGA1UECxMJ +R2xhbmNlIENBMRIwEAYDVQQDEwlHbGFuY2UgQ0EwHhcNMTIwMjA5MTcxMDUzWhcN +MjIwMjA2MTcxMDUzWjBZMQswCQYDVQQGEwJBVTETMBEGA1UECBMKU29tZS1TdGF0 +ZTESMBAGA1UEChMJT3BlbnN0YWNrMQ8wDQYDVQQLEwZHbGFuY2UxEDAOBgNVBAMT +BzAuMC4wLjAwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDXpUkQN6pu +avo+gz3o1K4krVdPl1m7NjNJDyD/+ZH0EGNcEN7iag1qPE7JsjqGPNZsQK1dMoXb +Sz+OSi9qvNeJnBcfwUx5qTAtwyAb9AxGkwuMafIU+lWbsclo+dPGsja01ywbXTCZ +bF32iqnpOMYhfxWUdoQYiBkhxxhW9eMPKLS/KkP8/bx+Vaa2XJiAebqkd9nrksAA +BeGc9mlafYBEmiChPdJEPw+1ePA4QVq9aPepDsqAKtGN8JLpmoC3BdxQQTbbwL3Q +8fTXK4tCNUaVk4AbDy/McFq6y0ocQoBPJjihOY35mWG/OLtcI99yPOpWGnps/5aG +/64DDJ2D67Fnaj6gKHV+6TXFO8KZxlnxtgtiZDJBZkneTBt9ArSOv+l6NBsumRz0 +iEJ4o4H1S2TSMnprAvX7WnGtc6Xi9gXahYcDHEelwwYzqAiTBv6hxSp4MZ2dNXa+ +KzOitC7ZbV2qsg0au0wjfE/oSQ3NvsvUr8nOmfutJTvHRAwbC1v4G/tuAsO7O0w2 +0u2B3u+pG06m5+rnEqp+rB9hmukRYTfgEFRRsVIvpFl/cwvPXKRcX03UIMx+lLr9 +Ft+ep7YooBhY3wY2kwCxD4lRYNmbwsCIVywZt40f/4ad98TkufR9NhsfycxGeqbr +mTMFlZ8TTlmP82iohekKCOvoyEuTIWL2+wIDAQABMA0GCSqGSIb3DQEBBQUAA4IC +AQBMUBgV0R+Qltf4Du7u/8IFmGAoKR/mktB7R1gRRAqsvecUt7kIwBexGdavGg1y +0pU0+lgUZjJ20N1SlPD8gkNHfXE1fL6fmMjWz4dtYJjzRVhpufHPeBW4tl8DgHPN +rBGAYQ+drDSXaEjiPQifuzKx8WS+DGA3ki4co5mPjVnVH1xvLIdFsk89z3b3YD1k +yCJ/a9K36x6Z/c67JK7s6MWtrdRF9+MVnRKJ2PK4xznd1kBz16V+RA466wBDdARY +vFbtkafbEqOb96QTonIZB7+fAldKDPZYnwPqasreLmaGOaM8sxtlPYAJ5bjDONbc +AaXG8BMRQyO4FyH237otDKlxPyHOFV66BaffF5S8OlwIMiZoIvq+IcTZOdtDUSW2 +KHNLfe5QEDZdKjWCBrfqAfvNuG13m03WqfmcMHl3o/KiPJlx8l9Z4QEzZ9xcyQGL +cncgeHM9wJtzi2cD/rTDNFsx/gxvoyutRmno7I3NRbKmpsXF4StZioU3USRspB07 +hYXOVnG3pS+PjVby7ThT3gvFHSocguOsxClx1epdUJAmJUbmM7NmOp5WVBVtMtC2 +Su4NG/xJciXitKzw+btb7C7RjO6OEqv/1X/oBDzKBWQAwxUC+lqmnM7W6oqWJFEM +YfTLnrjs7Hj6ThMGcEnfvc46dWK3dz0RjsQzUxugPuEkLA== +-----END CERTIFICATE----- diff --git a/cinder/tests/var/privatekey.key b/cinder/tests/var/privatekey.key new file mode 100644 index 0000000000..b63df3d29d --- /dev/null +++ b/cinder/tests/var/privatekey.key @@ -0,0 +1,51 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIJKAIBAAKCAgEA16VJEDeqbmr6PoM96NSuJK1XT5dZuzYzSQ8g//mR9BBjXBDe +4moNajxOybI6hjzWbECtXTKF20s/jkovarzXiZwXH8FMeakwLcMgG/QMRpMLjGny +FPpVm7HJaPnTxrI2tNcsG10wmWxd9oqp6TjGIX8VlHaEGIgZIccYVvXjDyi0vypD +/P28flWmtlyYgHm6pHfZ65LAAAXhnPZpWn2ARJogoT3SRD8PtXjwOEFavWj3qQ7K +gCrRjfCS6ZqAtwXcUEE228C90PH01yuLQjVGlZOAGw8vzHBaustKHEKATyY4oTmN ++Zlhvzi7XCPfcjzqVhp6bP+Whv+uAwydg+uxZ2o+oCh1fuk1xTvCmcZZ8bYLYmQy +QWZJ3kwbfQK0jr/pejQbLpkc9IhCeKOB9Utk0jJ6awL1+1pxrXOl4vYF2oWHAxxH +pcMGM6gIkwb+ocUqeDGdnTV2viszorQu2W1dqrINGrtMI3xP6EkNzb7L1K/Jzpn7 +rSU7x0QMGwtb+Bv7bgLDuztMNtLtgd7vqRtOpufq5xKqfqwfYZrpEWE34BBUUbFS +L6RZf3MLz1ykXF9N1CDMfpS6/Rbfnqe2KKAYWN8GNpMAsQ+JUWDZm8LAiFcsGbeN +H/+GnffE5Ln0fTYbH8nMRnqm65kzBZWfE05Zj/NoqIXpCgjr6MhLkyFi9vsCAwEA +AQKCAgAA96baQcWr9SLmQOR4NOwLEhQAMWefpWCZhU3amB4FgEVR1mmJjnw868RW +t0v36jH0Dl44us9K6o2Ab+jCi9JTtbWM2Osk6JNkwSlVtsSPVH2KxbbmTTExH50N +sYE3tPj12rlB7isXpRrOzlRwzWZmJBHOtrFlAsdKFYCQc03vdXlKGkBv1BuSXYP/ +8W5ltSYXMspxehkOZvhaIejbFREMPbzDvGlDER1a7Q320qQ7kUr7ISvbY1XJUzj1 +f1HwgEA6w/AhED5Jv6wfgvx+8Yo9hYnflTPbsO1XRS4x7kJxGHTMlFuEsSF1ICYH +Bcos0wUiGcBO2N6uAFuhe98BBn+nOwAPZYWwGkmVuK2psm2mXAHx94GT/XqgK/1r +VWGSoOV7Fhjauc2Nv8/vJU18DXT3OY5hc4iXVeEBkuZwRb/NVUtnFoHxVO/Mp5Fh +/W5KZaLWVrLghzvSQ/KUIM0k4lfKDZpY9ZpOdNgWDyZY8tNrXumUZZimzWdXZ9vR +dBssmd8qEKs1AHGFnMDt56IjLGou6j0qnWsLdR1e/WEFsYzGXLVHCv6vXRNkbjqh +WFw5nA+2Dw1YAsy+YkTfgx2pOe+exM/wxsVPa7tG9oZ374dywUi1k6VoHw5dkmJw +1hbXqSLZtx2N51G+SpGmNAV4vLUF0y3dy2wnrzFkFT4uxh1w8QKCAQEA+h6LwHTK +hgcJx6CQQ6zYRqXo4wdvMooY1FcqJOq7LvJUA2CX5OOLs8qN1TyFrOCuAUTurOrM 
+ABlQ0FpsIaP8TOGz72dHe2eLB+dD6Bqjn10sEFMn54zWd/w9ympQrO9jb5X3ViTh +sCcdYyXVS9Hz8nzbbIF+DaKlxF2Hh71uRDxXpMPxRcGbOIuKZXUj6RkTIulzqT6o +uawlegWxch05QSgzq/1ASxtjTzo4iuDCAii3N45xqxnB+fV9NXEt4R2oOGquBRPJ +LxKcOnaQKBD0YNX4muTq+zPlv/kOb8/ys2WGWDUrNkpyJXqhTve4KONjqM7+iL/U +4WdJuiCjonzk/QKCAQEA3Lc+kNq35FNLxMcnCVcUgkmiCWZ4dyGZZPdqjOPww1+n +bbudGPzY1nxOvE60dZM4or/tm6qlXYfb2UU3+OOJrK9s297EQybZ8DTZu2GHyitc +NSFV3Gl4cgvKdbieGKkk9X2dV9xSNesNvX9lJEnQxuwHDTeo8ubLHtV88Ml1xokn +7W+IFiyEuUIL4e5/fadbrI3EwMrbCF4+9VcfABx4PTNMzdc8LsncCMXE+jFX8AWp +TsT2JezTe5o2WpvBoKMAYhJQNQiaWATn00pDVY/70H1vK3ljomAa1IUdOr/AhAF7 +3jL0MYMgXSHzXZOKAtc7yf+QfFWF1Ls8+sen1clJVwKCAQEAp59rB0r+Iz56RmgL +5t7ifs5XujbURemY5E2aN+18DuVmenD0uvfoO1DnJt4NtCNLWhxpXEdq+jH9H/VJ +fG4a+ydT4IC1vjVRTrWlo9qeh4H4suQX3S1c2kKY4pvHf25blH/Lp9bFzbkZD8Ze +IRcOxxb4MsrBwL+dGnGYD9dbG63ZCtoqSxaKQSX7VS1hKKmeUopj8ivFBdIht5oz +JogBQ/J+Vqg9u1gagRFCrYgdXTcOOtRix0lW336vL+6u0ax/fXe5MjvlW3+8Zc3p +pIBgVrlvh9ccx8crFTIDg9m4DJRgqaLQV+0ifI2np3WK3RQvSQWYPetZ7sm69ltD +bvUGvQKCAQAz5CEhjUqOs8asjOXwnDiGKSmfbCgGWi/mPQUf+rcwN9z1P5a/uTKB +utgIDbj/q401Nkp2vrgCNV7KxitSqKxFnTjKuKUL5KZ4gvRtyZBTR751/1BgcauP +pJYE91K0GZBG5zGG5pWtd4XTd5Af5/rdycAeq2ddNEWtCiRFuBeohbaNbBtimzTZ +GV4R0DDJKf+zoeEQMqEsZnwG0mTHceoS+WylOGU92teQeG7HI7K5C5uymTwFzpgq +ByegRd5QFgKRDB0vWsZuyzh1xI/wHdnmOpdYcUGre0zTijhFB7ALWQ32P6SJv3ps +av78kSNxZ4j3BM7DbJf6W8sKasZazOghAoIBAHekpBcLq9gRv2+NfLYxWN2sTZVB +1ldwioG7rWvk5YQR2akukecI3NRjtC5gG2vverawG852Y4+oLfgRMHxgp0qNStwX +juTykzPkCwZn8AyR+avC3mkrtJyM3IigcYOu4/UoaRDFa0xvCC1EfumpnKXIpHag +miSQZf2sVbgqb3/LWvHIg/ceOP9oGJve87/HVfQtBoLaIe5RXCWkqB7mcI/exvTS +8ShaW6v2Fe5Bzdvawj7sbsVYRWe93Aq2tmIgSX320D2RVepb6mjD4nr0IUaM3Yed +TFT7e2ikWXyDLLgVkDTU4Qe8fr3ZKGfanCIDzvgNw6H1gRi+2WQgOmjilMQ= +-----END RSA PRIVATE KEY----- diff --git a/cinder/tests/windows/__init__.py b/cinder/tests/windows/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/cinder/tests/windows/basetestcase.py b/cinder/tests/windows/basetestcase.py new file mode 100644 index 0000000000..44a9adb6a0 --- /dev/null +++ b/cinder/tests/windows/basetestcase.py @@ -0,0 +1,96 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 Cloudbase Solutions Srl +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +TestCase for MockProxy based tests and related classes. 
+""" + +import cinder.test +import gzip +import os +import pickle + +from cinder.tests.windows import mockproxy + +gen_test_mocks_key = 'CINDER_GENERATE_TEST_MOCKS' + + +class BaseTestCase(cinder.test.TestCase): + """TestCase for MockProxy based tests.""" + + def run(self, result=None): + self._currentResult = result + super(BaseTestCase, self).run(result) + + def setUp(self): + super(BaseTestCase, self).setUp() + self._mps = {} + + def tearDown(self): + super(BaseTestCase, self).tearDown() + + has_errors = len([test for (test, msgs) in self._currentResult.errors + if test.id() == self.id()]) > 0 + failed = len([test for (test, msgs) in self._currentResult.failures + if test.id() == self.id()]) > 0 + + if not has_errors and not failed: + self._save_mock_proxies() + + def _save_mock(self, name, mock): + path = self._get_stub_file_path(self.id(), name) + pickle.dump(mock, gzip.open(path, 'wb')) + + def _get_stub_file_path(self, test_name, mock_name): + # test naming differs between platforms + prefix = 'cinder.tests.' + if test_name.startswith(prefix): + test_name = test_name[len(prefix):] + file_name = '{0}_{1}.p.gz'.format(test_name, mock_name) + return os.path.join(os.path.dirname(mockproxy.__file__), + "stubs", file_name) + + def _load_mock(self, name): + path = self._get_stub_file_path(self.id(), name) + if os.path.exists(path): + return pickle.load(gzip.open(path, 'rb')) + else: + return None + + def _load_mock_or_create_proxy(self, module_name): + m = None + if (not gen_test_mocks_key in os.environ or + os.environ[gen_test_mocks_key].lower() + not in ['true', 'yes', '1']): + m = self._load_mock(module_name) + else: + module = __import__(module_name) + m = mockproxy.MockProxy(module) + self._mps[module_name] = m + return m + + def _inject_mocks_in_modules(self, objects_to_mock, modules_to_test): + for module_name in objects_to_mock: + mp = self._load_mock_or_create_proxy(module_name) + for mt in modules_to_test: + module_local_name = module_name.split('.')[-1] + setattr(mt, module_local_name, mp) + + def _save_mock_proxies(self): + for name, mp in self._mps.items(): + m = mp.get_mock() + if m.has_values(): + self._save_mock(name, m) diff --git a/cinder/tests/windows/db_fakes.py b/cinder/tests/windows/db_fakes.py new file mode 100644 index 0000000000..a93dbc6f9b --- /dev/null +++ b/cinder/tests/windows/db_fakes.py @@ -0,0 +1,36 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 Pedro Navarro Perez +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +""" +Stubouts, mocks and fixtures for windows volume test suite +""" + + +def get_fake_volume_info(name): + return {'name': name, + 'size': 1, + 'provider_location': 'iqn.2010-10.org.openstack:' + name, + 'id': 1, + 'provider_auth': None} + + +def get_fake_snapshot_info(volume_name, snapshot_name): + return {'name': snapshot_name, + 'volume_name': volume_name, } + + +def get_fake_connector_info(initiator): + return {'initiator': initiator, } diff --git a/cinder/tests/windows/mockproxy.py b/cinder/tests/windows/mockproxy.py new file mode 100644 index 0000000000..d39a96300d --- /dev/null +++ b/cinder/tests/windows/mockproxy.py @@ -0,0 +1,238 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 Cloudbase Solutions Srl +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations + +""" +Classes for dynamic generation of mock objects. +""" + +import inspect + + +def serialize_obj(obj): + if isinstance(obj, float): + val = str(round(obj, 10)) + elif isinstance(obj, dict): + d = {} + for k1, v1 in obj.items(): + d[k1] = serialize_obj(v1) + val = str(d) + elif isinstance(obj, list): + l1 = [] + for i1 in obj: + l1.append(serialize_obj(i1)) + val = str(l1) + elif isinstance(obj, tuple): + l1 = () + for i1 in obj: + l1 = l1 + (serialize_obj(i1),) + val = str(l1) + else: + val = str(obj) + return val + + +def serialize_args(*args, **kwargs): + """Workaround for float string conversion issues in Python 2.6.""" + return serialize_obj((args, kwargs)) + + +class Mock(object): + def _get_next_value(self, name): + c = self._access_count.get(name) + if c is None: + c = 0 + else: + c = c + 1 + self._access_count[name] = c + return self._values[name][c] + + def _get_next_ret_value(self, name, params): + d = self._access_count.get(name) + if d is None: + d = {} + self._access_count[name] = d + c = d.get(params) + if c is None: + c = 0 + else: + c = c + 1 + d[params] = c + return self._values[name][params][c] + + def __init__(self, values): + self._values = values + self._access_count = {} + + def has_values(self): + return len(self._values) > 0 + + def __getattr__(self, name): + if name.startswith('__') and name.endswith('__'): + return object.__getattribute__(self, name) + else: + if isinstance(self._values[name], dict): + def newfunc(*args, **kwargs): + params = serialize_args(args, kwargs) + return self._get_next_ret_value(name, params) + return newfunc + else: + return self._get_next_value(name) + + def __str__(self): + return self._get_next_value('__str__') + + def __iter__(self): + return getattr(self._get_next_value('__iter__'), '__iter__')() + + def __len__(self): + return self._get_next_value('__len__') + + def __getitem__(self, key): + return self._get_next_ret_value('__getitem__', str(key)) + + def __call__(self, *args, **kwargs): + params = serialize_args(args, kwargs) + return self._get_next_ret_value('__call__', params) + + +class MockProxy(object): + def __init__(self, wrapped): + self._wrapped = wrapped + self._recorded_values = {} + + def _get_proxy_object(self, obj): + if (hasattr(obj, '__dict__') or + 
isinstance(obj, tuple) or
+                isinstance(obj, list) or
+                isinstance(obj, dict)):
+            p = MockProxy(obj)
+        else:
+            p = obj
+        return p
+
+    def __getattr__(self, name):
+        if name in ['_wrapped']:
+            return object.__getattribute__(self, name)
+        else:
+            attr = getattr(self._wrapped, name)
+            if (inspect.isfunction(attr) or
+                    inspect.ismethod(attr) or
+                    inspect.isbuiltin(attr)):
+                def newfunc(*args, **kwargs):
+                    result = attr(*args, **kwargs)
+                    p = self._get_proxy_object(result)
+                    params = serialize_args(args, kwargs)
+                    self._add_recorded_ret_value(name, params, p)
+                    return p
+                return newfunc
+            elif (hasattr(attr, '__dict__') or
+                  (hasattr(attr, '__getitem__') and not
+                   (isinstance(attr, str) or isinstance(attr, unicode)))):
+                p = MockProxy(attr)
+            else:
+                p = attr
+            self._add_recorded_value(name, p)
+            return p
+
+    def __setattr__(self, name, value):
+        if name in ['_wrapped', '_recorded_values']:
+            object.__setattr__(self, name, value)
+        else:
+            setattr(self._wrapped, name, value)
+
+    def _add_recorded_ret_value(self, name, params, val):
+        d = self._recorded_values.get(name)
+        if d is None:
+            d = {}
+            self._recorded_values[name] = d
+        l = d.get(params)
+        if l is None:
+            l = []
+            d[params] = l
+        l.append(val)
+
+    def _add_recorded_value(self, name, val):
+        if name not in self._recorded_values:
+            self._recorded_values[name] = []
+        self._recorded_values[name].append(val)
+
+    def get_mock(self):
+        values = {}
+        for k, v in self._recorded_values.items():
+            if isinstance(v, dict):
+                d = {}
+                values[k] = d
+                for k1, v1 in v.items():
+                    l = []
+                    d[k1] = l
+                    for i1 in v1:
+                        if isinstance(i1, MockProxy):
+                            l.append(i1.get_mock())
+                        else:
+                            l.append(i1)
+            else:
+                l = []
+                values[k] = l
+                for i in v:
+                    if isinstance(i, MockProxy):
+                        l.append(i.get_mock())
+                    elif isinstance(i, dict):
+                        d = {}
+                        # iterate the recorded dict itself, not the list v
+                        for k1, v1 in i.items():
+                            if isinstance(v1, MockProxy):
+                                d[k1] = v1.get_mock()
+                            else:
+                                d[k1] = v1
+                        l.append(d)
+                    elif isinstance(i, list):
+                        l1 = []
+                        for i1 in i:
+                            if isinstance(i1, MockProxy):
+                                l1.append(i1.get_mock())
+                            else:
+                                l1.append(i1)
+                        l.append(l1)
+                    else:
+                        l.append(i)
+        return Mock(values)
+
+    def __str__(self):
+        s = str(self._wrapped)
+        self._add_recorded_value('__str__', s)
+        return s
+
+    def __len__(self):
+        l = len(self._wrapped)
+        self._add_recorded_value('__len__', l)
+        return l
+
+    def __iter__(self):
+        it = []
+        for i in self._wrapped:
+            it.append(self._get_proxy_object(i))
+        self._add_recorded_value('__iter__', it)
+        return iter(it)
+
+    def __getitem__(self, key):
+        p = self._get_proxy_object(self._wrapped[key])
+        self._add_recorded_ret_value('__getitem__', str(key), p)
+        return p
+
+    def __call__(self, *args, **kwargs):
+        c = self._wrapped(*args, **kwargs)
+        p = self._get_proxy_object(c)
+        params = serialize_args(args, kwargs)
+        self._add_recorded_ret_value('__call__', params, p)
+        return p
diff --git a/cinder/tests/windows/stubs/README.rst b/cinder/tests/windows/stubs/README.rst
new file mode 100644
index 0000000000..150fd3ad17
--- /dev/null
+++ b/cinder/tests/windows/stubs/README.rst
@@ -0,0 +1,2 @@
+Files with extension p.gz are compressed pickle files containing serialized
+mocks used during unit testing.
diff --git a/cinder/tests/windows/stubs/test_windows.TestWindowsDriver.test_check_for_setup_errors_wmi.p.gz b/cinder/tests/windows/stubs/test_windows.TestWindowsDriver.test_check_for_setup_errors_wmi.p.gz
new file mode 100644
index 0000000000000000000000000000000000000000..dada5e65488544313a26c2cdbef0d70915191d59
GIT binary patch
literal 473
[Editor's note: the original patch continues here with GIT binary patch
blobs -- base85-encoded gzip data -- for the serialized-mock stub files added
under cinder/tests/windows/stubs/, including the
test_windows.TestWindowsDriver fixtures test_check_for_setup_errors_wmi,
test_create_export_os, test_create_snapshot_os, test_create_snapshot_wmi,
test_create_volume_os, test_delete_snapshot_wmi, test_delete_volume_os,
test_ensure_export_os and test_remove_export_os (.p.gz), among others. The
blobs are not human-readable; per the README above they are compressed
pickles of mocks recorded for the Windows driver unit tests.]
diff --git
a/cinder/tests/windows/windowsutils.py b/cinder/tests/windows/windowsutils.py
new file mode 100644
index 0000000000..4ef3212ebb
--- /dev/null
+++ b/cinder/tests/windows/windowsutils.py
@@ -0,0 +1,145 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 Pedro Navarro Perez
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Windows storage classes to be used in testing.
+"""
+
+import os
+import sys
+
+from cinder import flags
+
+# Check needed for unit testing on Unix
+if os.name == 'nt':
+    import wmi
+
+FLAGS = flags.FLAGS
+
+
+class WindowsUtils(object):
+    def __init__(self):
+        self.__conn_cimv2 = None
+        self.__conn_wmi = None
+
+    @property
+    def _conn_cimv2(self):
+        if self.__conn_cimv2 is None:
+            self.__conn_cimv2 = wmi.WMI(moniker='//./root/cimv2')
+        return self.__conn_cimv2
+
+    @property
+    def _conn_wmi(self):
+        if self.__conn_wmi is None:
+            self.__conn_wmi = wmi.WMI(moniker='//./root/wmi')
+        return self.__conn_wmi
+
+    def find_vhd_by_name(self, name):
+        '''Finds a volume by its name.'''
+
+        wt_disks = self._conn_wmi.WT_Disk(Description=name)
+        return wt_disks
+
+    def volume_exists(self, name):
+        '''Checks if a volume exists.'''
+
+        wt_disks = self.find_vhd_by_name(name)
+        if len(wt_disks) > 0:
+            return True
+        return False
+
+    def snapshot_exists(self, name):
+        '''Checks if a snapshot exists.'''
+
+        wt_snapshots = self.find_snapshot_by_name(name)
+        if len(wt_snapshots) > 0:
+            return True
+        return False
+
+    def find_snapshot_by_name(self, name):
+        '''Finds a snapshot by its name.'''
+
+        wt_snapshots = self._conn_wmi.WT_Snapshot(Description=name)
+        return wt_snapshots
+
+    def delete_volume(self, name):
+        '''Deletes a volume.'''
+
+        wt_disk = self._conn_wmi.WT_Disk(Description=name)[0]
+        wt_disk.Delete_()
+        vhdfiles = self._conn_cimv2.query(
+            "Select * from CIM_DataFile where Name = '" +
+            self._get_vhd_path(name) + "'")
+        if len(vhdfiles) > 0:
+            vhdfiles[0].Delete()
+
+    def _get_vhd_path(self, volume_name):
+        '''Gets the disk path of the volume.'''
+
+        base_vhd_folder = FLAGS.windows_iscsi_lun_path
+        return os.path.join(base_vhd_folder, volume_name + ".vhd")
+
+    def delete_snapshot(self, name):
+        '''Deletes a snapshot.'''
+
+        wt_snapshot = self._conn_wmi.WT_Snapshot(Description=name)[0]
+        wt_snapshot.Delete_()
+        vhdfile = self._conn_cimv2.query(
+            "Select * from CIM_DataFile where Name = '" +
+            self._get_vhd_path(name) + "'")[0]
+        vhdfile.Delete()
+
+    def find_initiator_ids(self, target_name, initiator_name):
+        '''Finds an initiator id by its name.'''
+        wt_idmethod = self._conn_wmi.WT_IDMethod(HostName=target_name,
+                                                 Method=4,
+                                                 Value=initiator_name)
+        return wt_idmethod
+
+    def initiator_id_exists(self, target_name, initiator_name):
+        '''Checks if an initiator id exists.'''
+
+        wt_idmethod = self.find_initiator_ids(target_name, initiator_name)
+        if len(wt_idmethod) > 0:
+            return True
+        return False
+
+    def find_exports(self, target_name):
+        '''Finds an export by its target name.'''
+
+        wt_host = self._conn_wmi.WT_Host(HostName=target_name)
+        return wt_host
+
+    def export_exists(self, target_name):
+        '''Checks if an export exists.'''
+
+        wt_host = self.find_exports(target_name)
+        if len(wt_host) > 0:
+            return True
+        return False
+
+    def delete_initiator_id(self, target_name, initiator_name):
+        '''Deletes an initiator id.'''
+
+        wt_init_id = self.find_initiator_ids(target_name, initiator_name)[0]
+        wt_init_id.Delete_()
+
+    def delete_export(self, target_name):
+        '''Deletes an export.'''
+
+        wt_host = self.find_exports(target_name)[0]
+        wt_host.RemoveAllWTDisks()
+        wt_host.Delete_()
diff --git a/cinder/tests/xenapi/__init__.py b/cinder/tests/xenapi/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/cinder/utils.py b/cinder/utils.py
new file mode 100644
index 0000000000..281dd65864
--- /dev/null
+++ b/cinder/utils.py
@@ -0,0 +1,1216 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# Copyright 2011 Justin Santa Barbara
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Utilities and helper functions."""
+
+import contextlib
+import datetime
+import errno
+import functools
+import hashlib
+import inspect
+import itertools
+import os
+import paramiko
+import pyclbr
+import random
+import re
+import shlex
+import shutil
+import signal
+import socket
+import struct
+import sys
+import tempfile
+import time
+import types
+import warnings
+from xml.dom import minidom
+from xml.parsers import expat
+from xml import sax
+from xml.sax import expatreader
+from xml.sax import saxutils
+
+from eventlet import event
+from eventlet.green import subprocess
+from eventlet import greenthread
+from eventlet import pools
+
+from cinder import exception
+from cinder import flags
+from cinder.openstack.common import excutils
+from cinder.openstack.common import importutils
+from cinder.openstack.common import lockutils
+from cinder.openstack.common import log as logging
+from cinder.openstack.common import timeutils
+
+
+LOG = logging.getLogger(__name__)
+ISO_TIME_FORMAT = "%Y-%m-%dT%H:%M:%S"
+PERFECT_TIME_FORMAT = "%Y-%m-%dT%H:%M:%S.%f"
+FLAGS = flags.FLAGS
+
+synchronized = lockutils.synchronized_with_prefix('cinder-')
+
+
+def find_config(config_path):
+    """Find a configuration file using the given hint.
+
+    :param config_path: Full or relative path to the config.
+    :returns: Full path of the config, if it exists.
+ :raises: `cinder.exception.ConfigNotFound` + + """ + possible_locations = [ + config_path, + os.path.join(FLAGS.state_path, "etc", "cinder", config_path), + os.path.join(FLAGS.state_path, "etc", config_path), + os.path.join(FLAGS.state_path, config_path), + "/etc/cinder/%s" % config_path, + ] + + for path in possible_locations: + if os.path.exists(path): + return os.path.abspath(path) + + raise exception.ConfigNotFound(path=os.path.abspath(config_path)) + + +def fetchfile(url, target): + LOG.debug(_('Fetching %s') % url) + execute('curl', '--fail', url, '-o', target) + + +def _subprocess_setup(): + # Python installs a SIGPIPE handler by default. This is usually not what + # non-Python subprocesses expect. + signal.signal(signal.SIGPIPE, signal.SIG_DFL) + + +def execute(*cmd, **kwargs): + """Helper method to execute command with optional retry. + + If you add a run_as_root=True command, don't forget to add the + corresponding filter to etc/cinder/rootwrap.d ! + + :param cmd: Passed to subprocess.Popen. + :param process_input: Send to opened process. + :param check_exit_code: Single bool, int, or list of allowed exit + codes. Defaults to [0]. Raise + exception.ProcessExecutionError unless + program exits with one of these code. + :param delay_on_retry: True | False. Defaults to True. If set to + True, wait a short amount of time + before retrying. + :param attempts: How many times to retry cmd. + :param run_as_root: True | False. Defaults to False. If set to True, + the command is prefixed by the command specified + in the root_helper FLAG. + + :raises exception.Error: on receiving unknown arguments + :raises exception.ProcessExecutionError: + + :returns: a tuple, (stdout, stderr) from the spawned process, or None if + the command fails. + """ + + process_input = kwargs.pop('process_input', None) + check_exit_code = kwargs.pop('check_exit_code', [0]) + ignore_exit_code = False + if isinstance(check_exit_code, bool): + ignore_exit_code = not check_exit_code + check_exit_code = [0] + elif isinstance(check_exit_code, int): + check_exit_code = [check_exit_code] + delay_on_retry = kwargs.pop('delay_on_retry', True) + attempts = kwargs.pop('attempts', 1) + run_as_root = kwargs.pop('run_as_root', False) + shell = kwargs.pop('shell', False) + + if len(kwargs): + raise exception.Error(_('Got unknown keyword args ' + 'to utils.execute: %r') % kwargs) + + if run_as_root: + + if FLAGS.rootwrap_config is None or FLAGS.root_helper != 'sudo': + LOG.deprecated(_('The root_helper option (which lets you specify ' + 'a root wrapper different from cinder-rootwrap, ' + 'and defaults to using sudo) is now deprecated. 
' + 'You should use the rootwrap_config option ' + 'instead.')) + + if (FLAGS.rootwrap_config is not None): + cmd = ['sudo', 'cinder-rootwrap', + FLAGS.rootwrap_config] + list(cmd) + else: + cmd = shlex.split(FLAGS.root_helper) + list(cmd) + cmd = map(str, cmd) + + while attempts > 0: + attempts -= 1 + try: + LOG.debug(_('Running cmd (subprocess): %s'), ' '.join(cmd)) + _PIPE = subprocess.PIPE # pylint: disable=E1101 + obj = subprocess.Popen(cmd, + stdin=_PIPE, + stdout=_PIPE, + stderr=_PIPE, + close_fds=True, + preexec_fn=_subprocess_setup, + shell=shell) + result = None + if process_input is not None: + result = obj.communicate(process_input) + else: + result = obj.communicate() + obj.stdin.close() # pylint: disable=E1101 + _returncode = obj.returncode # pylint: disable=E1101 + if _returncode: + LOG.debug(_('Result was %s') % _returncode) + if not ignore_exit_code and _returncode not in check_exit_code: + (stdout, stderr) = result + raise exception.ProcessExecutionError( + exit_code=_returncode, + stdout=stdout, + stderr=stderr, + cmd=' '.join(cmd)) + return result + except exception.ProcessExecutionError: + if not attempts: + raise + else: + LOG.debug(_('%r failed. Retrying.'), cmd) + if delay_on_retry: + greenthread.sleep(random.randint(20, 200) / 100.0) + finally: + # NOTE(termie): this appears to be necessary to let the subprocess + # call clean something up in between calls, without + # it two execute calls in a row hangs the second one + greenthread.sleep(0) + + +def trycmd(*args, **kwargs): + """ + A wrapper around execute() to more easily handle warnings and errors. + + Returns an (out, err) tuple of strings containing the output of + the command's stdout and stderr. If 'err' is not empty then the + command can be considered to have failed. + + :discard_warnings True | False. Defaults to False. If set to True, + then for succeeding commands, stderr is cleared + + """ + discard_warnings = kwargs.pop('discard_warnings', False) + + try: + out, err = execute(*args, **kwargs) + failed = False + except exception.ProcessExecutionError, exn: + out, err = '', str(exn) + LOG.debug(err) + failed = True + + if not failed and discard_warnings and err: + # Handle commands that output to stderr but otherwise succeed + LOG.debug(err) + err = '' + + return out, err + + +def ssh_execute(ssh, cmd, process_input=None, + addl_env=None, check_exit_code=True): + LOG.debug(_('Running cmd (SSH): %s'), cmd) + if addl_env: + raise exception.Error(_('Environment not supported over SSH')) + + if process_input: + # This is (probably) fixable if we need it... + raise exception.Error(_('process_input not supported over SSH')) + + stdin_stream, stdout_stream, stderr_stream = ssh.exec_command(cmd) + channel = stdout_stream.channel + + #stdin.write('process_input would go here') + #stdin.flush() + + # NOTE(justinsb): This seems suspicious... 
+ # ...other SSH clients have buffering issues with this approach + stdout = stdout_stream.read() + stderr = stderr_stream.read() + stdin_stream.close() + stdout_stream.close() + stderr_stream.close() + + exit_status = channel.recv_exit_status() + + # exit_status == -1 if no exit code was returned + if exit_status != -1: + LOG.debug(_('Result was %s') % exit_status) + if check_exit_code and exit_status != 0: + raise exception.ProcessExecutionError(exit_code=exit_status, + stdout=stdout, + stderr=stderr, + cmd=cmd) + channel.close() + return (stdout, stderr) + + +def create_channel(client, width, height): + """Invoke an interactive shell session on server.""" + channel = client.invoke_shell() + channel.resize_pty(width, height) + return channel + + +class SSHPool(pools.Pool): + """A simple eventlet pool to hold ssh connections.""" + + def __init__(self, ip, port, conn_timeout, login, password=None, + privatekey=None, *args, **kwargs): + self.ip = ip + self.port = port + self.login = login + self.password = password + self.conn_timeout = conn_timeout if conn_timeout else None + self.privatekey = privatekey + super(SSHPool, self).__init__(*args, **kwargs) + + def create(self): + try: + ssh = paramiko.SSHClient() + ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) + if self.password: + ssh.connect(self.ip, + port=self.port, + username=self.login, + password=self.password, + timeout=self.conn_timeout) + elif self.privatekey: + pkfile = os.path.expanduser(self.privatekey) + privatekey = paramiko.RSAKey.from_private_key_file(pkfile) + ssh.connect(self.ip, + port=self.port, + username=self.login, + pkey=privatekey, + timeout=self.conn_timeout) + else: + msg = _("Specify a password or private_key") + raise exception.CinderException(msg) + + # Paramiko by default sets the socket timeout to 0.1 seconds, + # ignoring what we set thru the sshclient. This doesn't help for + # keeping long lived connections. Hence we have to bypass it, by + # overriding it after the transport is initialized. We are setting + # the sockettimeout to None and setting a keepalive packet so that, + # the server will keep the connection open. All that does is send + # a keepalive packet every ssh_conn_timeout seconds. + if self.conn_timeout: + transport = ssh.get_transport() + transport.sock.settimeout(None) + transport.set_keepalive(self.conn_timeout) + return ssh + except Exception as e: + msg = _("Error connecting via ssh: %s") % e + LOG.error(msg) + raise paramiko.SSHException(msg) + + def get(self): + """ + Return an item from the pool, when one is available. This may + cause the calling greenthread to block. Check if a connection is active + before returning it. For dead connections create and return a new + connection. 
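+
+        Illustrative use of the pool (editor's sketch; host, credentials and
+        command are made up):
+
+            pool = SSHPool('192.0.2.10', 22, conn_timeout=30,
+                           login='admin', password='secret', max_size=4)
+            with pool.item() as ssh:
+                out, err = ssh_execute(ssh, 'uname -a')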
+ """ + if self.free_items: + conn = self.free_items.popleft() + if conn: + if conn.get_transport().is_active(): + return conn + else: + conn.close() + return self.create() + if self.current_size < self.max_size: + created = self.create() + self.current_size += 1 + return created + return self.channel.get() + + def remove(self, ssh): + """Close an ssh client and remove it from free_items.""" + ssh.close() + ssh = None + if ssh in self.free_items: + self.free_items.pop(ssh) + if self.current_size > 0: + self.current_size -= 1 + + +def cinderdir(): + import cinder + return os.path.abspath(cinder.__file__).split('cinder/__init__.py')[0] + + +def debug(arg): + LOG.debug(_('debug in callback: %s'), arg) + return arg + + +def generate_uid(topic, size=8): + characters = '01234567890abcdefghijklmnopqrstuvwxyz' + choices = [random.choice(characters) for x in xrange(size)] + return '%s-%s' % (topic, ''.join(choices)) + + +# Default symbols to use for passwords. Avoids visually confusing characters. +# ~6 bits per symbol +DEFAULT_PASSWORD_SYMBOLS = ('23456789', # Removed: 0,1 + 'ABCDEFGHJKLMNPQRSTUVWXYZ', # Removed: I, O + 'abcdefghijkmnopqrstuvwxyz') # Removed: l + + +# ~5 bits per symbol +EASIER_PASSWORD_SYMBOLS = ('23456789', # Removed: 0, 1 + 'ABCDEFGHJKLMNPQRSTUVWXYZ') # Removed: I, O + + +def last_completed_audit_period(unit=None): + """This method gives you the most recently *completed* audit period. + + arguments: + units: string, one of 'hour', 'day', 'month', 'year' + Periods normally begin at the beginning (UTC) of the + period unit (So a 'day' period begins at midnight UTC, + a 'month' unit on the 1st, a 'year' on Jan, 1) + unit string may be appended with an optional offset + like so: 'day@18' This will begin the period at 18:00 + UTC. 'month@15' starts a monthly period on the 15th, + and year@3 begins a yearly one on March 1st. 
+ + + returns: 2 tuple of datetimes (begin, end) + The begin timestamp of this audit period is the same as the + end of the previous.""" + if not unit: + unit = FLAGS.volume_usage_audit_period + + offset = 0 + if '@' in unit: + unit, offset = unit.split("@", 1) + offset = int(offset) + + rightnow = timeutils.utcnow() + if unit not in ('month', 'day', 'year', 'hour'): + raise ValueError('Time period must be hour, day, month or year') + if unit == 'month': + if offset == 0: + offset = 1 + end = datetime.datetime(day=offset, + month=rightnow.month, + year=rightnow.year) + if end >= rightnow: + year = rightnow.year + if 1 >= rightnow.month: + year -= 1 + month = 12 + (rightnow.month - 1) + else: + month = rightnow.month - 1 + end = datetime.datetime(day=offset, + month=month, + year=year) + year = end.year + if 1 >= end.month: + year -= 1 + month = 12 + (end.month - 1) + else: + month = end.month - 1 + begin = datetime.datetime(day=offset, month=month, year=year) + + elif unit == 'year': + if offset == 0: + offset = 1 + end = datetime.datetime(day=1, month=offset, year=rightnow.year) + if end >= rightnow: + end = datetime.datetime(day=1, + month=offset, + year=rightnow.year - 1) + begin = datetime.datetime(day=1, + month=offset, + year=rightnow.year - 2) + else: + begin = datetime.datetime(day=1, + month=offset, + year=rightnow.year - 1) + + elif unit == 'day': + end = datetime.datetime(hour=offset, + day=rightnow.day, + month=rightnow.month, + year=rightnow.year) + if end >= rightnow: + end = end - datetime.timedelta(days=1) + begin = end - datetime.timedelta(days=1) + + elif unit == 'hour': + end = rightnow.replace(minute=offset, second=0, microsecond=0) + if end >= rightnow: + end = end - datetime.timedelta(hours=1) + begin = end - datetime.timedelta(hours=1) + + return (begin, end) + + +def generate_password(length=20, symbolgroups=DEFAULT_PASSWORD_SYMBOLS): + """Generate a random password from the supplied symbol groups. + + At least one symbol from each group will be included. Unpredictable + results if length is less than the number of symbol groups. + + Believed to be reasonably secure (with a reasonable password length!) + + """ + r = random.SystemRandom() + + # NOTE(jerdfelt): Some password policies require at least one character + # from each group of symbols, so start off with one random character + # from each symbol group + password = [r.choice(s) for s in symbolgroups] + # If length < len(symbolgroups), the leading characters will only + # be from the first length groups. Try our best to not be predictable + # by shuffling and then truncating. + r.shuffle(password) + password = password[:length] + length -= len(password) + + # then fill with random characters from all symbol groups + symbols = ''.join(symbolgroups) + password.extend([r.choice(symbols) for _i in xrange(length)]) + + # finally shuffle to ensure first x characters aren't from a + # predictable group + r.shuffle(password) + + return ''.join(password) + + +def generate_username(length=20, symbolgroups=DEFAULT_PASSWORD_SYMBOLS): + # Use the same implementation as the password generation. 
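+    # Editor's illustration (output is random; the value shown is made up):
+    #     generate_username(8) could return something like 'K3nqR7wz' --
+    #     at least one character drawn from each symbol group, shuffled.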
+ return generate_password(length, symbolgroups) + + +def last_octet(address): + return int(address.split('.')[-1]) + + +def get_my_linklocal(interface): + try: + if_str = execute('ip', '-f', 'inet6', '-o', 'addr', 'show', interface) + condition = '\s+inet6\s+([0-9a-f:]+)/\d+\s+scope\s+link' + links = [re.search(condition, x) for x in if_str[0].split('\n')] + address = [w.group(1) for w in links if w is not None] + if address[0] is not None: + return address[0] + else: + raise exception.Error(_('Link Local address is not found.:%s') + % if_str) + except Exception as ex: + raise exception.Error(_("Couldn't get Link Local IP of %(interface)s" + " :%(ex)s") % locals()) + + +def parse_mailmap(mailmap='.mailmap'): + mapping = {} + if os.path.exists(mailmap): + fp = open(mailmap, 'r') + for l in fp: + l = l.strip() + if not l.startswith('#') and ' ' in l: + canonical_email, alias = l.split(' ') + mapping[alias.lower()] = canonical_email.lower() + return mapping + + +def str_dict_replace(s, mapping): + for s1, s2 in mapping.iteritems(): + s = s.replace(s1, s2) + return s + + +class LazyPluggable(object): + """A pluggable backend loaded lazily based on some value.""" + + def __init__(self, pivot, **backends): + self.__backends = backends + self.__pivot = pivot + self.__backend = None + + def __get_backend(self): + if not self.__backend: + backend_name = FLAGS[self.__pivot] + if backend_name not in self.__backends: + raise exception.Error(_('Invalid backend: %s') % backend_name) + + backend = self.__backends[backend_name] + if isinstance(backend, tuple): + name = backend[0] + fromlist = backend[1] + else: + name = backend + fromlist = backend + + self.__backend = __import__(name, None, None, fromlist) + LOG.debug(_('backend %s'), self.__backend) + return self.__backend + + def __getattr__(self, key): + backend = self.__get_backend() + return getattr(backend, key) + + +class LoopingCallDone(Exception): + """Exception to break out and stop a LoopingCall. + + The poll-function passed to LoopingCall can raise this exception to + break out of the loop normally. This is somewhat analogous to + StopIteration. 
+
+    An optional return-value can be included as the argument to the exception;
+    this return-value will be returned by LoopingCall.wait()
+
+    """
+
+    def __init__(self, retvalue=True):
+        """:param retvalue: Value that LoopingCall.wait() should return."""
+        self.retvalue = retvalue
+
+
+class LoopingCall(object):
+    def __init__(self, f=None, *args, **kw):
+        self.args = args
+        self.kw = kw
+        self.f = f
+        self._running = False
+
+    def start(self, interval, initial_delay=None):
+        self._running = True
+        done = event.Event()
+
+        def _inner():
+            if initial_delay:
+                greenthread.sleep(initial_delay)
+
+            try:
+                while self._running:
+                    self.f(*self.args, **self.kw)
+                    if not self._running:
+                        break
+                    greenthread.sleep(interval)
+            except LoopingCallDone, e:
+                self.stop()
+                done.send(e.retvalue)
+            except Exception:
+                LOG.exception(_('in looping call'))
+                done.send_exception(*sys.exc_info())
+                return
+            else:
+                done.send(True)
+
+        self.done = done
+
+        greenthread.spawn(_inner)
+        return self.done
+
+    def stop(self):
+        self._running = False
+
+    def wait(self):
+        return self.done.wait()
+
+
+class ProtectedExpatParser(expatreader.ExpatParser):
+    """An expat parser which disables DTD's and entities by default."""
+
+    def __init__(self, forbid_dtd=True, forbid_entities=True,
+                 *args, **kwargs):
+        # Python 2.x old style class
+        expatreader.ExpatParser.__init__(self, *args, **kwargs)
+        self.forbid_dtd = forbid_dtd
+        self.forbid_entities = forbid_entities
+
+    def start_doctype_decl(self, name, sysid, pubid, has_internal_subset):
+        raise ValueError("Inline DTD forbidden")
+
+    def entity_decl(self, entityName, is_parameter_entity, value, base,
+                    systemId, publicId, notationName):
+        raise ValueError("<!ENTITY> forbidden")
+
+    def unparsed_entity_decl(self, name, base, sysid, pubid, notation_name):
+        # expat 1.2
+        raise ValueError("<!ENTITY> forbidden")
+
+    def reset(self):
+        expatreader.ExpatParser.reset(self)
+        if self.forbid_dtd:
+            self._parser.StartDoctypeDeclHandler = self.start_doctype_decl
+        if self.forbid_entities:
+            self._parser.EntityDeclHandler = self.entity_decl
+            self._parser.UnparsedEntityDeclHandler = self.unparsed_entity_decl
+
+
+def safe_minidom_parse_string(xml_string):
+    """Parse an XML string using minidom safely.
+
+    """
+    try:
+        return minidom.parseString(xml_string, parser=ProtectedExpatParser())
+    except sax.SAXParseException as se:
+        raise expat.ExpatError()
+
+
+def xhtml_escape(value):
+    """Escapes a string so it is valid within XML or XHTML.
+
+    """
+    return saxutils.escape(value, {'"': '&quot;', "'": '&apos;'})
+
+
+def utf8(value):
+    """Try to turn a string into utf-8 if possible.
+
+    Code is directly from the utf8 function in
+    http://github.com/facebook/tornado/blob/master/tornado/escape.py
+
+    """
+    if isinstance(value, unicode):
+        return value.encode('utf-8')
+    assert isinstance(value, str)
+    return value
+
+
+def delete_if_exists(pathname):
+    """Delete a file, but ignore file not found error."""
+
+    try:
+        os.unlink(pathname)
+    except OSError as e:
+        if e.errno == errno.ENOENT:
+            return
+        else:
+            raise
+
+
+def get_from_path(items, path):
+    """Returns a list of items matching the specified path.
+
+    Takes an XPath-like expression e.g. prop1/prop2/prop3, and for each item
+    in items, looks up items[prop1][prop2][prop3].  Like XPath, if any of the
+    intermediate results are lists it will treat each list item individually.
+    A 'None' in items or any child expressions will be ignored, this function
+    will not throw because of None (anywhere) in items.  The returned list
+    will contain no None values.
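+
+    Editor's examples (illustrative):
+        get_from_path([{'a': {'b': 1}}, {'a': {'b': 2}}], 'a/b') => [1, 2]
+        get_from_path([{'a': None}, {'a': {'b': 3}}], 'a/b')    => [3]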
+ + """ + if path is None: + raise exception.Error('Invalid mini_xpath') + + (first_token, sep, remainder) = path.partition('/') + + if first_token == '': + raise exception.Error('Invalid mini_xpath') + + results = [] + + if items is None: + return results + + if not isinstance(items, list): + # Wrap single objects in a list + items = [items] + + for item in items: + if item is None: + continue + get_method = getattr(item, 'get', None) + if get_method is None: + continue + child = get_method(first_token) + if child is None: + continue + if isinstance(child, list): + # Flatten intermediate lists + for x in child: + results.append(x) + else: + results.append(child) + + if not sep: + # No more tokens + return results + else: + return get_from_path(results, remainder) + + +def flatten_dict(dict_, flattened=None): + """Recursively flatten a nested dictionary.""" + flattened = flattened or {} + for key, value in dict_.iteritems(): + if hasattr(value, 'iteritems'): + flatten_dict(value, flattened) + else: + flattened[key] = value + return flattened + + +def partition_dict(dict_, keys): + """Return two dicts, one with `keys` the other with everything else.""" + intersection = {} + difference = {} + for key, value in dict_.iteritems(): + if key in keys: + intersection[key] = value + else: + difference[key] = value + return intersection, difference + + +def map_dict_keys(dict_, key_map): + """Return a dict in which the dictionaries keys are mapped to new keys.""" + mapped = {} + for key, value in dict_.iteritems(): + mapped_key = key_map[key] if key in key_map else key + mapped[mapped_key] = value + return mapped + + +def subset_dict(dict_, keys): + """Return a dict that only contains a subset of keys.""" + subset = partition_dict(dict_, keys)[0] + return subset + + +def check_isinstance(obj, cls): + """Checks that obj is of type cls, and lets PyLint infer types.""" + if isinstance(obj, cls): + return obj + raise Exception(_('Expected object of type: %s') % (str(cls))) + # TODO(justinsb): Can we make this better?? + return cls() # Ugly PyLint hack + + +def is_valid_boolstr(val): + """Check if the provided string is a valid bool string or not. """ + val = str(val).lower() + return (val == 'true' or val == 'false' or + val == 'yes' or val == 'no' or + val == 'y' or val == 'n' or + val == '1' or val == '0') + + +def is_valid_ipv4(address): + """valid the address strictly as per format xxx.xxx.xxx.xxx. + where xxx is a value between 0 and 255. + """ + parts = address.split(".") + if len(parts) != 4: + return False + for item in parts: + try: + if not 0 <= int(item) <= 255: + return False + except ValueError: + return False + return True + + +def monkey_patch(): + """ If the Flags.monkey_patch set as True, + this function patches a decorator + for all functions in specified modules. + You can set decorators for each modules + using FLAGS.monkey_patch_modules. + The format is "Module path:Decorator function". + Example: 'cinder.api.ec2.cloud:' \ + cinder.openstack.common.notifier.api.notify_decorator' + + Parameters of the decorator is as follows. + (See cinder.openstack.common.notifier.api.notify_decorator) + + name - name of the function + function - object of the function + """ + # If FLAGS.monkey_patch is not True, this function do nothing. 
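+    # Editor's illustration (hypothetical config values):
+    #     monkey_patch = True
+    #     monkey_patch_modules = ['cinder.volume.api:cinder.openstack.'
+    #                             'common.notifier.api.notify_decorator']
+    # This would wrap every class method and module-level function in
+    # cinder.volume.api with the notifier decorator.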
+ if not FLAGS.monkey_patch: + return + # Get list of modules and decorators + for module_and_decorator in FLAGS.monkey_patch_modules: + module, decorator_name = module_and_decorator.split(':') + # import decorator function + decorator = importutils.import_class(decorator_name) + __import__(module) + # Retrieve module information using pyclbr + module_data = pyclbr.readmodule_ex(module) + for key in module_data.keys(): + # set the decorator for the class methods + if isinstance(module_data[key], pyclbr.Class): + clz = importutils.import_class("%s.%s" % (module, key)) + for method, func in inspect.getmembers(clz, inspect.ismethod): + setattr( + clz, method, + decorator("%s.%s.%s" % (module, key, method), func)) + # set the decorator for the function + if isinstance(module_data[key], pyclbr.Function): + func = importutils.import_class("%s.%s" % (module, key)) + setattr(sys.modules[module], key, + decorator("%s.%s" % (module, key), func)) + + +def convert_to_list_dict(lst, label): + """Convert a value or list into a list of dicts""" + if not lst: + return None + if not isinstance(lst, list): + lst = [lst] + return [{label: x} for x in lst] + + +def timefunc(func): + """Decorator that logs how long a particular function took to execute""" + @functools.wraps(func) + def inner(*args, **kwargs): + start_time = time.time() + try: + return func(*args, **kwargs) + finally: + total_time = time.time() - start_time + LOG.debug(_("timefunc: '%(name)s' took %(total_time).2f secs") % + dict(name=func.__name__, total_time=total_time)) + return inner + + +def generate_glance_url(): + """Generate the URL to glance.""" + # TODO(jk0): This will eventually need to take SSL into consideration + # when supported in glance. + return "http://%s:%d" % (FLAGS.glance_host, FLAGS.glance_port) + + +@contextlib.contextmanager +def logging_error(message): + """Catches exception, write message to the log, re-raise. + This is a common refinement of save_and_reraise that writes a specific + message to the log. + """ + try: + yield + except Exception as error: + with excutils.save_and_reraise_exception(): + LOG.exception(message) + + +@contextlib.contextmanager +def remove_path_on_error(path): + """Protect code that wants to operate on PATH atomically. + Any exception will cause PATH to be removed. + """ + try: + yield + except Exception: + with excutils.save_and_reraise_exception(): + delete_if_exists(path) + + +def make_dev_path(dev, partition=None, base='/dev'): + """Return a path to a particular device. + + >>> make_dev_path('xvdc') + /dev/xvdc + + >>> make_dev_path('xvdc', 1) + /dev/xvdc1 + """ + path = os.path.join(base, dev) + if partition: + path += str(partition) + return path + + +def total_seconds(td): + """Local total_seconds implementation for compatibility with python 2.6""" + if hasattr(td, 'total_seconds'): + return td.total_seconds() + else: + return ((td.days * 86400 + td.seconds) * 10 ** 6 + + td.microseconds) / 10.0 ** 6 + + +def sanitize_hostname(hostname): + """Return a hostname which conforms to RFC-952 and RFC-1123 specs.""" + if isinstance(hostname, unicode): + hostname = hostname.encode('latin-1', 'ignore') + + hostname = re.sub('[ _]', '-', hostname) + hostname = re.sub('[^\w.-]+', '', hostname) + hostname = hostname.lower() + hostname = hostname.strip('.-') + + return hostname + + +def read_cached_file(filename, cache_info, reload_func=None): + """Read from a file if it has been modified. + + :param cache_info: dictionary to hold opaque cache. 
+    :param reload_func: optional function to be called with data when
+                        file is reloaded due to a modification.
+
+    :returns: data from file
+
+    """
+    mtime = os.path.getmtime(filename)
+    if not cache_info or mtime != cache_info.get('mtime'):
+        with open(filename) as fap:
+            cache_info['data'] = fap.read()
+        cache_info['mtime'] = mtime
+        if reload_func:
+            reload_func(cache_info['data'])
+    return cache_info['data']
+
+
+def file_open(*args, **kwargs):
+    """Open file
+
+    see built-in file() documentation for more details
+
+    Note: The reason this is kept in a separate module is to easily
+    be able to provide a stub module that doesn't alter system
+    state at all (for unit tests)
+    """
+    return file(*args, **kwargs)
+
+
+def hash_file(file_like_object):
+    """Generate a hash for the contents of a file."""
+    checksum = hashlib.sha1()
+    any(map(checksum.update, iter(lambda: file_like_object.read(32768), '')))
+    return checksum.hexdigest()
+
+
+@contextlib.contextmanager
+def temporary_mutation(obj, **kwargs):
+    """Temporarily set the attr on a particular object to a given value then
+    revert when finished.
+
+    One use of this is to temporarily set the read_deleted flag on a context
+    object:
+
+        with temporary_mutation(context, read_deleted="yes"):
+            do_something_that_needed_deleted_objects()
+    """
+    NOT_PRESENT = object()
+
+    old_values = {}
+    for attr, new_value in kwargs.items():
+        old_values[attr] = getattr(obj, attr, NOT_PRESENT)
+        setattr(obj, attr, new_value)
+
+    try:
+        yield
+    finally:
+        for attr, old_value in old_values.items():
+            if old_value is NOT_PRESENT:
+                # the attribute did not exist before the mutation, so
+                # remove it rather than restoring a value
+                delattr(obj, attr)
+            else:
+                setattr(obj, attr, old_value)
+
+
+def service_is_up(service):
+    """Check whether a service is up based on last heartbeat."""
+    last_heartbeat = service['updated_at'] or service['created_at']
+    # Timestamps in DB are UTC.
+    elapsed = total_seconds(timeutils.utcnow() - last_heartbeat)
+    return abs(elapsed) <= FLAGS.service_down_time
+
+
+def generate_mac_address():
+    """Generate an Ethernet MAC address."""
+    # NOTE(vish): We would prefer to use 0xfe here to ensure that linux
+    #             bridge mac addresses don't change, but it appears to
+    #             conflict with libvirt, so we use the next highest octet
+    #             that has the unicast and locally administered bits set
+    #             properly: 0xfa.
+    #             Discussion: https://bugs.launchpad.net/cinder/+bug/921838
+    mac = [0xfa, 0x16, 0x3e,
+           random.randint(0x00, 0x7f),
+           random.randint(0x00, 0xff),
+           random.randint(0x00, 0xff)]
+    return ':'.join(map(lambda x: "%02x" % x, mac))
+
+
+def read_file_as_root(file_path):
+    """Secure helper to read file as root."""
+    try:
+        out, _err = execute('cat', file_path, run_as_root=True)
+        return out
+    except exception.ProcessExecutionError:
+        raise exception.FileNotFound(file_path=file_path)
+
+
+@contextlib.contextmanager
+def temporary_chown(path, owner_uid=None):
+    """Temporarily chown a path.
+
+    :params owner_uid: UID of temporary owner (defaults to current user)
+    """
+    if owner_uid is None:
+        owner_uid = os.getuid()
+
+    orig_uid = os.stat(path).st_uid
+
+    if orig_uid != owner_uid:
+        execute('chown', owner_uid, path, run_as_root=True)
+    try:
+        yield
+    finally:
+        if orig_uid != owner_uid:
+            execute('chown', orig_uid, path, run_as_root=True)
+
+
+@contextlib.contextmanager
+def tempdir(**kwargs):
+    tmpdir = tempfile.mkdtemp(**kwargs)
+    try:
+        yield tmpdir
+    finally:
+        try:
+            shutil.rmtree(tmpdir)
+        except OSError, e:
+            LOG.debug(_('Could not remove tmpdir: %s'), str(e))
+
+
+def strcmp_const_time(s1, s2):
+    """Constant-time string comparison.
+ + :params s1: the first string + :params s2: the second string + + :return: True if the strings are equal. + + This function takes two strings and compares them. It is intended to be + used when doing a comparison for authentication purposes to help guard + against timing attacks. + """ + if len(s1) != len(s2): + return False + result = 0 + for (a, b) in zip(s1, s2): + result |= ord(a) ^ ord(b) + return result == 0 + + +def walk_class_hierarchy(clazz, encountered=None): + """Walk class hierarchy, yielding most derived classes first""" + if not encountered: + encountered = [] + for subclass in clazz.__subclasses__(): + if subclass not in encountered: + encountered.append(subclass) + # drill down to leaves first + for subsubclass in walk_class_hierarchy(subclass, encountered): + yield subsubclass + yield subclass + + +class UndoManager(object): + """Provides a mechanism to facilitate rolling back a series of actions + when an exception is raised. + """ + def __init__(self): + self.undo_stack = [] + + def undo_with(self, undo_func): + self.undo_stack.append(undo_func) + + def _rollback(self): + for undo_func in reversed(self.undo_stack): + undo_func() + + def rollback_and_reraise(self, msg=None, **kwargs): + """Rollback a series of actions then re-raise the exception. + + .. note:: (sirp) This should only be called within an + exception handler. + """ + with excutils.save_and_reraise_exception(): + if msg: + LOG.exception(msg, **kwargs) + + self._rollback() + + +def ensure_tree(path): + """Create a directory (and any ancestor directories required) + + :param path: Directory to create + """ + try: + os.makedirs(path) + except OSError as exc: + if exc.errno == errno.EEXIST: + if not os.path.isdir(path): + raise + else: + raise + + +def to_bytes(text, default=0): + """Try to turn a string into a number of bytes. Looks at the last + characters of the text to determine what conversion is needed to + turn the input text into a byte number. + + Supports: B/b, K/k, M/m, G/g, T/t (or the same with b/B on the end) + + """ + BYTE_MULTIPLIERS = { + '': 1, + 't': 1024 ** 4, + 'g': 1024 ** 3, + 'm': 1024 ** 2, + 'k': 1024, + } + + # Take off everything not number 'like' (which should leave + # only the byte 'identifier' left) + mult_key_org = text.lstrip('-1234567890') + mult_key = mult_key_org.lower() + mult_key_len = len(mult_key) + if mult_key.endswith("b"): + mult_key = mult_key[0:-1] + try: + multiplier = BYTE_MULTIPLIERS[mult_key] + if mult_key_len: + # Empty cases shouldn't cause text[0:-0] + text = text[0:-mult_key_len] + return int(text) * multiplier + except KeyError: + msg = _('Unknown byte multiplier: %s') % mult_key_org + raise TypeError(msg) + except ValueError: + return default diff --git a/cinder/version.py b/cinder/version.py new file mode 100644 index 0000000000..ad1ebc0837 --- /dev/null +++ b/cinder/version.py @@ -0,0 +1,25 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
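+# under the License.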
+ +from pbr import version as pbr_version + +CINDER_VENDOR = "OpenStack Foundation" +CINDER_PRODUCT = "OpenStack Cinder" +CINDER_PACKAGE = None # OS distro package version suffix + +loaded = False +version_info = pbr_version.VersionInfo('cinder') +version_string = version_info.version_string diff --git a/cinder/volume/__init__.py b/cinder/volume/__init__.py new file mode 100644 index 0000000000..e810a93d48 --- /dev/null +++ b/cinder/volume/__init__.py @@ -0,0 +1,25 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +# Importing full names to not pollute the namespace and cause possible +# collisions with use of 'from cinder.volume import ' elsewhere. +import cinder.flags +import cinder.openstack.common.importutils + +API = cinder.openstack.common.importutils.import_class( + cinder.flags.FLAGS.volume_api_class) diff --git a/cinder/volume/api.py b/cinder/volume/api.py new file mode 100644 index 0000000000..042ae6802a --- /dev/null +++ b/cinder/volume/api.py @@ -0,0 +1,765 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Handles all requests relating to volumes. 
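+
+The API class below validates each request, enforces policy and quota, and
+then hands the operation to the scheduler or to a volume host over RPC.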
+""" + +import functools + +from oslo.config import cfg + +from cinder.db import base +from cinder import exception +from cinder import flags +from cinder.image import glance +from cinder.openstack.common import excutils +from cinder.openstack.common import log as logging +from cinder.openstack.common import timeutils +import cinder.policy +from cinder import quota +from cinder.scheduler import rpcapi as scheduler_rpcapi +from cinder.volume import rpcapi as volume_rpcapi +from cinder.volume import volume_types + +volume_host_opt = cfg.BoolOpt('snapshot_same_host', + default=True, + help='Create volume from snapshot at the host ' + 'where snapshot resides') + +FLAGS = flags.FLAGS +FLAGS.register_opt(volume_host_opt) +flags.DECLARE('storage_availability_zone', 'cinder.volume.manager') + +LOG = logging.getLogger(__name__) +GB = 1048576 * 1024 +QUOTAS = quota.QUOTAS + + +def wrap_check_policy(func): + """Check policy corresponding to the wrapped methods prior to execution + + This decorator requires the first 3 args of the wrapped function + to be (self, context, volume) + """ + @functools.wraps(func) + def wrapped(self, context, target_obj, *args, **kwargs): + check_policy(context, func.__name__, target_obj) + return func(self, context, target_obj, *args, **kwargs) + + return wrapped + + +def check_policy(context, action, target_obj=None): + target = { + 'project_id': context.project_id, + 'user_id': context.user_id, + } + target.update(target_obj or {}) + _action = 'volume:%s' % action + cinder.policy.enforce(context, _action, target) + + +class API(base.Base): + """API for interacting with the volume manager.""" + + def __init__(self, db_driver=None, image_service=None): + self.image_service = (image_service or + glance.get_default_image_service()) + self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI() + self.volume_rpcapi = volume_rpcapi.VolumeAPI() + super(API, self).__init__(db_driver) + + def create(self, context, size, name, description, snapshot=None, + image_id=None, volume_type=None, metadata=None, + availability_zone=None, source_volume=None): + + exclusive_options = (snapshot, image_id, source_volume) + exclusive_options_set = sum(1 for option in + exclusive_options if option is not None) + if exclusive_options_set > 1: + msg = (_("May specify only one of snapshot, imageRef " + "or source volume")) + raise exception.InvalidInput(reason=msg) + + check_policy(context, 'create') + if snapshot is not None: + if snapshot['status'] != "available": + msg = _("status must be available") + raise exception.InvalidSnapshot(reason=msg) + if not size: + size = snapshot['volume_size'] + elif size < snapshot['volume_size']: + msg = _("Volume size cannot be lesser than" + " the Snapshot size") + raise exception.InvalidInput(reason=msg) + snapshot_id = snapshot['id'] + else: + snapshot_id = None + + if source_volume is not None: + if source_volume['status'] == "error": + msg = _("Unable to clone volumes that are in an error state") + raise exception.InvalidSourceVolume(reason=msg) + if not size: + size = source_volume['size'] + else: + if size < source_volume['size']: + msg = _("Clones currently must be " + ">= original volume size.") + raise exception.InvalidInput(reason=msg) + source_volid = source_volume['id'] + else: + source_volid = None + + def as_int(s): + try: + return int(s) + except (ValueError, TypeError): + return s + + # tolerate size as stringified int + size = as_int(size) + + if not isinstance(size, int) or size <= 0: + msg = (_("Volume size '%s' must be an integer and greater 
than 0") + % size) + raise exception.InvalidInput(reason=msg) + + if (image_id and not (source_volume or snapshot)): + # check image existence + image_meta = self.image_service.show(context, image_id) + image_size_in_gb = (int(image_meta['size']) + GB - 1) / GB + #check image size is not larger than volume size. + if image_size_in_gb > size: + msg = _('Size of specified image is larger than volume size.') + raise exception.InvalidInput(reason=msg) + # Check image minDisk requirement is met for the particular volume + if size < image_meta.get('min_disk', 0): + msg = _('Image minDisk size is larger than the volume size.') + raise exception.InvalidInput(reason=msg) + + try: + reservations = QUOTAS.reserve(context, volumes=1, gigabytes=size) + except exception.OverQuota as e: + overs = e.kwargs['overs'] + usages = e.kwargs['usages'] + quotas = e.kwargs['quotas'] + + def _consumed(name): + return (usages[name]['reserved'] + usages[name]['in_use']) + + if 'gigabytes' in overs: + msg = _("Quota exceeded for %(s_pid)s, tried to create " + "%(s_size)sG volume (%(d_consumed)dG of %(d_quota)dG " + "already consumed)") + LOG.warn(msg % {'s_pid': context.project_id, + 's_size': size, + 'd_consumed': _consumed('gigabytes'), + 'd_quota': quotas['gigabytes']}) + raise exception.VolumeSizeExceedsAvailableQuota() + elif 'volumes' in overs: + msg = _("Quota exceeded for %(s_pid)s, tried to create " + "volume (%(d_consumed)d volumes " + "already consumed)") + LOG.warn(msg % {'s_pid': context.project_id, + 'd_consumed': _consumed('volumes')}) + raise exception.VolumeLimitExceeded(allowed=quotas['volumes']) + + if availability_zone is None: + availability_zone = FLAGS.storage_availability_zone + + if not volume_type and not source_volume: + volume_type = volume_types.get_default_volume_type() + + if not volume_type and source_volume: + volume_type_id = source_volume['volume_type_id'] + else: + volume_type_id = volume_type.get('id') + + self._check_metadata_properties(context, metadata) + options = {'size': size, + 'user_id': context.user_id, + 'project_id': context.project_id, + 'snapshot_id': snapshot_id, + 'availability_zone': availability_zone, + 'status': "creating", + 'attach_status': "detached", + 'display_name': name, + 'display_description': description, + 'volume_type_id': volume_type_id, + 'metadata': metadata, + 'source_volid': source_volid} + + try: + volume = self.db.volume_create(context, options) + QUOTAS.commit(context, reservations) + except Exception: + with excutils.save_and_reraise_exception(): + try: + self.db.volume_destroy(context, volume['id']) + finally: + QUOTAS.rollback(context, reservations) + + request_spec = {'volume_properties': options, + 'volume_type': volume_type, + 'volume_id': volume['id'], + 'snapshot_id': volume['snapshot_id'], + 'image_id': image_id, + 'source_volid': volume['source_volid']} + + filter_properties = {} + + self._cast_create_volume(context, request_spec, filter_properties) + + return volume + + def _cast_create_volume(self, context, request_spec, filter_properties): + + # NOTE(Rongze Zhu): It is a simple solution for bug 1008866 + # If snapshot_id is set, make the call create volume directly to + # the volume host where the snapshot resides instead of passing it + # through the scheduler. So snapshot can be copy to new volume. 
+ + source_volid = request_spec['source_volid'] + volume_id = request_spec['volume_id'] + snapshot_id = request_spec['snapshot_id'] + image_id = request_spec['image_id'] + + if snapshot_id and FLAGS.snapshot_same_host: + snapshot_ref = self.db.snapshot_get(context, snapshot_id) + source_volume_ref = self.db.volume_get(context, + snapshot_ref['volume_id']) + now = timeutils.utcnow() + values = {'host': source_volume_ref['host'], 'scheduled_at': now} + volume_ref = self.db.volume_update(context, volume_id, values) + + # bypass scheduler and send request directly to volume + self.volume_rpcapi.create_volume( + context, + volume_ref, + volume_ref['host'], + request_spec=request_spec, + filter_properties=filter_properties, + allow_reschedule=False, + snapshot_id=snapshot_id, + image_id=image_id) + elif source_volid: + source_volume_ref = self.db.volume_get(context, + source_volid) + now = timeutils.utcnow() + values = {'host': source_volume_ref['host'], 'scheduled_at': now} + volume_ref = self.db.volume_update(context, volume_id, values) + + # bypass scheduler and send request directly to volume + self.volume_rpcapi.create_volume( + context, + volume_ref, + volume_ref['host'], + request_spec=request_spec, + filter_properties=filter_properties, + allow_reschedule=False, + snapshot_id=snapshot_id, + image_id=image_id, + source_volid=source_volid) + else: + self.scheduler_rpcapi.create_volume( + context, + FLAGS.volume_topic, + volume_id, + snapshot_id, + image_id, + request_spec=request_spec, + filter_properties=filter_properties) + + @wrap_check_policy + def delete(self, context, volume, force=False): + if context.is_admin and context.project_id != volume['project_id']: + project_id = volume['project_id'] + else: + project_id = context.project_id + + volume_id = volume['id'] + if not volume['host']: + # NOTE(vish): scheduling failed, so delete it + # Note(zhiteng): update volume quota reservation + try: + reservations = QUOTAS.reserve(context, + project_id=project_id, + volumes=-1, + gigabytes=-volume['size']) + except Exception: + reservations = None + LOG.exception(_("Failed to update quota for deleting volume")) + self.db.volume_destroy(context.elevated(), volume_id) + + if reservations: + QUOTAS.commit(context, reservations, project_id=project_id) + return + if not force and volume['status'] not in ["available", "error", + "error_restoring"]: + msg = _("Volume status must be available or error") + raise exception.InvalidVolume(reason=msg) + + snapshots = self.db.snapshot_get_all_for_volume(context, volume_id) + if len(snapshots): + msg = _("Volume still has %d dependent snapshots") % len(snapshots) + raise exception.InvalidVolume(reason=msg) + + now = timeutils.utcnow() + self.db.volume_update(context, volume_id, {'status': 'deleting', + 'terminated_at': now}) + + self.volume_rpcapi.delete_volume(context, volume) + + @wrap_check_policy + def update(self, context, volume, fields): + self.db.volume_update(context, volume['id'], fields) + + def get(self, context, volume_id): + rv = self.db.volume_get(context, volume_id) + glance_meta = rv.get('volume_glance_metadata', None) + volume = dict(rv.iteritems()) + check_policy(context, 'get', volume) + + # NOTE(jdg): As per bug 1115629 iteritems doesn't pick + # up the glance_meta dependency, add it explicitly if + # it exists in the rv + if glance_meta: + volume['volume_glance_metadata'] = glance_meta + + return volume + + def get_all(self, context, marker=None, limit=None, sort_key='created_at', + sort_dir='desc', filters={}): + 
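+        # Illustrative note on the filtering contract (commentary added for
+        # clarity; the call shown is hypothetical): a 'metadata' filter is
+        # matched key-by-key against each volume's metadata, e.g.
+        #
+        #   api.get_all(ctxt, filters={'metadata': {'tier': 'gold'}})
+        #
+        # keeps only volumes whose metadata contains tier=gold; any other
+        # filter key is compared against the volume field of the same name.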
check_policy(context, 'get_all') + + try: + if limit is not None: + limit = int(limit) + if limit < 0: + msg = _('limit param must be positive') + raise exception.InvalidInput(reason=msg) + except ValueError: + msg = _('limit param must be an integer') + raise exception.InvalidInput(reason=msg) + + if (context.is_admin and 'all_tenants' in filters): + # Need to remove all_tenants to pass the filtering below. + del filters['all_tenants'] + volumes = self.db.volume_get_all(context, marker, limit, sort_key, + sort_dir) + else: + volumes = self.db.volume_get_all_by_project(context, + context.project_id, + marker, limit, + sort_key, sort_dir) + + if filters: + LOG.debug(_("Searching by: %s") % str(filters)) + + def _check_metadata_match(volume, searchdict): + volume_metadata = {} + for i in volume.get('volume_metadata'): + volume_metadata[i['key']] = i['value'] + + for k, v in searchdict.iteritems(): + if (k not in volume_metadata.keys() or + volume_metadata[k] != v): + return False + return True + + # search_option to filter_name mapping. + filter_mapping = {'metadata': _check_metadata_match} + + result = [] + not_found = object() + for volume in volumes: + # go over all filters in the list + for opt, values in filters.iteritems(): + try: + filter_func = filter_mapping[opt] + except KeyError: + def filter_func(volume, value): + return volume.get(opt, not_found) == value + if not filter_func(volume, values): + break # volume doesn't match this filter + else: # did not break out loop + result.append(volume) # volume matches all filters + volumes = result + + return volumes + + def get_snapshot(self, context, snapshot_id): + check_policy(context, 'get_snapshot') + rv = self.db.snapshot_get(context, snapshot_id) + return dict(rv.iteritems()) + + def get_volume(self, context, volume_id): + check_policy(context, 'get_volume') + rv = self.db.volume_get(context, volume_id) + return dict(rv.iteritems()) + + def get_all_snapshots(self, context, search_opts=None): + check_policy(context, 'get_all_snapshots') + + search_opts = search_opts or {} + + if (context.is_admin and 'all_tenants' in search_opts): + # Need to remove all_tenants to pass the filtering below. + del search_opts['all_tenants'] + snapshots = self.db.snapshot_get_all(context) + else: + snapshots = self.db.snapshot_get_all_by_project( + context, context.project_id) + + if search_opts: + LOG.debug(_("Searching by: %s") % str(search_opts)) + + results = [] + not_found = object() + for snapshot in snapshots: + for opt, value in search_opts.iteritems(): + if snapshot.get(opt, not_found) != value: + break + else: + results.append(snapshot) + snapshots = results + return snapshots + + @wrap_check_policy + def check_attach(self, context, volume): + # TODO(vish): abstract status checking? + if volume['status'] != "available": + msg = _("status must be available") + raise exception.InvalidVolume(reason=msg) + if volume['attach_status'] == "attached": + msg = _("already attached") + raise exception.InvalidVolume(reason=msg) + + @wrap_check_policy + def check_detach(self, context, volume): + # TODO(vish): abstract status checking? 
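+        # Clarifying note (added commentary): a volume whose status is still
+        # 'available' was never attached, so a detach request for it is
+        # rejected as invalid below rather than treated as a no-op.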
+ if volume['status'] == "available": + msg = _("already detached") + raise exception.InvalidVolume(reason=msg) + + @wrap_check_policy + def reserve_volume(self, context, volume): + #NOTE(jdg): check for Race condition bug 1096983 + #explicitly get updated ref and check + volume = self.db.volume_get(context, volume['id']) + if volume['status'] == 'available': + self.update(context, volume, {"status": "attaching"}) + else: + msg = _("Volume status must be available to reserve") + LOG.error(msg) + raise exception.InvalidVolume(reason=msg) + + @wrap_check_policy + def unreserve_volume(self, context, volume): + if volume['status'] == "attaching": + self.update(context, volume, {"status": "available"}) + + @wrap_check_policy + def begin_detaching(self, context, volume): + self.update(context, volume, {"status": "detaching"}) + + @wrap_check_policy + def roll_detaching(self, context, volume): + if volume['status'] == "detaching": + self.update(context, volume, {"status": "in-use"}) + + @wrap_check_policy + def attach(self, context, volume, instance_uuid, mountpoint): + return self.volume_rpcapi.attach_volume(context, + volume, + instance_uuid, + mountpoint) + + @wrap_check_policy + def detach(self, context, volume): + return self.volume_rpcapi.detach_volume(context, volume) + + @wrap_check_policy + def initialize_connection(self, context, volume, connector): + return self.volume_rpcapi.initialize_connection(context, + volume, + connector) + + @wrap_check_policy + def terminate_connection(self, context, volume, connector, force=False): + self.unreserve_volume(context, volume) + return self.volume_rpcapi.terminate_connection(context, + volume, + connector, + force) + + def _create_snapshot(self, context, + volume, name, description, + force=False, metadata=None): + check_policy(context, 'create_snapshot', volume) + + if ((not force) and (volume['status'] != "available")): + msg = _("must be available") + raise exception.InvalidVolume(reason=msg) + + try: + if FLAGS.no_snapshot_gb_quota: + reservations = QUOTAS.reserve(context, snapshots=1) + else: + reservations = QUOTAS.reserve(context, snapshots=1, + gigabytes=volume['size']) + except exception.OverQuota as e: + overs = e.kwargs['overs'] + usages = e.kwargs['usages'] + quotas = e.kwargs['quotas'] + + def _consumed(name): + return (usages[name]['reserved'] + usages[name]['in_use']) + + if 'gigabytes' in overs: + msg = _("Quota exceeded for %(s_pid)s, tried to create " + "%(s_size)sG snapshot (%(d_consumed)dG of " + "%(d_quota)dG already consumed)") + LOG.warn(msg % {'s_pid': context.project_id, + 's_size': volume['size'], + 'd_consumed': _consumed('gigabytes'), + 'd_quota': quotas['gigabytes']}) + raise exception.VolumeSizeExceedsAvailableQuota() + elif 'snapshots' in overs: + msg = _("Quota exceeded for %(s_pid)s, tried to create " + "snapshot (%(d_consumed)d snapshots " + "already consumed)") + + LOG.warn(msg % {'s_pid': context.project_id, + 'd_consumed': _consumed('snapshots')}) + raise exception.SnapshotLimitExceeded( + allowed=quotas['snapshots']) + + self._check_metadata_properties(context, metadata) + options = {'volume_id': volume['id'], + 'user_id': context.user_id, + 'project_id': context.project_id, + 'status': "creating", + 'progress': '0%', + 'volume_size': volume['size'], + 'display_name': name, + 'display_description': description, + 'metadata': metadata} + + try: + snapshot = self.db.snapshot_create(context, options) + QUOTAS.commit(context, reservations) + except Exception: + with excutils.save_and_reraise_exception(): + try: + 
self.db.snapshot_destroy(context, snapshot['id'])
+                finally:
+                    QUOTAS.rollback(context, reservations)
+
+        self.volume_rpcapi.create_snapshot(context, volume, snapshot)
+
+        return snapshot
+
+    def create_snapshot(self, context,
+                        volume, name,
+                        description, metadata=None):
+        return self._create_snapshot(context, volume, name, description,
+                                     False, metadata)
+
+    def create_snapshot_force(self, context,
+                              volume, name,
+                              description, metadata=None):
+        return self._create_snapshot(context, volume, name, description,
+                                     True, metadata)
+
+    @wrap_check_policy
+    def delete_snapshot(self, context, snapshot, force=False):
+        if not force and snapshot['status'] not in ["available", "error"]:
+            msg = _("Volume Snapshot status must be available or error")
+            raise exception.InvalidSnapshot(reason=msg)
+        self.db.snapshot_update(context, snapshot['id'],
+                                {'status': 'deleting'})
+        volume = self.db.volume_get(context, snapshot['volume_id'])
+        self.volume_rpcapi.delete_snapshot(context, snapshot, volume['host'])
+
+    @wrap_check_policy
+    def update_snapshot(self, context, snapshot, fields):
+        self.db.snapshot_update(context, snapshot['id'], fields)
+
+    @wrap_check_policy
+    def get_volume_metadata(self, context, volume):
+        """Get all metadata associated with a volume."""
+        rv = self.db.volume_metadata_get(context, volume['id'])
+        return dict(rv.iteritems())
+
+    @wrap_check_policy
+    def delete_volume_metadata(self, context, volume, key):
+        """Delete the given metadata item from a volume."""
+        self.db.volume_metadata_delete(context, volume['id'], key)
+
+    def _check_metadata_properties(self, context, metadata=None):
+        if not metadata:
+            metadata = {}
+
+        for k, v in metadata.iteritems():
+            if len(k) == 0:
+                msg = _("Metadata property key blank")
+                LOG.warn(msg)
+                raise exception.InvalidVolumeMetadata(reason=msg)
+            if len(k) > 255:
+                msg = _("Metadata property key greater than 255 characters")
+                LOG.warn(msg)
+                raise exception.InvalidVolumeMetadataSize(reason=msg)
+            if len(v) > 255:
+                msg = _("Metadata property value greater than 255 characters")
+                LOG.warn(msg)
+                raise exception.InvalidVolumeMetadataSize(reason=msg)
+
+    @wrap_check_policy
+    def update_volume_metadata(self, context, volume, metadata, delete=False):
+        """Updates or creates volume metadata.
+
+        If delete is True, metadata items that are not specified in the
+        `metadata` argument will be deleted.
+
+        """
+        orig_meta = self.get_volume_metadata(context, volume)
+        if delete:
+            _metadata = metadata
+        else:
+            _metadata = orig_meta.copy()
+            _metadata.update(metadata)
+
+        self._check_metadata_properties(context, _metadata)
+
+        self.db.volume_metadata_update(context, volume['id'], _metadata, True)
+
+        # TODO(jdg): Implement an RPC call for drivers that may use this info
+
+        return _metadata
+
+    def get_volume_metadata_value(self, volume, key):
+        """Get value of particular metadata key."""
+        metadata = volume.get('volume_metadata')
+        if metadata:
+            for i in volume['volume_metadata']:
+                if i['key'] == key:
+                    return i['value']
+        return None
+
+    def get_snapshot_metadata(self, context, snapshot):
+        """Get all metadata associated with a snapshot."""
+        rv = self.db.snapshot_metadata_get(context, snapshot['id'])
+        return dict(rv.iteritems())
+
+    def delete_snapshot_metadata(self, context, snapshot, key):
+        """Delete the given metadata item from a snapshot."""
+        self.db.snapshot_metadata_delete(context, snapshot['id'], key)
+
+    def update_snapshot_metadata(self, context,
+                                 snapshot, metadata,
+                                 delete=False):
+        """Updates or creates snapshot metadata.
+ + If delete is True, metadata items that are not specified in the + `metadata` argument will be deleted. + + """ + orig_meta = self.get_snapshot_metadata(context, snapshot) + if delete: + _metadata = metadata + else: + _metadata = orig_meta.copy() + _metadata.update(metadata) + + self._check_metadata_properties(context, _metadata) + + self.db.snapshot_metadata_update(context, + snapshot['id'], + _metadata, + True) + + # TODO(jdg): Implement an RPC call for drivers that may use this info + + return _metadata + + def get_snapshot_metadata_value(self, snapshot, key): + pass + + @wrap_check_policy + def get_volume_image_metadata(self, context, volume): + db_data = self.db.volume_glance_metadata_get(context, volume['id']) + return dict( + (meta_entry.key, meta_entry.value) for meta_entry in db_data + ) + + def _check_volume_availability(self, context, volume, force): + """Check if the volume can be used.""" + if volume['status'] not in ['available', 'in-use']: + msg = _('Volume status must be available/in-use.') + raise exception.InvalidVolume(reason=msg) + if not force and 'in-use' == volume['status']: + msg = _('Volume status is in-use.') + raise exception.InvalidVolume(reason=msg) + + @wrap_check_policy + def copy_volume_to_image(self, context, volume, metadata, force): + """Create a new image from the specified volume.""" + self._check_volume_availability(context, volume, force) + + recv_metadata = self.image_service.create(context, metadata) + self.update(context, volume, {'status': 'uploading'}) + self.volume_rpcapi.copy_volume_to_image(context, + volume, + recv_metadata) + + response = {"id": volume['id'], + "updated_at": volume['updated_at'], + "status": 'uploading', + "display_description": volume['display_description'], + "size": volume['size'], + "volume_type": volume['volume_type'], + "image_id": recv_metadata['id'], + "container_format": recv_metadata['container_format'], + "disk_format": recv_metadata['disk_format'], + "image_name": recv_metadata.get('name', None)} + return response + + +class HostAPI(base.Base): + def __init__(self): + super(HostAPI, self).__init__() + + """Sub-set of the Volume Manager API for managing host operations.""" + def set_host_enabled(self, context, host, enabled): + """Sets the specified host's ability to accept new volumes.""" + raise NotImplementedError() + + def get_host_uptime(self, context, host): + """Returns the result of calling "uptime" on the target host.""" + raise NotImplementedError() + + def host_power_action(self, context, host, action): + raise NotImplementedError() + + def set_host_maintenance(self, context, host, mode): + """Start/Stop host maintenance window. On start, it triggers + volume evacuation.""" + raise NotImplementedError() diff --git a/cinder/volume/configuration.py b/cinder/volume/configuration.py new file mode 100644 index 0000000000..13935be676 --- /dev/null +++ b/cinder/volume/configuration.py @@ -0,0 +1,83 @@ +#!/usr/bin/env python +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright (c) 2012 Rackspace Hosting +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Configuration support for all drivers. + +This module allows support for setting configurations either from default +or from a particular FLAGS group, to be able to set multiple configurations +for a given set of values. + +For instance, two lvm configurations can be set by naming them in groups as + + [lvm1] + volume_group=lvm-group-1 + ... + + [lvm2] + volume_group=lvm-group-2 + ... + +And the configuration group name will be passed in so that all calls to +configuration.volume_group within that instance will be mapped to the proper +named group. + +This class also ensures the implementation's configuration is grafted into the +option group. This is due to the way cfg works. All cfg options must be defined +and registered in the group in which they are used. +""" + +from oslo.config import cfg + +from cinder import flags +from cinder.openstack.common import log as logging + + +FLAGS = flags.FLAGS +LOG = logging.getLogger(__name__) + + +class Configuration(object): + + def __init__(self, volume_opts, config_group=None): + """This takes care of grafting the implementation's config + values into the config group""" + self.config_group = config_group + + # set the local conf so that __call__'s know what to use + if self.config_group: + self._ensure_config_values(volume_opts) + self.local_conf = FLAGS._get(self.config_group) + else: + self.local_conf = FLAGS + + def _ensure_config_values(self, volume_opts): + FLAGS.register_opts(volume_opts, + group=self.config_group) + + def append_config_values(self, volume_opts): + self._ensure_config_values(volume_opts) + + def safe_get(self, value): + try: + return self.__getattr__(value) + except cfg.NoSuchOptError: + return None + + def __getattr__(self, value): + return getattr(self.local_conf, value) diff --git a/cinder/volume/driver.py b/cinder/volume/driver.py new file mode 100644 index 0000000000..275ced42fd --- /dev/null +++ b/cinder/volume/driver.py @@ -0,0 +1,551 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Drivers for volumes. 
+ +""" + +import os +import socket +import time + +from oslo.config import cfg + +from cinder import exception +from cinder.image import image_utils +from cinder.openstack.common import log as logging +from cinder import utils + +LOG = logging.getLogger(__name__) + +volume_opts = [ + cfg.IntOpt('num_shell_tries', + default=3, + help='number of times to attempt to run flakey shell commands'), + cfg.IntOpt('reserved_percentage', + default=0, + help='The percentage of backend capacity is reserved'), + cfg.IntOpt('num_iscsi_scan_tries', + default=3, + help='number of times to rescan iSCSI target to find volume'), + cfg.IntOpt('iscsi_num_targets', + default=100, + help='Number of iscsi target ids per host'), + cfg.StrOpt('iscsi_target_prefix', + default='iqn.2010-10.org.openstack:', + help='prefix for iscsi volumes'), + cfg.StrOpt('iscsi_ip_address', + default='$my_ip', + help='The port that the iSCSI daemon is listening on'), + cfg.IntOpt('iscsi_port', + default=3260, + help='The port that the iSCSI daemon is listening on'), + cfg.StrOpt('volume_backend_name', + default=None, + help='The backend name for a given driver implementation'), ] + +CONF = cfg.CONF +CONF.register_opts(volume_opts) +CONF.import_opt('iscsi_helper', 'cinder.brick.iscsi.iscsi') + + +class VolumeDriver(object): + """Executes commands relating to Volumes.""" + def __init__(self, execute=utils.execute, *args, **kwargs): + # NOTE(vish): db is set by Manager + self.db = None + self.configuration = kwargs.get('configuration', None) + if self.configuration: + self.configuration.append_config_values(volume_opts) + self.set_execute(execute) + self._stats = {} + + def set_execute(self, execute): + self._execute = execute + + def _try_execute(self, *command, **kwargs): + # NOTE(vish): Volume commands can partially fail due to timing, but + # running them a second time on failure will usually + # recover nicely. + tries = 0 + while True: + try: + self._execute(*command, **kwargs) + return True + except exception.ProcessExecutionError: + tries = tries + 1 + if tries >= self.configuration.num_shell_tries: + raise + LOG.exception(_("Recovering from a failed execute. " + "Try number %s"), tries) + time.sleep(tries ** 2) + + def check_for_setup_error(self): + raise NotImplementedError() + + def create_volume(self, volume): + """Creates a volume. Can optionally return a Dictionary of + changes to the volume object to be persisted.""" + raise NotImplementedError() + + def create_volume_from_snapshot(self, volume, snapshot): + """Creates a volume from a snapshot.""" + raise NotImplementedError() + + def create_cloned_volume(self, volume, src_vref): + """Creates a clone of the specified volume.""" + raise NotImplementedError() + + def delete_volume(self, volume): + """Deletes a volume.""" + raise NotImplementedError() + + def create_snapshot(self, snapshot): + """Creates a snapshot.""" + raise NotImplementedError() + + def delete_snapshot(self, snapshot): + """Deletes a snapshot.""" + raise NotImplementedError() + + def local_path(self, volume): + raise NotImplementedError() + + def ensure_export(self, context, volume): + """Synchronously recreates an export for a volume.""" + raise NotImplementedError() + + def create_export(self, context, volume): + """Exports the volume. 
Can optionally return a Dictionary of changes + to the volume object to be persisted.""" + raise NotImplementedError() + + def remove_export(self, context, volume): + """Removes an export for a volume.""" + raise NotImplementedError() + + def initialize_connection(self, volume, connector): + """Allow connection to connector and return connection info.""" + raise NotImplementedError() + + def terminate_connection(self, volume, connector, force=False, **kwargs): + """Disallow connection from connector""" + raise NotImplementedError() + + def attach_volume(self, context, volume_id, instance_uuid, mountpoint): + """ Callback for volume attached to instance.""" + pass + + def detach_volume(self, context, volume_id): + """ Callback for volume detached.""" + pass + + def get_volume_stats(self, refresh=False): + """Return the current state of the volume service. If 'refresh' is + True, run the update first.""" + return None + + def do_setup(self, context): + """Any initialization the volume driver does while starting""" + pass + + def copy_image_to_volume(self, context, volume, image_service, image_id): + """Fetch the image from image_service and write it to the volume.""" + raise NotImplementedError() + + def copy_volume_to_image(self, context, volume, image_service, image_meta): + """Copy the volume to the specified image.""" + raise NotImplementedError() + + def clone_image(self, volume, image_location): + """Create a volume efficiently from an existing image. + + image_location is a string whose format depends on the + image service backend in use. The driver should use it + to determine whether cloning is possible. + + Returns a boolean indicating whether cloning occurred + """ + return False + + def backup_volume(self, context, backup, backup_service): + """Create a new backup from an existing volume.""" + raise NotImplementedError() + + def restore_backup(self, context, backup, volume, backup_service): + """Restore an existing backup to a new or existing volume.""" + raise NotImplementedError() + + def clear_download(self, context, volume): + """Clean up after an interrupted image copy.""" + pass + + +class ISCSIDriver(VolumeDriver): + """Executes commands relating to ISCSI volumes. + + We make use of model provider properties as follows: + + ``provider_location`` + if present, contains the iSCSI target information in the same + format as an ietadm discovery + i.e. ':, ' + + ``provider_auth`` + if present, contains a space-separated triple: + ' '. + `CHAP` is the only auth_method in use at the moment. + """ + + def __init__(self, *args, **kwargs): + super(ISCSIDriver, self).__init__(*args, **kwargs) + + def _do_iscsi_discovery(self, volume): + #TODO(justinsb): Deprecate discovery and use stored info + #NOTE(justinsb): Discovery won't work with CHAP-secured targets (?) + LOG.warn(_("ISCSI provider_location not stored, using discovery")) + + volume_name = volume['name'] + + (out, _err) = self._execute('iscsiadm', '-m', 'discovery', + '-t', 'sendtargets', '-p', volume['host'], + run_as_root=True) + for target in out.splitlines(): + if (self.configuration.iscsi_ip_address in target + and volume_name in target): + return target + return None + + def _get_iscsi_properties(self, volume): + """Gets iscsi configuration + + We ideally get saved information in the volume entity, but fall back + to discovery if need be. 
Discovery may be completely removed in future + The properties are: + + :target_discovered: boolean indicating whether discovery was used + + :target_iqn: the IQN of the iSCSI target + + :target_portal: the portal of the iSCSI target + + :target_lun: the lun of the iSCSI target + + :volume_id: the id of the volume (currently used by xen) + + :auth_method:, :auth_username:, :auth_password: + + the authentication details. Right now, either auth_method is not + present meaning no authentication, or auth_method == `CHAP` + meaning use CHAP with the specified credentials. + """ + + properties = {} + + location = volume['provider_location'] + + if location: + # provider_location is the same format as iSCSI discovery output + properties['target_discovered'] = False + else: + location = self._do_iscsi_discovery(volume) + + if not location: + msg = (_("Could not find iSCSI export for volume %s") % + (volume['name'])) + raise exception.InvalidVolume(reason=msg) + + LOG.debug(_("ISCSI Discovery: Found %s") % (location)) + properties['target_discovered'] = True + + results = location.split(" ") + properties['target_portal'] = results[0].split(",")[0] + properties['target_iqn'] = results[1] + try: + properties['target_lun'] = int(results[2]) + except (IndexError, ValueError): + if (self.configuration.volume_driver in + ['cinder.volume.drivers.lvm.LVMISCSIDriver', + 'cinder.volume.drivers.lvm.ThinLVMVolumeDriver'] and + self.configuration.iscsi_helper == 'tgtadm'): + properties['target_lun'] = 1 + else: + properties['target_lun'] = 0 + + properties['volume_id'] = volume['id'] + + auth = volume['provider_auth'] + if auth: + (auth_method, auth_username, auth_secret) = auth.split() + + properties['auth_method'] = auth_method + properties['auth_username'] = auth_username + properties['auth_password'] = auth_secret + + return properties + + def _run_iscsiadm(self, iscsi_properties, iscsi_command, **kwargs): + check_exit_code = kwargs.pop('check_exit_code', 0) + (out, err) = self._execute('iscsiadm', '-m', 'node', '-T', + iscsi_properties['target_iqn'], + '-p', iscsi_properties['target_portal'], + *iscsi_command, run_as_root=True, + check_exit_code=check_exit_code) + LOG.debug("iscsiadm %s: stdout=%s stderr=%s" % + (iscsi_command, out, err)) + return (out, err) + + def _iscsiadm_update(self, iscsi_properties, property_key, property_value, + **kwargs): + iscsi_command = ('--op', 'update', '-n', property_key, + '-v', property_value) + return self._run_iscsiadm(iscsi_properties, iscsi_command, **kwargs) + + def initialize_connection(self, volume, connector): + """Initializes the connection and returns connection info. + + The iscsi driver returns a driver_volume_type of 'iscsi'. + The format of the driver data is defined in _get_iscsi_properties. + Example return value:: + + { + 'driver_volume_type': 'iscsi' + 'data': { + 'target_discovered': True, + 'target_iqn': 'iqn.2010-10.org.openstack:volume-00000001', + 'target_portal': '127.0.0.0.1:3260', + 'volume_id': 1, + } + } + + """ + + if CONF.iscsi_helper == 'lioadm': + self.tgtadm.initialize_connection(volume, connector) + + iscsi_properties = self._get_iscsi_properties(volume) + return { + 'driver_volume_type': 'iscsi', + 'data': iscsi_properties + } + + def terminate_connection(self, volume, connector, **kwargs): + pass + + def _get_iscsi_initiator(self): + """Get iscsi initiator name for this machine""" + # NOTE openiscsi stores initiator name in a file that + # needs root permission to read. 
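+        # The file normally holds a single line of the form (example value
+        # only):
+        #
+        #   InitiatorName=iqn.1993-08.org.debian:01:abcdef123456
+        #
+        # The loop below returns everything after the '=' on that line.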
+ contents = utils.read_file_as_root('/etc/iscsi/initiatorname.iscsi') + for l in contents.split('\n'): + if l.startswith('InitiatorName='): + return l[l.index('=') + 1:].strip() + + def copy_image_to_volume(self, context, volume, image_service, image_id): + """Fetch the image from image_service and write it to the volume.""" + LOG.debug(_('copy_image_to_volume %s.') % volume['name']) + connector = {'initiator': self._get_iscsi_initiator(), + 'host': socket.gethostname()} + + iscsi_properties, volume_path = self._attach_volume( + context, volume, connector) + + try: + image_utils.fetch_to_raw(context, + image_service, + image_id, + volume_path) + finally: + self.terminate_connection(volume, connector) + + def copy_volume_to_image(self, context, volume, image_service, image_meta): + """Copy the volume to the specified image.""" + LOG.debug(_('copy_volume_to_image %s.') % volume['name']) + connector = {'initiator': self._get_iscsi_initiator(), + 'host': socket.gethostname()} + + iscsi_properties, volume_path = self._attach_volume( + context, volume, connector) + + try: + image_utils.upload_volume(context, + image_service, + image_meta, + volume_path) + finally: + self.terminate_connection(volume, connector) + + def _attach_volume(self, context, volume, connector): + """Attach the volume.""" + iscsi_properties = None + host_device = None + init_conn = self.initialize_connection(volume, connector) + iscsi_properties = init_conn['data'] + + # code "inspired by" nova/virt/libvirt/volume.py + try: + self._run_iscsiadm(iscsi_properties, ()) + except exception.ProcessExecutionError as exc: + # iscsiadm returns 21 for "No records found" after version 2.0-871 + if exc.exit_code in [21, 255]: + self._run_iscsiadm(iscsi_properties, ('--op', 'new')) + else: + raise + + if iscsi_properties.get('auth_method'): + self._iscsiadm_update(iscsi_properties, + "node.session.auth.authmethod", + iscsi_properties['auth_method']) + self._iscsiadm_update(iscsi_properties, + "node.session.auth.username", + iscsi_properties['auth_username']) + self._iscsiadm_update(iscsi_properties, + "node.session.auth.password", + iscsi_properties['auth_password']) + + # NOTE(vish): If we have another lun on the same target, we may + # have a duplicate login + self._run_iscsiadm(iscsi_properties, ("--login",), + check_exit_code=[0, 255]) + + self._iscsiadm_update(iscsi_properties, "node.startup", "automatic") + + host_device = ("/dev/disk/by-path/ip-%s-iscsi-%s-lun-%s" % + (iscsi_properties['target_portal'], + iscsi_properties['target_iqn'], + iscsi_properties.get('target_lun', 0))) + + tries = 0 + while not os.path.exists(host_device): + if tries >= self.configuration.num_iscsi_scan_tries: + raise exception.CinderException( + _("iSCSI device not found at %s") % (host_device)) + + LOG.warn(_("ISCSI volume not yet found at: %(host_device)s. " + "Will rescan & retry. Try number: %(tries)s") % + locals()) + + # The rescan isn't documented as being necessary(?), but it helps + self._run_iscsiadm(iscsi_properties, ("--rescan",)) + + tries = tries + 1 + if not os.path.exists(host_device): + time.sleep(tries ** 2) + + if tries != 0: + LOG.debug(_("Found iSCSI node %(host_device)s " + "(after %(tries)s rescans)") % + locals()) + + return iscsi_properties, host_device + + def get_volume_stats(self, refresh=False): + """Get volume status. 
+ + If 'refresh' is True, run update the stats first.""" + if refresh: + self._update_volume_status() + + return self._stats + + def _update_volume_status(self): + """Retrieve status info from volume group.""" + + LOG.debug(_("Updating volume status")) + data = {} + backend_name = self.configuration.safe_get('volume_backend_name') + data["volume_backend_name"] = backend_name or 'Generic_iSCSI' + data["vendor_name"] = 'Open Source' + data["driver_version"] = '1.0' + data["storage_protocol"] = 'iSCSI' + + data['total_capacity_gb'] = 'infinite' + data['free_capacity_gb'] = 'infinite' + data['reserved_percentage'] = 100 + data['QoS_support'] = False + self._stats = data + + +class FakeISCSIDriver(ISCSIDriver): + """Logs calls instead of executing.""" + def __init__(self, *args, **kwargs): + super(FakeISCSIDriver, self).__init__(execute=self.fake_execute, + *args, **kwargs) + + def check_for_setup_error(self): + """No setup necessary in fake mode.""" + pass + + def initialize_connection(self, volume, connector): + return { + 'driver_volume_type': 'iscsi', + 'data': {} + } + + def terminate_connection(self, volume, connector, **kwargs): + pass + + @staticmethod + def fake_execute(cmd, *_args, **_kwargs): + """Execute that simply logs the command.""" + LOG.debug(_("FAKE ISCSI: %s"), cmd) + return (None, None) + + +class FibreChannelDriver(VolumeDriver): + """Executes commands relating to Fibre Channel volumes.""" + def __init__(self, *args, **kwargs): + super(FibreChannelDriver, self).__init__(*args, **kwargs) + + def initialize_connection(self, volume, connector): + """Initializes the connection and returns connection info. + + The driver returns a driver_volume_type of 'fibre_channel'. + The target_wwn can be a single entry or a list of wwns that + correspond to the list of remote wwn(s) that will export the volume. + Example return values: + + { + 'driver_volume_type': 'fibre_channel' + 'data': { + 'target_discovered': True, + 'target_lun': 1, + 'target_wwn': '1234567890123', + } + } + + or + + { + 'driver_volume_type': 'fibre_channel' + 'data': { + 'target_discovered': True, + 'target_lun': 1, + 'target_wwn': ['1234567890123', '0987654321321'], + } + } + + """ + msg = _("Driver must implement initialize_connection") + raise NotImplementedError(msg) + + def copy_image_to_volume(self, context, volume, image_service, image_id): + raise NotImplementedError() + + def copy_volume_to_image(self, context, volume, image_service, image_meta): + raise NotImplementedError() diff --git a/cinder/volume/drivers/__init__.py b/cinder/volume/drivers/__init__.py new file mode 100644 index 0000000000..815a442c69 --- /dev/null +++ b/cinder/volume/drivers/__init__.py @@ -0,0 +1,22 @@ +# Copyright 2012 OpenStack LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +:mod:`cinder.volume.driver` -- Cinder Drivers +===================================================== + +.. automodule:: cinder.volume.driver + :platform: Unix + :synopsis: Module containing all the Cinder drivers. 
+""" diff --git a/cinder/volume/drivers/coraid.py b/cinder/volume/drivers/coraid.py new file mode 100644 index 0000000000..0c251304bc --- /dev/null +++ b/cinder/volume/drivers/coraid.py @@ -0,0 +1,424 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2012 Alyseo. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Desc : Driver to store volumes on Coraid Appliances. +Require : Coraid EtherCloud ESM, Coraid VSX and Coraid SRX. +Author : Jean-Baptiste RANSY +Contrib : Larry Matter +""" + +import cookielib +import os +import time +import urllib2 + +from oslo.config import cfg + +from cinder import context +from cinder import exception +from cinder import flags +from cinder.openstack.common import jsonutils +from cinder.openstack.common import log as logging +from cinder.volume import driver +from cinder.volume import volume_types + +LOG = logging.getLogger(__name__) + +FLAGS = flags.FLAGS +coraid_opts = [ + cfg.StrOpt('coraid_esm_address', + default='', + help='IP address of Coraid ESM'), + cfg.StrOpt('coraid_user', + default='admin', + help='User name to connect to Coraid ESM'), + cfg.StrOpt('coraid_group', + default='admin', + help='Name of group on Coraid ESM to which coraid_user belongs' + ' (must have admin privilege)'), + cfg.StrOpt('coraid_password', + default='password', + help='Password to connect to Coraid ESM'), + cfg.StrOpt('coraid_repository_key', + default='coraid_repository', + help='Volume Type key name to store ESM Repository Name'), +] +FLAGS.register_opts(coraid_opts) + + +class CoraidException(Exception): + def __init__(self, message=None, error=None): + super(CoraidException, self).__init__(message, error) + + def __str__(self): + return '%s: %s' % self.args + + +class CoraidRESTException(CoraidException): + pass + + +class CoraidESMException(CoraidException): + pass + + +class CoraidRESTClient(object): + """Executes volume driver commands on Coraid ESM EtherCloud Appliance.""" + + def __init__(self, ipaddress, user, group, password): + self.url = "https://%s:8443/" % ipaddress + self.user = user + self.group = group + self.password = password + self.session = False + self.cookiejar = cookielib.CookieJar() + self.urlOpener = urllib2.build_opener( + urllib2.HTTPCookieProcessor(self.cookiejar)) + LOG.debug(_('Running with CoraidDriver for ESM EtherCLoud')) + + def _login(self): + """Login and Session Handler.""" + if not self.session or self.session < time.time(): + url = ('admin?op=login&username=%s&password=%s' % + (self.user, self.password)) + data = 'Login' + reply = self._admin_esm_cmd(url, data) + if reply.get('state') == 'adminSucceed': + self.session = time.time() + 1100 + msg = _('Update session cookie %(session)s') + LOG.debug(msg % dict(session=self.session)) + self._set_group(reply) + return True + else: + errmsg = reply.get('message', '') + msg = _('Message : %(message)s') + raise CoraidESMException(msg % dict(message=errmsg)) + return True + + def _set_group(self, reply): + """Set effective group.""" + if self.group: + group 
= self.group + groupId = self._get_group_id(group, reply) + if groupId: + url = ('admin?op=setRbacGroup&groupId=%s' % (groupId)) + data = 'Group' + reply = self._admin_esm_cmd(url, data) + if reply.get('state') == 'adminSucceed': + return True + else: + errmsg = reply.get('message', '') + msg = _('Error while trying to set group: %(message)s') + raise CoraidRESTException(msg % dict(message=errmsg)) + else: + msg = _('Unable to find group: %(group)s') + raise CoraidESMException(msg % dict(group=group)) + return True + + def _get_group_id(self, groupName, loginResult): + """Map group name to group ID.""" + # NOTE(lmatter): All other groups are under the admin group + fullName = "admin group:%s" % groupName + groupId = False + for kid in loginResult['values']: + fullPath = kid['fullPath'] + if fullPath == fullName: + return kid['groupId'] + return False + + def _esm_cmd(self, url=False, data=None): + self._login() + return self._admin_esm_cmd(url, data) + + def _admin_esm_cmd(self, url=False, data=None): + """ + _admin_esm_cmd represent the entry point to send requests to ESM + Appliance. Send the HTTPS call, get response in JSON + convert response into Python Object and return it. + """ + if url: + url = self.url + url + + req = urllib2.Request(url, data) + + try: + res = self.urlOpener.open(req).read() + except Exception: + raise CoraidRESTException(_('ESM urlOpen error')) + + try: + res_json = jsonutils.loads(res) + except Exception: + raise CoraidRESTException(_('JSON Error')) + + return res_json + else: + raise CoraidRESTException(_('Request without URL')) + + def _configure(self, data): + """In charge of all commands into 'configure'.""" + url = 'configure' + LOG.debug(_('Configure data : %s'), data) + response = self._esm_cmd(url, data) + LOG.debug(_("Configure response : %s"), response) + if response: + if response.get('configState') == 'completedSuccessfully': + return True + else: + errmsg = response.get('message', '') + msg = _('Message : %(message)s') + raise CoraidESMException(msg % dict(message=errmsg)) + return False + + def _get_volume_info(self, volume_name): + """Retrive volume informations for a given volume name.""" + url = 'fetch?shelf=cms&orchStrRepo&lv=%s' % (volume_name) + try: + response = self._esm_cmd(url) + info = response[0][1]['reply'][0] + return {"pool": info['lv']['containingPool'], + "repo": info['repoName'], + "vsxidx": info['lv']['lunIndex'], + "index": info['lv']['lvStatus']['exportedLun']['lun'], + "shelf": info['lv']['lvStatus']['exportedLun']['shelf']} + except Exception: + msg = _('Unable to retrive volume infos for volume %(volname)s') + raise CoraidESMException(msg % dict(volname=volume_name)) + + def _get_lun_address(self, volume_name): + """Return AoE Address for a given Volume.""" + volume_info = self._get_volume_info(volume_name) + shelf = volume_info['shelf'] + lun = volume_info['index'] + return {'shelf': shelf, 'lun': lun} + + def create_lun(self, volume_name, volume_size, repository): + """Create LUN on Coraid Backend Storage.""" + data = '[{"addr":"cms","data":"{' \ + '\\"servers\\":[\\"\\"],' \ + '\\"repoName\\":\\"%s\\",' \ + '\\"size\\":\\"%sG\\",' \ + '\\"lvName\\":\\"%s\\"}",' \ + '"op":"orchStrLun",' \ + '"args":"add"}]' % (repository, volume_size, + volume_name) + return self._configure(data) + + def delete_lun(self, volume_name): + """Delete LUN.""" + volume_info = self._get_volume_info(volume_name) + repository = volume_info['repo'] + data = '[{"addr":"cms","data":"{' \ + '\\"repoName\\":\\"%s\\",' \ + '\\"lvName\\":\\"%s\\"}",' \ + 
'"op":"orchStrLun/verified",' \ + '"args":"delete"}]' % (repository, volume_name) + return self._configure(data) + + def create_snapshot(self, volume_name, snapshot_name): + """Create Snapshot.""" + volume_info = self._get_volume_info(volume_name) + repository = volume_info['repo'] + data = '[{"addr":"cms","data":"{' \ + '\\"repoName\\":\\"%s\\",' \ + '\\"lvName\\":\\"%s\\",' \ + '\\"newLvName\\":\\"%s\\"}",' \ + '"op":"orchStrLunMods",' \ + '"args":"addClSnap"}]' % (repository, volume_name, + snapshot_name) + return self._configure(data) + + def delete_snapshot(self, snapshot_name): + """Delete Snapshot.""" + snapshot_info = self._get_volume_info(snapshot_name) + repository = snapshot_info['repo'] + data = '[{"addr":"cms","data":"{' \ + '\\"repoName\\":\\"%s\\",' \ + '\\"lvName\\":\\"%s\\"}",' \ + '"op":"orchStrLunMods",' \ + '"args":"delClSnap"}]' % (repository, snapshot_name) + return self._configure(data) + + def create_volume_from_snapshot(self, snapshot_name, + volume_name, repository): + """Create a LUN from a Snapshot.""" + snapshot_info = self._get_volume_info(snapshot_name) + snapshot_repo = snapshot_info['repo'] + data = '[{"addr":"cms","data":"{' \ + '\\"lvName\\":\\"%s\\",' \ + '\\"repoName\\":\\"%s\\",' \ + '\\"newLvName\\":\\"%s\\",' \ + '\\"newRepoName\\":\\"%s\\"}",' \ + '"op":"orchStrLunMods",' \ + '"args":"addClone"}]' % (snapshot_name, snapshot_repo, + volume_name, repository) + return self._configure(data) + + +class CoraidDriver(driver.VolumeDriver): + """This is the Class to set in cinder.conf (volume_driver).""" + + def __init__(self, *args, **kwargs): + super(CoraidDriver, self).__init__(*args, **kwargs) + self.configuration.append_config_values(coraid_opts) + + def do_setup(self, context): + """Initialize the volume driver.""" + self.esm = CoraidRESTClient(self.configuration.coraid_esm_address, + self.configuration.coraid_user, + self.configuration.coraid_group, + self.configuration.coraid_password) + + def check_for_setup_error(self): + """Return an error if prerequisites aren't met.""" + if not self.esm._login(): + raise LookupError(_("Cannot login on Coraid ESM")) + + def _get_repository(self, volume_type): + """ + Return the ESM Repository from the Volume Type. + The ESM Repository is stored into a volume_type_extra_specs key. + """ + volume_type_id = volume_type['id'] + repository_key_name = self.configuration.coraid_repository_key + repository = volume_types.get_volume_type_extra_specs( + volume_type_id, repository_key_name) + return repository + + def create_volume(self, volume): + """Create a Volume.""" + try: + repository = self._get_repository(volume['volume_type']) + self.esm.create_lun(volume['name'], volume['size'], repository) + except Exception: + msg = _('Fail to create volume %(volname)s') + LOG.debug(msg % dict(volname=volume['name'])) + raise + # NOTE(jbr_): The manager currently interprets any return as + # being the model_update for provider location. 
+ # return None to not break it (thank to jgriffith and DuncanT) + return + + def delete_volume(self, volume): + """Delete a Volume.""" + try: + self.esm.delete_lun(volume['name']) + except Exception: + msg = _('Failed to delete volume %(volname)s') + LOG.debug(msg % dict(volname=volume['name'])) + raise + return + + def create_snapshot(self, snapshot): + """Create a Snapshot.""" + try: + volume_name = (FLAGS.volume_name_template + % snapshot['volume_id']) + snapshot_name = (FLAGS.snapshot_name_template + % snapshot['id']) + self.esm.create_snapshot(volume_name, snapshot_name) + except Exception, e: + msg = _('Failed to Create Snapshot %(snapname)s') + LOG.debug(msg % dict(snapname=snapshot_name)) + raise + return + + def delete_snapshot(self, snapshot): + """Delete a Snapshot.""" + try: + snapshot_name = (FLAGS.snapshot_name_template + % snapshot['id']) + self.esm.delete_snapshot(snapshot_name) + except Exception: + msg = _('Failed to Delete Snapshot %(snapname)s') + LOG.debug(msg % dict(snapname=snapshot_name)) + raise + return + + def create_volume_from_snapshot(self, volume, snapshot): + """Create a Volume from a Snapshot.""" + try: + snapshot_name = (FLAGS.snapshot_name_template + % snapshot['id']) + repository = self._get_repository(volume['volume_type']) + self.esm.create_volume_from_snapshot(snapshot_name, + volume['name'], + repository) + except Exception: + msg = _('Failed to Create Volume from Snapshot %(snapname)s') + LOG.debug(msg % dict(snapname=snapshot_name)) + raise + return + + def initialize_connection(self, volume, connector): + """Return connection information.""" + try: + infos = self.esm._get_lun_address(volume['name']) + shelf = infos['shelf'] + lun = infos['lun'] + + aoe_properties = { + 'target_shelf': shelf, + 'target_lun': lun, + } + return { + 'driver_volume_type': 'aoe', + 'data': aoe_properties, + } + except Exception: + msg = _('Failed to Initialize Connection. 
' + 'Volume Name: %(volname)s ' + 'Shelf: %(shelf)s, ' + 'Lun: %(lun)s') + LOG.debug(msg % dict(volname=volume['name'], + shelf=shelf, + lun=lun)) + raise + return + + def get_volume_stats(self, refresh=False): + """Return Volume Stats.""" + data = {'driver_version': '1.0', + 'free_capacity_gb': 'unknown', + 'reserved_percentage': 0, + 'storage_protocol': 'aoe', + 'total_capacity_gb': 'unknown', + 'vendor_name': 'Coraid'} + backend_name = self.configuration.safe_get('volume_backend_name') + data['volume_backend_name'] = backend_name or 'EtherCloud ESM' + return data + + def local_path(self, volume): + pass + + def create_export(self, context, volume): + pass + + def remove_export(self, context, volume): + pass + + def terminate_connection(self, volume, connector, **kwargs): + pass + + def ensure_export(self, context, volume): + pass + + def attach_volume(self, context, volume, instance_uuid, mountpoint): + pass + + def detach_volume(self, context, volume): + pass diff --git a/cinder/volume/drivers/emc/__init__.py b/cinder/volume/drivers/emc/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/cinder/volume/drivers/emc/cinder_emc_config.xml.sample b/cinder/volume/drivers/emc/cinder_emc_config.xml.sample new file mode 100644 index 0000000000..d67ff37df6 --- /dev/null +++ b/cinder/volume/drivers/emc/cinder_emc_config.xml.sample @@ -0,0 +1,12 @@ + + + +gold + +openstack + +x.x.x.x +xxxx +xxxxxxxx +xxxxxxxx + diff --git a/cinder/volume/drivers/emc/emc_smis_common.py b/cinder/volume/drivers/emc/emc_smis_common.py new file mode 100644 index 0000000000..31f094bfe0 --- /dev/null +++ b/cinder/volume/drivers/emc/emc_smis_common.py @@ -0,0 +1,1564 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2012 EMC Corporation. +# Copyright (c) 2012 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Common class for SMI-S based EMC volume drivers. + +This common class is for EMC volume drivers based on SMI-S. +It supports VNX and VMAX arrays. + +""" + +import time + +from oslo.config import cfg +from xml.dom.minidom import parseString + +from cinder import exception +from cinder import flags +from cinder.openstack.common import log as logging + +LOG = logging.getLogger(__name__) + +FLAGS = flags.FLAGS + +try: + import pywbem +except ImportError: + LOG.info(_('Module PyWBEM not installed. 
' + 'Install PyWBEM using the python-pywbem package.')) + +CINDER_EMC_CONFIG_FILE = '/etc/cinder/cinder_emc_config.xml' + + +class EMCSMISCommon(): + """Common code that can be used by ISCSI and FC drivers.""" + + stats = {'driver_version': '1.0', + 'free_capacity_gb': 0, + 'reserved_percentage': 0, + 'storage_protocol': None, + 'total_capacity_gb': 0, + 'vendor_name': 'EMC', + 'volume_backend_name': None} + + def __init__(self, prtcl, configuration=None): + + opt = cfg.StrOpt('cinder_emc_config_file', + default=CINDER_EMC_CONFIG_FILE, + help='use this file for cinder emc plugin ' + 'config data') + FLAGS.register_opt(opt) + self.protocol = prtcl + self.configuration = configuration + self.configuration.append_config_values([opt]) + + ip, port = self._get_ecom_server() + self.user, self.passwd = self._get_ecom_cred() + self.url = 'http://' + ip + ':' + port + self.conn = self._get_ecom_connection() + + def create_volume(self, volume): + """Creates a EMC(VMAX/VNX) volume.""" + + LOG.debug(_('Entering create_volume.')) + volumesize = int(volume['size']) * 1073741824 + volumename = volume['name'] + + LOG.info(_('Create Volume: %(volume)s Size: %(size)lu') + % {'volume': volumename, + 'size': volumesize}) + + self.conn = self._get_ecom_connection() + + storage_type = self._get_storage_type() + + LOG.debug(_('Create Volume: %(volume)s ' + 'Storage type: %(storage_type)s') + % {'volume': volumename, + 'storage_type': storage_type}) + + pool, storage_system = self._find_pool(storage_type) + + LOG.debug(_('Create Volume: %(volume)s Pool: %(pool)s ' + 'Storage System: %(storage_system)s') + % {'volume': volumename, + 'pool': str(pool), + 'storage_system': storage_system}) + + configservice = self._find_storage_configuration_service( + storage_system) + if configservice is None: + exception_message = (_("Error Create Volume: %(volumename)s. " + "Storage Configuration Service not found for " + "pool %(storage_type)s.") + % {'volumename': volumename, + 'storage_type': storage_type}) + LOG.error(exception_message) + raise exception.VolumeBackendAPIException(data=exception_message) + + LOG.debug(_('Create Volume: %(name)s Method: ' + 'CreateOrModifyElementFromStoragePool ConfigServicie: ' + '%(service)s ElementName: %(name)s InPool: %(pool)s ' + 'ElementType: 5 Size: %(size)lu') + % {'service': str(configservice), + 'name': volumename, + 'pool': str(pool), + 'size': volumesize}) + + rc, job = self.conn.InvokeMethod( + 'CreateOrModifyElementFromStoragePool', + configservice, ElementName=volumename, InPool=pool, + ElementType=self._getnum(5, '16'), + Size=self._getnum(volumesize, '64')) + + LOG.debug(_('Create Volume: %(volumename)s Return code: %(rc)lu') + % {'volumename': volumename, + 'rc': rc}) + + if rc != 0L: + rc, errordesc = self._wait_for_job_complete(job) + if rc != 0L: + LOG.error(_('Error Create Volume: %(volumename)s. ' + 'Return code: %(rc)lu. 
Error: %(error)s') + % {'volumename': volumename, + 'rc': rc, + 'error': errordesc}) + raise exception.VolumeBackendAPIException(data=errordesc) + + LOG.debug(_('Leaving create_volume: %(volumename)s ' + 'Return code: %(rc)lu') + % {'volumename': volumename, + 'rc': rc}) + + def create_volume_from_snapshot(self, volume, snapshot): + """Creates a volume from a snapshot.""" + + LOG.debug(_('Entering create_volume_from_snapshot.')) + + snapshotname = snapshot['name'] + volumename = volume['name'] + + LOG.info(_('Create Volume from Snapshot: Volume: %(volumename)s ' + 'Snapshot: %(snapshotname)s') + % {'volumename': volumename, + 'snapshotname': snapshotname}) + + self.conn = self._get_ecom_connection() + + snapshot_instance = self._find_lun(snapshot) + storage_system = snapshot_instance['SystemName'] + + LOG.debug(_('Create Volume from Snapshot: Volume: %(volumename)s ' + 'Snapshot: %(snapshotname)s Snapshot Instance: ' + '%(snapshotinstance)s Storage System: %(storage_system)s.') + % {'volumename': volumename, + 'snapshotname': snapshotname, + 'snapshotinstance': str(snapshot_instance.path), + 'storage_system': storage_system}) + + isVMAX = storage_system.find('SYMMETRIX') + if isVMAX > -1: + exception_message = (_('Error Create Volume from Snapshot: ' + 'Volume: %(volumename)s Snapshot: ' + '%(snapshotname)s. Create Volume ' + 'from Snapshot is NOT supported on VMAX.') + % {'volumename': volumename, + 'snapshotname': snapshotname}) + LOG.error(exception_message) + raise exception.VolumeBackendAPIException(data=exception_message) + + repservice = self._find_replication_service(storage_system) + if repservice is None: + exception_message = (_('Error Create Volume from Snapshot: ' + 'Volume: %(volumename)s Snapshot: ' + '%(snapshotname)s. Cannot find Replication ' + 'Service to create volume from snapshot.') + % {'volumename': volumename, + 'snapshotname': snapshotname}) + LOG.error(exception_message) + raise exception.VolumeBackendAPIException(data=exception_message) + + LOG.debug(_('Create Volume from Snapshot: Volume: %(volumename)s ' + 'Snapshot: %(snapshotname)s Method: CreateElementReplica ' + 'ReplicationService: %(service)s ElementName: ' + '%(elementname)s SyncType: 8 SourceElement: ' + '%(sourceelement)s') + % {'volumename': volumename, + 'snapshotname': snapshotname, + 'service': str(repservice), + 'elementname': volumename, + 'sourceelement': str(snapshot_instance.path)}) + + # Create a Clone from snapshot + rc, job = self.conn.InvokeMethod( + 'CreateElementReplica', repservice, + ElementName=volumename, + SyncType=self._getnum(8, '16'), + SourceElement=snapshot_instance.path) + + if rc != 0L: + rc, errordesc = self._wait_for_job_complete(job) + if rc != 0L: + exception_message = (_('Error Create Volume from Snapshot: ' + 'Volume: %(volumename)s Snapshot:' + '%(snapshotname)s. Return code: %(rc)lu.' + 'Error: %(error)s') + % {'volumename': volumename, + 'snapshotname': snapshotname, + 'rc': rc, + 'error': errordesc}) + LOG.error(exception_message) + raise exception.VolumeBackendAPIException( + data=exception_message) + + LOG.debug(_('Create Volume from Snapshot: Volume: %(volumename)s ' + 'Snapshot: %(snapshotname)s. Successfully clone volume ' + 'from snapshot. 
Finding the clone relationship.') + % {'volumename': volumename, + 'snapshotname': snapshotname}) + + sync_name, storage_system = self._find_storage_sync_sv_sv( + volumename, snapshotname) + + # Remove the Clone relationshop so it can be used as a regular lun + # 8 - Detach operation + LOG.debug(_('Create Volume from Snapshot: Volume: %(volumename)s ' + 'Snapshot: %(snapshotname)s. Remove the clone ' + 'relationship. Method: ModifyReplicaSynchronization ' + 'ReplicationService: %(service)s Operation: 8 ' + 'Synchronization: %(sync_name)s') + % {'volumename': volumename, + 'snapshotname': snapshotname, + 'service': str(repservice), + 'sync_name': str(sync_name)}) + + rc, job = self.conn.InvokeMethod( + 'ModifyReplicaSynchronization', + repservice, + Operation=self._getnum(8, '16'), + Synchronization=sync_name) + + LOG.debug(_('Create Volume from Snapshot: Volume: %(volumename)s ' + 'Snapshot: %(snapshotname)s Return code: %(rc)lu') + % {'volumename': volumename, + 'snapshotname': snapshotname, + 'rc': rc}) + + if rc != 0L: + rc, errordesc = self._wait_for_job_complete(job) + if rc != 0L: + exception_message = (_('Error Create Volume from Snapshot: ' + 'Volume: %(volumename)s ' + 'Snapshot: %(snapshotname)s. ' + 'Return code: %(rc)lu. Error: %(error)s') + % {'volumename': volumename, + 'snapshotname': snapshotname, + 'rc': rc, + 'error': errordesc}) + LOG.error(exception_message) + raise exception.VolumeBackendAPIException( + data=exception_message) + + LOG.debug(_('Leaving create_volume_from_snapshot: Volume: ' + '%(volumename)s Snapshot: %(snapshotname)s ' + 'Return code: %(rc)lu.') + % {'volumename': volumename, + 'snapshotname': snapshotname, + 'rc': rc}) + + def create_cloned_volume(self, volume, src_vref): + """Creates a clone of the specified volume.""" + LOG.debug(_('Entering create_cloned_volume.')) + + srcname = src_vref['name'] + volumename = volume['name'] + + LOG.info(_('Create a Clone from Volume: Volume: %(volumename)s ' + 'Source Volume: %(srcname)s') + % {'volumename': volumename, + 'srcname': srcname}) + + self.conn = self._get_ecom_connection() + + src_instance = self._find_lun(src_vref) + storage_system = src_instance['SystemName'] + + LOG.debug(_('Create Cloned Volume: Volume: %(volumename)s ' + 'Source Volume: %(srcname)s Source Instance: ' + '%(src_instance)s Storage System: %(storage_system)s.') + % {'volumename': volumename, + 'srcname': srcname, + 'src_instance': str(src_instance.path), + 'storage_system': storage_system}) + + repservice = self._find_replication_service(storage_system) + if repservice is None: + exception_message = (_('Error Create Cloned Volume: ' + 'Volume: %(volumename)s Source Volume: ' + '%(srcname)s. 
Cannot find Replication ' + 'Service to create cloned volume.') + % {'volumename': volumename, + 'srcname': srcname}) + LOG.error(exception_message) + raise exception.VolumeBackendAPIException(data=exception_message) + + LOG.debug(_('Create Cloned Volume: Volume: %(volumename)s ' + 'Source Volume: %(srcname)s Method: CreateElementReplica ' + 'ReplicationService: %(service)s ElementName: ' + '%(elementname)s SyncType: 8 SourceElement: ' + '%(sourceelement)s') + % {'volumename': volumename, + 'srcname': srcname, + 'service': str(repservice), + 'elementname': volumename, + 'sourceelement': str(src_instance.path)}) + + # Create a Clone from source volume + rc, job = self.conn.InvokeMethod( + 'CreateElementReplica', repservice, + ElementName=volumename, + SyncType=self._getnum(8, '16'), + SourceElement=src_instance.path) + + if rc != 0L: + rc, errordesc = self._wait_for_job_complete(job) + if rc != 0L: + exception_message = (_('Error Create Cloned Volume: ' + 'Volume: %(volumename)s Source Volume:' + '%(srcname)s. Return code: %(rc)lu.' + 'Error: %(error)s') + % {'volumename': volumename, + 'srcname': srcname, + 'rc': rc, + 'error': errordesc}) + LOG.error(exception_message) + raise exception.VolumeBackendAPIException( + data=exception_message) + + LOG.debug(_('Create Cloned Volume: Volume: %(volumename)s ' + 'Source Volume: %(srcname)s. Successfully cloned volume ' + 'from source volume. Finding the clone relationship.') + % {'volumename': volumename, + 'srcname': srcname}) + + sync_name, storage_system = self._find_storage_sync_sv_sv( + volumename, srcname) + + # Remove the Clone relationshop so it can be used as a regular lun + # 8 - Detach operation + LOG.debug(_('Create Cloned Volume: Volume: %(volumename)s ' + 'Source Volume: %(srcname)s. Remove the clone ' + 'relationship. Method: ModifyReplicaSynchronization ' + 'ReplicationService: %(service)s Operation: 8 ' + 'Synchronization: %(sync_name)s') + % {'volumename': volumename, + 'srcname': srcname, + 'service': str(repservice), + 'sync_name': str(sync_name)}) + + rc, job = self.conn.InvokeMethod( + 'ModifyReplicaSynchronization', + repservice, + Operation=self._getnum(8, '16'), + Synchronization=sync_name) + + LOG.debug(_('Create Cloned Volume: Volume: %(volumename)s ' + 'Source Volume: %(srcname)s Return code: %(rc)lu') + % {'volumename': volumename, + 'srcname': srcname, + 'rc': rc}) + + if rc != 0L: + rc, errordesc = self._wait_for_job_complete(job) + if rc != 0L: + exception_message = (_('Error Create Cloned Volume: ' + 'Volume: %(volumename)s ' + 'Source Volume: %(srcname)s. ' + 'Return code: %(rc)lu. Error: %(error)s') + % {'volumename': volumename, + 'srcname': srcname, + 'rc': rc, + 'error': errordesc}) + LOG.error(exception_message) + raise exception.VolumeBackendAPIException( + data=exception_message) + + LOG.debug(_('Leaving create_cloned_volume: Volume: ' + '%(volumename)s Source Volume: %(srcname)s ' + 'Return code: %(rc)lu.') + % {'volumename': volumename, + 'srcname': srcname, + 'rc': rc}) + + def delete_volume(self, volume): + """Deletes an EMC volume.""" + LOG.debug(_('Entering delete_volume.')) + volumename = volume['name'] + LOG.info(_('Delete Volume: %(volume)s') + % {'volume': volumename}) + + self.conn = self._get_ecom_connection() + + vol_instance = self._find_lun(volume) + if vol_instance is None: + LOG.error(_('Volume %(name)s not found on the array. 
'
+                        'No volume to delete.')
+                      % {'name': volumename})
+            return
+
+        storage_system = vol_instance['SystemName']
+
+        configservice = self._find_storage_configuration_service(
+            storage_system)
+        if configservice is None:
+            exception_message = (_("Error Delete Volume: %(volumename)s. "
+                                   "Storage Configuration Service not found.")
+                                 % {'volumename': volumename})
+            LOG.error(exception_message)
+            raise exception.VolumeBackendAPIException(data=exception_message)
+
+        device_id = vol_instance['DeviceID']
+
+        LOG.debug(_('Delete Volume: %(name)s DeviceID: %(deviceid)s')
+                  % {'name': volumename,
+                     'deviceid': device_id})
+
+        LOG.debug(_('Delete Volume: %(name)s Method: EMCReturnToStoragePool '
+                    'ConfigService: %(service)s TheElements: '
+                    '%(vol_instance)s')
+                  % {'service': str(configservice),
+                     'name': volumename,
+                     'vol_instance': str(vol_instance.path)})
+
+        rc, job = self.conn.InvokeMethod(
+            'EMCReturnToStoragePool',
+            configservice, TheElements=[vol_instance.path])
+
+        if rc != 0L:
+            rc, errordesc = self._wait_for_job_complete(job)
+            if rc != 0L:
+                exception_message = (_('Error Delete Volume: %(volumename)s. '
+                                       'Return code: %(rc)lu. '
+                                       'Error: %(error)s')
+                                     % {'volumename': volumename,
+                                        'rc': rc,
+                                        'error': errordesc})
+                LOG.error(exception_message)
+                raise exception.VolumeBackendAPIException(
+                    data=exception_message)
+
+        LOG.debug(_('Leaving delete_volume: %(volumename)s Return code: '
+                    '%(rc)lu')
+                  % {'volumename': volumename,
+                     'rc': rc})
+
+    def create_snapshot(self, snapshot):
+        """Creates a snapshot."""
+        LOG.debug(_('Entering create_snapshot.'))
+
+        snapshotname = snapshot['name']
+        volumename = snapshot['volume_name']
+        LOG.info(_('Create snapshot: %(snapshot)s: volume: %(volume)s')
+                 % {'snapshot': snapshotname,
+                    'volume': volumename})
+
+        self.conn = self._get_ecom_connection()
+
+        volume = {}
+        volume['name'] = volumename
+        volume['provider_location'] = None
+        vol_instance = self._find_lun(volume)
+        device_id = vol_instance['DeviceID']
+        storage_system = vol_instance['SystemName']
+        LOG.debug(_('Device ID: %(deviceid)s: Storage System: '
+                    '%(storagesystem)s')
+                  % {'deviceid': device_id,
+                     'storagesystem': storage_system})
+
+        repservice = self._find_replication_service(storage_system)
+        if repservice is None:
+            exception_message = (_("Cannot find Replication Service to "
+                                   "create snapshot for volume %s.")
+                                 % volumename)
+            LOG.error(exception_message)
+            raise exception.VolumeBackendAPIException(data=exception_message)
+
+        LOG.debug(_("Create Snapshot: Method: CreateElementReplica: "
+                    "Target: %(snapshot)s Source: %(volume)s Replication "
+                    "Service: %(service)s ElementName: %(elementname)s Sync "
+                    "Type: 7 SourceElement: %(sourceelement)s.")
+                  % {'snapshot': snapshotname,
+                     'volume': volumename,
+                     'service': str(repservice),
+                     'elementname': snapshotname,
+                     'sourceelement': str(vol_instance.path)})
+
+        rc, job = self.conn.InvokeMethod(
+            'CreateElementReplica', repservice,
+            ElementName=snapshotname,
+            SyncType=self._getnum(7, '16'),
+            SourceElement=vol_instance.path)
+
+        LOG.debug(_('Create Snapshot: Volume: %(volumename)s '
+                    'Snapshot: %(snapshotname)s Return code: %(rc)lu')
+                  % {'volumename': volumename,
+                     'snapshotname': snapshotname,
+                     'rc': rc})
+
+        if rc != 0L:
+            rc, errordesc = self._wait_for_job_complete(job)
+            if rc != 0L:
+                exception_message = (_('Error Create Snapshot: %(snapshot)s '
+                                       'Volume: %(volume)s '
+                                       'Error: %(errordesc)s')
+                                     % {'snapshot': snapshotname,
+                                        'volume': volumename,
+                                        'errordesc': errordesc})
+                LOG.error(exception_message)
+                raise exception.VolumeBackendAPIException(
+                    data=exception_message)
+
+        LOG.debug(_('Leaving create_snapshot: Snapshot: %(snapshot)s '
+                    'Volume: %(volume)s Return code: %(rc)lu.') %
+                  {'snapshot': snapshotname, 'volume': volumename, 'rc': rc})
+
+    def delete_snapshot(self, snapshot):
+        """Deletes a snapshot."""
+        LOG.debug(_('Entering delete_snapshot.'))
+
+        snapshotname = snapshot['name']
+        volumename = snapshot['volume_name']
+        LOG.info(_('Delete Snapshot: %(snapshot)s: volume: %(volume)s')
+                 % {'snapshot': snapshotname,
+                    'volume': volumename})
+
+        self.conn = self._get_ecom_connection()
+
+        LOG.debug(_('Delete Snapshot: %(snapshot)s: volume: %(volume)s. '
+                    'Finding StorageSynchronized_SV_SV.')
+                  % {'snapshot': snapshotname,
+                     'volume': volumename})
+
+        sync_name, storage_system = self._find_storage_sync_sv_sv(
+            snapshotname, volumename, False)
+        if sync_name is None:
+            LOG.error(_('Snapshot: %(snapshot)s: volume: %(volume)s '
+                        'not found on the array. No snapshot to delete.')
+                      % {'snapshot': snapshotname,
+                         'volume': volumename})
+            return
+
+        repservice = self._find_replication_service(storage_system)
+        if repservice is None:
+            exception_message = (_("Cannot find Replication Service to "
+                                   "delete snapshot for volume %s.")
+                                 % volumename)
+            raise exception.VolumeBackendAPIException(data=exception_message)
+
+        # Delete snapshot - deletes both the target element
+        # and the snap session
+        LOG.debug(_("Delete Snapshot: Target: %(snapshot)s "
+                    "Source: %(volume)s. Method: "
+                    "ModifyReplicaSynchronization: "
+                    "Replication Service: %(service)s Operation: 19 "
+                    "Synchronization: %(sync_name)s.")
+                  % {'snapshot': snapshotname,
+                     'volume': volumename,
+                     'service': str(repservice),
+                     'sync_name': str(sync_name)})
+
+        rc, job = self.conn.InvokeMethod(
+            'ModifyReplicaSynchronization',
+            repservice,
+            Operation=self._getnum(19, '16'),
+            Synchronization=sync_name)
+
+        LOG.debug(_('Delete Snapshot: Volume: %(volumename)s Snapshot: '
+                    '%(snapshotname)s Return code: %(rc)lu')
+                  % {'volumename': volumename,
+                     'snapshotname': snapshotname,
+                     'rc': rc})
+
+        if rc != 0L:
+            rc, errordesc = self._wait_for_job_complete(job)
+            if rc != 0L:
+                exception_message = (_('Error Delete Snapshot: Volume: '
+                                       '%(volumename)s Snapshot: '
+                                       '%(snapshotname)s. '
+                                       'Return code: %(rc)lu. '
+                                       'Error: %(error)s')
+                                     % {'volumename': volumename,
+                                        'snapshotname': snapshotname,
+                                        'rc': rc,
+                                        'error': errordesc})
+                LOG.error(exception_message)
+                raise exception.VolumeBackendAPIException(
+                    data=exception_message)
+
+        LOG.debug(_('Leaving delete_snapshot: Volume: %(volumename)s '
+                    'Snapshot: %(snapshotname)s Return code: %(rc)lu.')
+                  % {'volumename': volumename,
+                     'snapshotname': snapshotname,
+                     'rc': rc})
+
+    def create_export(self, context, volume):
+        """Driver entry point to get the export info for a new volume."""
+        self.conn = self._get_ecom_connection()
+        volumename = volume['name']
+        LOG.info(_('Create export: %(volume)s')
+                 % {'volume': volumename})
+        vol_instance = self._find_lun(volume)
+        device_id = vol_instance['DeviceID']
+
+        LOG.debug(_('create_export: Volume: %(volume)s Device ID: '
+                    '%(device_id)s')
+                  % {'volume': volumename,
+                     'device_id': device_id})
+
+        return {'provider_location': device_id}
+
+    # Mapping method for VNX
+    def _expose_paths(self, configservice, vol_instance,
+                      connector):
+        """This method maps a volume to a host.
+
+        It adds a volume and initiator to a Storage Group
+        and therefore maps the volume to the host. 
+ """ + volumename = vol_instance['ElementName'] + lun_name = vol_instance['DeviceID'] + initiators = self._find_initiator_names(connector) + storage_system = vol_instance['SystemName'] + lunmask_ctrl = self._find_lunmasking_scsi_protocol_controller( + storage_system, connector) + + LOG.debug(_('ExposePaths: %(vol)s ConfigServicie: %(service)s ' + 'LUNames: %(lun_name)s InitiatorPortIDs: %(initiator)s ' + 'DeviceAccesses: 2') + % {'vol': str(vol_instance.path), + 'service': str(configservice), + 'lun_name': lun_name, + 'initiator': initiators}) + + if lunmask_ctrl is None: + rc, controller = self.conn.InvokeMethod( + 'ExposePaths', + configservice, LUNames=[lun_name], + InitiatorPortIDs=initiators, + DeviceAccesses=[self._getnum(2, '16')]) + else: + LOG.debug(_('ExposePaths parameter ' + 'LunMaskingSCSIProtocolController: ' + '%(lunmasking)s') + % {'lunmasking': str(lunmask_ctrl)}) + rc, controller = self.conn.InvokeMethod( + 'ExposePaths', + configservice, LUNames=[lun_name], + DeviceAccesses=[self._getnum(2, '16')], + ProtocolControllers=[lunmask_ctrl]) + + if rc != 0L: + msg = (_('Error mapping volume %s.') % volumename) + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + LOG.debug(_('ExposePaths for volume %s completed successfully.') + % volumename) + + # Unmapping method for VNX + def _hide_paths(self, configservice, vol_instance, + connector): + """This method unmaps a volume from the host. + + Removes a volume from the Storage Group + and therefore unmaps the volume from the host. + """ + volumename = vol_instance['ElementName'] + device_id = vol_instance['DeviceID'] + lunmask_ctrl = self._find_lunmasking_scsi_protocol_controller_for_vol( + vol_instance, connector) + + LOG.debug(_('HidePaths: %(vol)s ConfigServicie: %(service)s ' + 'LUNames: %(device_id)s LunMaskingSCSIProtocolController: ' + '%(lunmasking)s') + % {'vol': str(vol_instance.path), + 'service': str(configservice), + 'device_id': device_id, + 'lunmasking': str(lunmask_ctrl)}) + + rc, controller = self.conn.InvokeMethod( + 'HidePaths', configservice, + LUNames=[device_id], ProtocolControllers=[lunmask_ctrl]) + + if rc != 0L: + msg = (_('Error unmapping volume %s.') % volumename) + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + LOG.debug(_('HidePaths for volume %s completed successfully.') + % volumename) + + # Mapping method for VMAX + def _add_members(self, configservice, vol_instance): + """This method maps a volume to a host. + + Add volume to the Device Masking Group that belongs to + a Masking View. + """ + volumename = vol_instance['ElementName'] + masking_group = self._find_device_masking_group() + + LOG.debug(_('AddMembers: ConfigServicie: %(service)s MaskingGroup: ' + '%(masking_group)s Members: %(vol)s') + % {'service': str(configservice), + 'masking_group': str(masking_group), + 'vol': str(vol_instance.path)}) + + rc, job = self.conn.InvokeMethod( + 'AddMembers', configservice, + MaskingGroup=masking_group, Members=[vol_instance.path]) + + if rc != 0L: + rc, errordesc = self._wait_for_job_complete(job) + if rc != 0L: + msg = (_('Error mapping volume %(vol)s. %(error)s') % + {'vol': volumename, 'error': errordesc}) + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + LOG.debug(_('AddMembers for volume %s completed successfully.') + % volumename) + + # Unmapping method for VMAX + def _remove_members(self, configservice, vol_instance): + """This method unmaps a volume from a host. 
+ + Removes volume from the Device Masking Group that belongs to + a Masking View. + """ + volumename = vol_instance['ElementName'] + masking_group = self._find_device_masking_group() + + LOG.debug(_('RemoveMembers: ConfigServicie: %(service)s ' + 'MaskingGroup: %(masking_group)s Members: %(vol)s') + % {'service': str(configservice), + 'masking_group': str(masking_group), + 'vol': str(vol_instance.path)}) + + rc, job = self.conn.InvokeMethod('RemoveMembers', configservice, + MaskingGroup=masking_group, + Members=[vol_instance.path]) + + if rc != 0L: + rc, errordesc = self._wait_for_job_complete(job) + if rc != 0L: + msg = (_('Error unmapping volume %(vol)s. %(error)s') + % {'vol': volumename, 'error': errordesc}) + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + LOG.debug(_('RemoveMembers for volume %s completed successfully.') + % volumename) + + def _map_lun(self, volume, connector): + """Maps a volume to the host.""" + volumename = volume['name'] + LOG.info(_('Map volume: %(volume)s') + % {'volume': volumename}) + + vol_instance = self._find_lun(volume) + storage_system = vol_instance['SystemName'] + + configservice = self._find_controller_configuration_service( + storage_system) + if configservice is None: + exception_message = (_("Cannot find Controller Configuration " + "Service for storage system %s") + % storage_system) + raise exception.VolumeBackendAPIException(data=exception_message) + + isVMAX = storage_system.find('SYMMETRIX') + if isVMAX > -1: + self._add_members(configservice, vol_instance) + else: + self._expose_paths(configservice, vol_instance, connector) + + def _unmap_lun(self, volume, connector): + """Unmaps a volume from the host.""" + volumename = volume['name'] + LOG.info(_('Unmap volume: %(volume)s') + % {'volume': volumename}) + + device_info = self.find_device_number(volume) + device_number = device_info['hostlunid'] + if device_number is None: + LOG.info(_("Volume %s is not mapped. 
No volume to unmap.") + % (volumename)) + return + + vol_instance = self._find_lun(volume) + storage_system = vol_instance['SystemName'] + + configservice = self._find_controller_configuration_service( + storage_system) + if configservice is None: + exception_message = (_("Cannot find Controller Configuration " + "Service for storage system %s") + % storage_system) + raise exception.VolumeBackendAPIException(data=exception_message) + + isVMAX = storage_system.find('SYMMETRIX') + if isVMAX > -1: + self._remove_members(configservice, vol_instance) + else: + self._hide_paths(configservice, vol_instance, connector) + + def initialize_connection(self, volume, connector): + """Initializes the connection and returns connection info.""" + volumename = volume['name'] + LOG.info(_('Initialize connection: %(volume)s') + % {'volume': volumename}) + self.conn = self._get_ecom_connection() + device_info = self.find_device_number(volume) + device_number = device_info['hostlunid'] + if device_number is not None: + LOG.info(_("Volume %s is already mapped.") + % (volumename)) + else: + self._map_lun(volume, connector) + # Find host lun id again after the volume is exported to the host + device_info = self.find_device_number(volume) + + return device_info + + def terminate_connection(self, volume, connector): + """Disallow connection from connector.""" + volumename = volume['name'] + LOG.info(_('Terminate connection: %(volume)s') + % {'volume': volumename}) + self.conn = self._get_ecom_connection() + self._unmap_lun(volume, connector) + + def update_volume_status(self): + """Retrieve status info.""" + LOG.debug(_("Updating volume status")) + self.conn = self._get_ecom_connection() + storage_type = self._get_storage_type() + + pool, storagesystem = self._find_pool(storage_type, True) + + self.stats['total_capacity_gb'] = pool['TotalManagedSpace'] + self.stats['free_capacity_gb'] = pool['RemainingManagedSpace'] + + return self.stats + + def _get_storage_type(self, filename=None): + """Get the storage type from the config file.""" + if filename == None: + filename = self.configuration.cinder_emc_config_file + + file = open(filename, 'r') + data = file.read() + file.close() + dom = parseString(data) + storageTypes = dom.getElementsByTagName('StorageType') + if storageTypes is not None and len(storageTypes) > 0: + storageType = storageTypes[0].toxml() + storageType = storageType.replace('', '') + storageType = storageType.replace('', '') + LOG.debug(_("Found Storage Type: %s") % (storageType)) + return storageType + else: + exception_message = (_("Storage type not found.")) + LOG.error(exception_message) + raise exception.VolumeBackendAPIException(data=exception_message) + + def _get_masking_view(self, filename=None): + if filename == None: + filename = self.configuration.cinder_emc_config_file + + file = open(filename, 'r') + data = file.read() + file.close() + dom = parseString(data) + views = dom.getElementsByTagName('MaskingView') + if views is not None and len(views) > 0: + view = views[0].toxml().replace('', '') + view = view.replace('', '') + LOG.debug(_("Found Masking View: %s") % (view)) + return view + else: + LOG.debug(_("Masking View not found.")) + return None + + def _get_ecom_cred(self, filename=None): + if filename == None: + filename = self.configuration.cinder_emc_config_file + + file = open(filename, 'r') + data = file.read() + file.close() + dom = parseString(data) + ecomUsers = dom.getElementsByTagName('EcomUserName') + if ecomUsers is not None and len(ecomUsers) > 0: + ecomUser = 
ecomUsers[0].toxml().replace('', '') + ecomUser = ecomUser.replace('', '') + ecomPasswds = dom.getElementsByTagName('EcomPassword') + if ecomPasswds is not None and len(ecomPasswds) > 0: + ecomPasswd = ecomPasswds[0].toxml().replace('', '') + ecomPasswd = ecomPasswd.replace('', '') + if ecomUser is not None and ecomPasswd is not None: + return ecomUser, ecomPasswd + else: + LOG.debug(_("Ecom user not found.")) + return None + + def _get_ecom_server(self, filename=None): + if filename == None: + filename = self.configuration.cinder_emc_config_file + + file = open(filename, 'r') + data = file.read() + file.close() + dom = parseString(data) + ecomIps = dom.getElementsByTagName('EcomServerIp') + if ecomIps is not None and len(ecomIps) > 0: + ecomIp = ecomIps[0].toxml().replace('', '') + ecomIp = ecomIp.replace('', '') + ecomPorts = dom.getElementsByTagName('EcomServerPort') + if ecomPorts is not None and len(ecomPorts) > 0: + ecomPort = ecomPorts[0].toxml().replace('', '') + ecomPort = ecomPort.replace('', '') + if ecomIp is not None and ecomPort is not None: + LOG.debug(_("Ecom IP: %(ecomIp)s Port: %(ecomPort)s") % (locals())) + return ecomIp, ecomPort + else: + LOG.debug(_("Ecom server not found.")) + return None + + def _get_ecom_connection(self, filename=None): + conn = pywbem.WBEMConnection(self.url, (self.user, self.passwd), + default_namespace='root/emc') + if conn is None: + exception_message = (_("Cannot connect to ECOM server")) + raise exception.VolumeBackendAPIException(data=exception_message) + + return conn + + def _find_replication_service(self, storage_system): + foundRepService = None + repservices = self.conn.EnumerateInstanceNames( + 'EMC_ReplicationService') + for repservice in repservices: + if storage_system == repservice['SystemName']: + foundRepService = repservice + LOG.debug(_("Found Replication Service: %s") + % (str(repservice))) + break + + return foundRepService + + def _find_storage_configuration_service(self, storage_system): + foundConfigService = None + configservices = self.conn.EnumerateInstanceNames( + 'EMC_StorageConfigurationService') + for configservice in configservices: + if storage_system == configservice['SystemName']: + foundConfigService = configservice + LOG.debug(_("Found Storage Configuration Service: %s") + % (str(configservice))) + break + + return foundConfigService + + def _find_controller_configuration_service(self, storage_system): + foundConfigService = None + configservices = self.conn.EnumerateInstanceNames( + 'EMC_ControllerConfigurationService') + for configservice in configservices: + if storage_system == configservice['SystemName']: + foundConfigService = configservice + LOG.debug(_("Found Controller Configuration Service: %s") + % (str(configservice))) + break + + return foundConfigService + + def _find_storage_hardwareid_service(self, storage_system): + foundConfigService = None + configservices = self.conn.EnumerateInstanceNames( + 'EMC_StorageHardwareIDManagementService') + for configservice in configservices: + if storage_system == configservice['SystemName']: + foundConfigService = configservice + LOG.debug(_("Found Storage Hardware ID Management Service: %s") + % (str(configservice))) + break + + return foundConfigService + + # Find pool based on storage_type + def _find_pool(self, storage_type, details=False): + foundPool = None + systemname = None + # Only get instance names if details flag is False; + # Otherwise get the whole instances + if details is False: + vpools = self.conn.EnumerateInstanceNames( + 
'EMC_VirtualProvisioningPool') + upools = self.conn.EnumerateInstanceNames( + 'EMC_UnifiedStoragePool') + else: + vpools = self.conn.EnumerateInstances( + 'EMC_VirtualProvisioningPool') + upools = self.conn.EnumerateInstances( + 'EMC_UnifiedStoragePool') + + for upool in upools: + poolinstance = upool['InstanceID'] + # Example: CLARiiON+APM00115204878+U+Pool 0 + poolname, systemname = self._parse_pool_instance_id(poolinstance) + if poolname is not None and systemname is not None: + if str(storage_type) == str(poolname): + foundPool = upool + break + + if foundPool is None: + for vpool in vpools: + poolinstance = vpool['InstanceID'] + # Example: SYMMETRIX+000195900551+TP+Sol_Innov + poolname, systemname = self._parse_pool_instance_id( + poolinstance) + if poolname is not None and systemname is not None: + if str(storage_type) == str(poolname): + foundPool = vpool + break + + if foundPool is None: + exception_message = (_("Pool %(storage_type)s is not found.") + % {'storage_type': storage_type}) + LOG.error(exception_message) + raise exception.VolumeBackendAPIException(data=exception_message) + + if systemname is None: + exception_message = (_("Storage system not found for pool " + "%(storage_type)s.") + % {'storage_type': storage_type}) + LOG.error(exception_message) + raise exception.VolumeBackendAPIException(data=exception_message) + + LOG.debug(_("Pool: %(pool)s SystemName: %(systemname)s.") + % {'pool': str(foundPool), 'systemname': systemname}) + return foundPool, systemname + + def _parse_pool_instance_id(self, instanceid): + # Example of pool InstanceId: CLARiiON+APM00115204878+U+Pool 0 + poolname = None + systemname = None + endp = instanceid.rfind('+') + if endp > -1: + poolname = instanceid[endp + 1:] + + idarray = instanceid.split('+') + if len(idarray) > 2: + systemname = idarray[0] + '+' + idarray[1] + + LOG.debug(_("Pool name: %(poolname)s System name: %(systemname)s.") + % {'poolname': poolname, 'systemname': systemname}) + return poolname, systemname + + def _find_lun(self, volume): + foundinstance = None + try: + device_id = volume['provider_location'] + except Exception: + device_id = None + + volumename = volume['name'] + + names = self.conn.EnumerateInstanceNames('EMC_StorageVolume') + + for n in names: + if device_id is not None: + if n['DeviceID'] == device_id: + vol_instance = self.conn.GetInstance(n) + foundinstance = vol_instance + break + else: + continue + + else: + vol_instance = self.conn.GetInstance(n) + if vol_instance['ElementName'] == volumename: + foundinstance = vol_instance + volume['provider_location'] = foundinstance['DeviceID'] + break + + if foundinstance is None: + LOG.debug(_("Volume %(volumename)s not found on the array.") + % {'volumename': volumename}) + else: + LOG.debug(_("Volume name: %(volumename)s Volume instance: " + "%(vol_instance)s.") + % {'volumename': volumename, + 'vol_instance': str(foundinstance.path)}) + + return foundinstance + + def _find_storage_sync_sv_sv(self, snapshotname, volumename, + waitforsync=True): + foundsyncname = None + storage_system = None + percent_synced = 0 + + LOG.debug(_("Source: %(volumename)s Target: %(snapshotname)s.") + % {'volumename': volumename, 'snapshotname': snapshotname}) + + names = self.conn.EnumerateInstanceNames( + 'SE_StorageSynchronized_SV_SV') + + for n in names: + snapshot_instance = self.conn.GetInstance(n['SyncedElement'], + LocalOnly=False) + if snapshotname != snapshot_instance['ElementName']: + continue + + vol_instance = self.conn.GetInstance(n['SystemElement'], + LocalOnly=False) + 
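+            # Both ends of the relationship must match: the SyncedElement
+            # checked above is the snapshot (target), and the SystemElement
+            # checked below is the source volume.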
if vol_instance['ElementName'] == volumename: + foundsyncname = n + storage_system = vol_instance['SystemName'] + if waitforsync: + sync_instance = self.conn.GetInstance(n, LocalOnly=False) + percent_synced = sync_instance['PercentSynced'] + break + + if foundsyncname is None: + LOG.debug(_("Source: %(volumename)s Target: %(snapshotname)s. " + "Storage Synchronized not found. ") + % {'volumename': volumename, + 'snapshotname': snapshotname}) + else: + LOG.debug(_("Storage system: %(storage_system)s " + "Storage Synchronized instance: %(sync)s.") + % {'storage_system': storage_system, + 'sync': str(foundsyncname)}) + # Wait for SE_StorageSynchronized_SV_SV to be fully synced + while waitforsync and percent_synced < 100: + time.sleep(10) + sync_instance = self.conn.GetInstance(foundsyncname, + LocalOnly=False) + percent_synced = sync_instance['PercentSynced'] + + return foundsyncname, storage_system + + def _find_initiator_names(self, connector): + foundinitiatornames = [] + iscsi = 'iscsi' + fc = 'fc' + name = 'initiator name' + if self.protocol.lower() == iscsi and connector['initiator']: + foundinitiatornames.append(connector['initiator']) + elif self.protocol.lower() == fc and connector['wwpns']: + for wwn in connector['wwpns']: + foundinitiatornames.append(wwn) + name = 'world wide port names' + + if foundinitiatornames is None or len(foundinitiatornames) == 0: + msg = (_('Error finding %s.') % name) + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + LOG.debug(_("Found %(name)s: %(initiator)s.") + % {'name': name, + 'initiator': foundinitiatornames}) + return foundinitiatornames + + def _wait_for_job_complete(self, job): + jobinstancename = job['Job'] + + while True: + jobinstance = self.conn.GetInstance(jobinstancename, + LocalOnly=False) + jobstate = jobinstance['JobState'] + # From ValueMap of JobState in CIM_ConcreteJob + # 2L=New, 3L=Starting, 4L=Running, 32767L=Queue Pending + # ValueMap("2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13..32767, + # 32768..65535"), + # Values("New, Starting, Running, Suspended, Shutting Down, + # Completed, Terminated, Killed, Exception, Service, + # Query Pending, DMTF Reserved, Vendor Reserved")] + if jobstate in [2L, 3L, 4L, 32767L]: + time.sleep(10) + else: + break + + rc = jobinstance['ErrorCode'] + errordesc = jobinstance['ErrorDescription'] + + return rc, errordesc + + # Find LunMaskingSCSIProtocolController for the local host on the + # specified storage system + def _find_lunmasking_scsi_protocol_controller(self, storage_system, + connector): + foundCtrl = None + initiators = self._find_initiator_names(connector) + controllers = self.conn.EnumerateInstanceNames( + 'EMC_LunMaskingSCSIProtocolController') + for ctrl in controllers: + if storage_system != ctrl['SystemName']: + continue + associators = self.conn.Associators( + ctrl, + resultClass='EMC_StorageHardwareID') + for assoc in associators: + # if EMC_StorageHardwareID matches the initiator, + # we found the existing EMC_LunMaskingSCSIProtocolController + # (Storage Group for VNX) + # we can use for masking a new LUN + hardwareid = assoc['StorageID'] + for initiator in initiators: + if hardwareid.lower() == initiator.lower(): + foundCtrl = ctrl + break + + if foundCtrl is not None: + break + + if foundCtrl is not None: + break + + LOG.debug(_("LunMaskingSCSIProtocolController for storage system " + "%(storage_system)s and initiator %(initiator)s is " + "%(ctrl)s.") + % {'storage_system': storage_system, + 'initiator': initiators, + 'ctrl': str(foundCtrl)}) + return 
foundCtrl
+
+    # Find LunMaskingSCSIProtocolController for the local host and the
+    # specified storage volume
+    def _find_lunmasking_scsi_protocol_controller_for_vol(self, vol_instance,
+                                                          connector):
+        foundCtrl = None
+        initiators = self._find_initiator_names(connector)
+        controllers = self.conn.AssociatorNames(
+            vol_instance.path,
+            resultClass='EMC_LunMaskingSCSIProtocolController')
+
+        for ctrl in controllers:
+            associators = self.conn.Associators(
+                ctrl,
+                resultClass='EMC_StorageHardwareID')
+            for assoc in associators:
+                # if EMC_StorageHardwareID matches the initiator,
+                # we found the existing EMC_LunMaskingSCSIProtocolController
+                # (Storage Group for VNX)
+                # we can use for masking a new LUN
+                hardwareid = assoc['StorageID']
+                for initiator in initiators:
+                    if hardwareid.lower() == initiator.lower():
+                        foundCtrl = ctrl
+                        break
+
+                if foundCtrl is not None:
+                    break
+
+            if foundCtrl is not None:
+                break
+
+        LOG.debug(_("LunMaskingSCSIProtocolController for storage volume "
+                    "%(vol)s and initiator %(initiator)s is %(ctrl)s.")
+                  % {'vol': str(vol_instance.path), 'initiator': initiators,
+                     'ctrl': str(foundCtrl)})
+        return foundCtrl
+
+    # Find out how many volumes are mapped to a host
+    # associated to the LunMaskingSCSIProtocolController
+    def get_num_volumes_mapped(self, volume, connector):
+        numVolumesMapped = 0
+        volumename = volume['name']
+        vol_instance = self._find_lun(volume)
+        if vol_instance is None:
+            msg = (_('Volume %(name)s not found on the array. '
+                     'Cannot determine if there are volumes mapped.')
+                   % {'name': volumename})
+            LOG.error(msg)
+            raise exception.VolumeBackendAPIException(data=msg)
+
+        storage_system = vol_instance['SystemName']
+
+        ctrl = self._find_lunmasking_scsi_protocol_controller(
+            storage_system,
+            connector)
+
+        LOG.debug(_("LunMaskingSCSIProtocolController for storage system "
+                    "%(storage)s and %(connector)s is %(ctrl)s.")
+                  % {'storage': storage_system,
+                     'connector': connector,
+                     'ctrl': str(ctrl)})
+
+        associators = self.conn.Associators(
+            ctrl,
+            resultClass='EMC_StorageVolume')
+
+        numVolumesMapped = len(associators)
+
+        LOG.debug(_("Found %(numVolumesMapped)d volumes on storage system "
+                    "%(storage)s mapped to %(connector)s.")
+                  % {'numVolumesMapped': numVolumesMapped,
+                     'storage': storage_system,
+                     'connector': connector})
+
+        return numVolumesMapped
+
+    # Find an available device number that a host can see
+    def _find_avail_device_number(self, storage_system):
+        out_device_number = '000000'
+        out_num_device_number = 0
+        numlist = []
+        myunitnames = []
+
+        unitnames = self.conn.EnumerateInstanceNames(
+            'CIM_ProtocolControllerForUnit')
+        for unitname in unitnames:
+            controller = unitname['Antecedent']
+            if storage_system != controller['SystemName']:
+                continue
+            classname = controller['CreationClassName']
+            index = classname.find('LunMaskingSCSIProtocolController')
+            if index > -1:
+                unitinstance = self.conn.GetInstance(unitname,
+                                                     LocalOnly=False)
+                numDeviceNumber = int(unitinstance['DeviceNumber'])
+                numlist.append(numDeviceNumber)
+                myunitnames.append(unitname)
+
+        # max() on an empty list raises ValueError; only bump the device
+        # number when at least one mapped unit was found.
+        if numlist:
+            maxnum = max(numlist)
+            out_num_device_number = maxnum + 1
+
+        out_device_number = '%06d' % out_num_device_number
+
+        LOG.debug(_("Available device number on %(storage)s: %(device)s.")
+                  % {'storage': storage_system, 'device': out_device_number})
+        return out_device_number
+
+    # Find a device number that a host can see for a volume
+    def find_device_number(self, volume):
+        out_num_device_number = None
+
+        volumename = volume['name']
+        vol_instance = self._find_lun(volume)
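+        # The references walked below are CIM_ProtocolControllerForUnit
+        # associations: VNX volumes hang off a
+        # LunMaskingSCSIProtocolController, VMAX volumes off a
+        # Symm_LunMaskingView; in both cases DeviceNumber holds the host
+        # lun id as a hex string.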
storage_system = vol_instance['SystemName'] + sp = None + try: + sp = vol_instance['EMCCurrentOwningStorageProcessor'] + except KeyError: + # VMAX LUN doesn't have this property + pass + + unitnames = self.conn.ReferenceNames( + vol_instance.path, + ResultClass='CIM_ProtocolControllerForUnit') + + for unitname in unitnames: + controller = unitname['Antecedent'] + classname = controller['CreationClassName'] + index = classname.find('LunMaskingSCSIProtocolController') + if index > -1: # VNX + # Get an instance of CIM_ProtocolControllerForUnit + unitinstance = self.conn.GetInstance(unitname, + LocalOnly=False) + numDeviceNumber = int(unitinstance['DeviceNumber'], 16) + out_num_device_number = numDeviceNumber + break + else: + index = classname.find('Symm_LunMaskingView') + if index > -1: # VMAX + unitinstance = self.conn.GetInstance(unitname, + LocalOnly=False) + numDeviceNumber = int(unitinstance['DeviceNumber'], 16) + out_num_device_number = numDeviceNumber + break + + if out_num_device_number is None: + LOG.info(_("Device number not found for volume " + "%(volumename)s %(vol_instance)s.") % + {'volumename': volumename, + 'vol_instance': str(vol_instance.path)}) + else: + LOG.debug(_("Found device number %(device)d for volume " + "%(volumename)s %(vol_instance)s.") % + {'device': out_num_device_number, + 'volumename': volumename, + 'vol_instance': str(vol_instance.path)}) + + data = {'hostlunid': out_num_device_number, + 'storagesystem': storage_system, + 'owningsp': sp} + + LOG.debug(_("Device info: %(data)s.") % {'data': data}) + + return data + + def _find_device_masking_group(self): + """Finds the Device Masking Group in a masking view.""" + foundMaskingGroup = None + maskingview_name = self._get_masking_view() + + maskingviews = self.conn.EnumerateInstanceNames( + 'EMC_LunMaskingSCSIProtocolController') + for view in maskingviews: + instance = self.conn.GetInstance(view, LocalOnly=False) + if maskingview_name == instance['ElementName']: + foundView = view + break + + groups = self.conn.AssociatorNames( + foundView, + ResultClass='SE_DeviceMaskingGroup') + foundMaskingGroup = groups[0] + + LOG.debug(_("Masking view: %(view)s DeviceMaskingGroup: %(masking)s.") + % {'view': maskingview_name, + 'masking': str(foundMaskingGroup)}) + + return foundMaskingGroup + + # Find a StorageProcessorSystem given sp and storage system + def _find_storage_processor_system(self, owningsp, storage_system): + foundSystem = None + systems = self.conn.EnumerateInstanceNames( + 'EMC_StorageProcessorSystem') + for system in systems: + # Clar_StorageProcessorSystem.CreationClassName= + # "Clar_StorageProcessorSystem",Name="CLARiiON+APM00123907237+SP_A" + idarray = system['Name'].split('+') + if len(idarray) > 2: + storsystemname = idarray[0] + '+' + idarray[1] + sp = idarray[2] + + if (storage_system == storsystemname and + owningsp == sp): + foundSystem = system + LOG.debug(_("Found Storage Processor System: %s") + % (str(system))) + break + + return foundSystem + + # Find EMC_iSCSIProtocolEndpoint for the specified sp + def _find_iscsi_protocol_endpoints(self, owningsp, storage_system): + foundEndpoints = [] + + processor = self._find_storage_processor_system( + owningsp, + storage_system) + + associators = self.conn.Associators( + processor, + resultClass='EMC_iSCSIProtocolEndpoint') + for assoc in associators: + # Name = iqn.1992-04.com.emc:cx.apm00123907237.a8,t,0x0001 + # SystemName = CLARiiON+APM00123907237+SP_A+8 + arr = assoc['SystemName'].split('+') + if len(arr) > 2: + processor_name = arr[0] + '+' + 
arr[1] + '+' + arr[2] + if processor_name == processor['Name']: + arr2 = assoc['Name'].split(',') + if len(arr2) > 1: + foundEndpoints.append(arr2[0]) + + LOG.debug(_("iSCSIProtocolEndpoint for storage system " + "%(storage_system)s and SP %(sp)s is " + "%(endpoint)s.") + % {'storage_system': storage_system, + 'sp': owningsp, + 'endpoint': str(foundEndpoints)}) + return foundEndpoints + + def _getnum(self, num, datatype): + try: + result = { + '8': pywbem.Uint8(num), + '16': pywbem.Uint16(num), + '32': pywbem.Uint32(num), + '64': pywbem.Uint64(num) + } + result = result.get(datatype, num) + except NameError: + result = num + + return result + + # Find target WWNs + def get_target_wwns(self, storage_system, connector): + target_wwns = [] + + configservice = self._find_storage_hardwareid_service( + storage_system) + if configservice is None: + exception_msg = (_("Error finding Storage Hardware ID Service.")) + LOG.error(exception_msg) + raise exception.VolumeBackendAPIException(data=exception_msg) + + hardwareids = self._find_storage_hardwareids(connector) + + LOG.debug(_('EMCGetTargetEndpoints: Service: %(service)s ' + 'Storage HardwareIDs: %(hardwareids)s.') + % {'service': str(configservice), + 'hardwareids': str(hardwareids)}) + + for hardwareid in hardwareids: + rc, targetendpoints = self.conn.InvokeMethod( + 'EMCGetTargetEndpoints', + configservice, + HardwareId=hardwareid) + + if rc != 0L: + msg = (_('Error finding Target WWNs.')) + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + endpoints = targetendpoints['TargetEndpoints'] + for targetendpoint in endpoints: + wwn = targetendpoint['Name'] + # Add target wwn to the list if it is not already there + if not any(d.get('wwn', None) == wwn for d in target_wwns): + target_wwns.append({'wwn': wwn}) + LOG.debug(_('Add target WWN: %s.') % wwn) + + LOG.debug(_('Target WWNs: %s.') % target_wwns) + + return target_wwns + + # Find Storage Hardware IDs + def _find_storage_hardwareids(self, connector): + foundInstances = [] + wwpns = self._find_initiator_names(connector) + hardwareids = self.conn.EnumerateInstances( + 'SE_StorageHardwareID') + for hardwareid in hardwareids: + storid = hardwareid['StorageID'] + for wwpn in wwpns: + if wwpn.lower() == storid.lower(): + foundInstances.append(hardwareid.path) + + LOG.debug(_("Storage Hardware IDs for %(wwpns)s is " + "%(foundInstances)s.") + % {'wwpns': str(wwpns), + 'foundInstances': str(foundInstances)}) + + return foundInstances diff --git a/cinder/volume/drivers/emc/emc_smis_iscsi.py b/cinder/volume/drivers/emc/emc_smis_iscsi.py new file mode 100644 index 0000000000..25308c53a3 --- /dev/null +++ b/cinder/volume/drivers/emc/emc_smis_iscsi.py @@ -0,0 +1,246 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2012 EMC Corporation. +# Copyright (c) 2012 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +ISCSI Drivers for EMC VNX and VMAX arrays based on SMI-S. 
+ +""" + +import os +import time + +from cinder import exception +from cinder import flags +from cinder.openstack.common import log as logging +from cinder import utils +from cinder.volume import driver +from cinder.volume.drivers.emc import emc_smis_common + +LOG = logging.getLogger(__name__) + +FLAGS = flags.FLAGS + + +class EMCSMISISCSIDriver(driver.ISCSIDriver): + """EMC ISCSI Drivers for VMAX and VNX using SMI-S.""" + + def __init__(self, *args, **kwargs): + + super(EMCSMISISCSIDriver, self).__init__(*args, **kwargs) + self.common = emc_smis_common.EMCSMISCommon( + 'iSCSI', + configuration=self.configuration) + + def check_for_setup_error(self): + pass + + def create_volume(self, volume): + """Creates a EMC(VMAX/VNX) volume.""" + self.common.create_volume(volume) + + def create_volume_from_snapshot(self, volume, snapshot): + """Creates a volume from a snapshot.""" + self.common.create_volume_from_snapshot(volume, snapshot) + + def create_cloned_volume(self, volume, src_vref): + """Creates a cloned volume.""" + self.common.create_cloned_volume(volume, src_vref) + + def delete_volume(self, volume): + """Deletes an EMC volume.""" + self.common.delete_volume(volume) + + def create_snapshot(self, snapshot): + """Creates a snapshot.""" + self.common.create_snapshot(snapshot) + + def delete_snapshot(self, snapshot): + """Deletes a snapshot.""" + self.common.delete_snapshot(snapshot) + + def ensure_export(self, context, volume): + """Driver entry point to get the export info for an existing volume.""" + pass + + def create_export(self, context, volume): + """Driver entry point to get the export info for a new volume.""" + return self.common.create_export(context, volume) + + def remove_export(self, context, volume): + """Driver entry point to remove an export for a volume.""" + pass + + def check_for_export(self, context, volume_id): + """Make sure volume is exported.""" + pass + + def initialize_connection(self, volume, connector): + """Initializes the connection and returns connection info. + + The iscsi driver returns a driver_volume_type of 'iscsi'. + the format of the driver data is defined in _get_iscsi_properties. + Example return value:: + + { + 'driver_volume_type': 'iscsi' + 'data': { + 'target_discovered': True, + 'target_iqn': 'iqn.2010-10.org.openstack:volume-00000001', + 'target_portal': '127.0.0.0.1:3260', + 'volume_id': 1, + } + } + + """ + self.common.initialize_connection(volume, connector) + + iscsi_properties = self._get_iscsi_properties(volume) + return { + 'driver_volume_type': 'iscsi', + 'data': iscsi_properties + } + + def _do_iscsi_discovery(self, volume): + + LOG.warn(_("ISCSI provider_location not stored, using discovery")) + + (out, _err) = self._execute('iscsiadm', '-m', 'discovery', + '-t', 'sendtargets', '-p', + self.configuration.iscsi_ip_address, + run_as_root=True) + targets = [] + for target in out.splitlines(): + targets.append(target) + + return targets + + def _get_iscsi_properties(self, volume): + """Gets iscsi configuration. + + We ideally get saved information in the volume entity, but fall back + to discovery if need be. Discovery may be completely removed in future + The properties are: + + :target_discovered: boolean indicating whether discovery was used + + :target_iqn: the IQN of the iSCSI target + + :target_portal: the portal of the iSCSI target + + :target_lun: the lun of the iSCSI target + + :volume_id: the id of the volume (currently used by xen) + + :auth_method:, :auth_username:, :auth_password: + + the authentication details. 
Right now, either auth_method is not + present meaning no authentication, or auth_method == `CHAP` + meaning use CHAP with the specified credentials. + """ + properties = {} + + location = self._do_iscsi_discovery(volume) + if not location: + raise exception.InvalidVolume(_("Could not find iSCSI export " + " for volume %s") % + (volume['name'])) + + LOG.debug(_("ISCSI Discovery: Found %s") % (location)) + properties['target_discovered'] = True + + device_info = self.common.find_device_number(volume) + if device_info is None or device_info['hostlunid'] is None: + exception_message = (_("Cannot find device number for volume %s") + % volume['name']) + raise exception.VolumeBackendAPIException(data=exception_message) + + device_number = device_info['hostlunid'] + storage_system = device_info['storagesystem'] + + # sp is "SP_A" or "SP_B" + sp = device_info['owningsp'] + endpoints = [] + if sp: + # endpointss example: + # [iqn.1992-04.com.emc:cx.apm00123907237.a8, + # iqn.1992-04.com.emc:cx.apm00123907237.a9] + endpoints = self.common._find_iscsi_protocol_endpoints( + sp, storage_system) + + foundEndpoint = False + for loc in location: + results = loc.split(" ") + properties['target_portal'] = results[0].split(",")[0] + properties['target_iqn'] = results[1] + # owning sp is None for VMAX + # for VNX, find the target_iqn that matches the endpoint + # target_iqn example: iqn.1992-04.com.emc:cx.apm00123907237.a8 + # or iqn.1992-04.com.emc:cx.apm00123907237.b8 + if not sp: + break + for endpoint in endpoints: + if properties['target_iqn'] == endpoint: + LOG.debug(_("Found iSCSI endpoint: %s") % endpoint) + foundEndpoint = True + break + if foundEndpoint: + break + + if sp and not foundEndpoint: + LOG.warn(_("ISCSI endpoint not found for SP %(sp)s on " + "storage system %(storage)s.") + % {'sp': sp, + 'storage': storage_system}) + + properties['target_lun'] = device_number + + properties['volume_id'] = volume['id'] + + auth = volume['provider_auth'] + if auth: + (auth_method, auth_username, auth_secret) = auth.split() + + properties['auth_method'] = auth_method + properties['auth_username'] = auth_username + properties['auth_password'] = auth_secret + + LOG.debug(_("ISCSI properties: %s") % (properties)) + + return properties + + def terminate_connection(self, volume, connector, **kwargs): + """Disallow connection from connector.""" + self.common.terminate_connection(volume, connector) + + def get_volume_stats(self, refresh=False): + """Get volume status. + + If 'refresh' is True, run update the stats first. + """ + if refresh: + self.update_volume_status() + + return self._stats + + def update_volume_status(self): + """Retrieve status info from volume group.""" + LOG.debug(_("Updating volume status")) + data = self.common.update_volume_status() + backend_name = self.configuration.safe_get('volume_backend_name') + data['volume_backend_name'] = backend_name or 'EMCSMISISCSIDriver' + data['storage_protocol'] = 'iSCSI' + self._stats = data diff --git a/cinder/volume/drivers/glusterfs.py b/cinder/volume/drivers/glusterfs.py new file mode 100644 index 0000000000..21f3c30c90 --- /dev/null +++ b/cinder/volume/drivers/glusterfs.py @@ -0,0 +1,283 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2013 Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import errno
+import os
+
+from oslo.config import cfg
+
+from cinder import exception
+from cinder import flags
+from cinder.openstack.common import log as logging
+from cinder.volume.drivers import nfs
+
+LOG = logging.getLogger(__name__)
+
+volume_opts = [
+    cfg.StrOpt('glusterfs_shares_config',
+               default='/etc/cinder/glusterfs_shares',
+               help='File with the list of available gluster shares'),
+    cfg.StrOpt('glusterfs_mount_point_base',
+               default='$state_path/mnt',
+               help='Base dir containing mount points for gluster shares'),
+    cfg.StrOpt('glusterfs_disk_util',
+               default='df',
+               help='Use du or df for free space calculation'),
+    cfg.BoolOpt('glusterfs_sparsed_volumes',
+                default=True,
+                help=('Create volumes as sparsed files which take no '
+                      'space. If set to False volume is created as a '
+                      'regular file. In such case volume creation takes '
+                      'a lot of time.'))]
+VERSION = '1.0'
+
+FLAGS = flags.FLAGS
+FLAGS.register_opts(volume_opts)
+
+
+class GlusterfsDriver(nfs.RemoteFsDriver):
+    """Gluster based cinder driver.
+
+    Creates file on Gluster share for using it as block device on
+    hypervisor.
+    """
+
+    def __init__(self, *args, **kwargs):
+        super(GlusterfsDriver, self).__init__(*args, **kwargs)
+        self.configuration.append_config_values(volume_opts)
+
+    def do_setup(self, context):
+        """Any initialization the volume driver does while starting."""
+        super(GlusterfsDriver, self).do_setup(context)
+
+        config = self.configuration.glusterfs_shares_config
+        if not config:
+            msg = (_("There's no Gluster config file configured (%s)") %
+                   'glusterfs_shares_config')
+            LOG.warn(msg)
+            raise exception.GlusterfsException(msg)
+        if not os.path.exists(config):
+            msg = (_("Gluster config file at %(config)s doesn't exist") %
+                   locals())
+            LOG.warn(msg)
+            raise exception.GlusterfsException(msg)
+
+        try:
+            self._execute('mount.glusterfs', check_exit_code=False)
+        except OSError as exc:
+            if exc.errno == errno.ENOENT:
+                raise exception.GlusterfsException(
+                    _('mount.glusterfs is not installed'))
+            else:
+                raise
+
+    def check_for_setup_error(self):
+        """Just to override parent behavior."""
+        pass
+
+    def create_cloned_volume(self, volume, src_vref):
+        raise NotImplementedError()
+
+    def create_volume(self, volume):
+        """Creates a volume."""
+
+        self._ensure_shares_mounted()
+
+        volume['provider_location'] = self._find_share(volume['size'])
+
+        LOG.info(_('casted to %s') % volume['provider_location'])
+
+        self._do_create_volume(volume)
+
+        return {'provider_location': volume['provider_location']}
+
+    def delete_volume(self, volume):
+        """Deletes a logical volume."""
+
+        if not volume['provider_location']:
+            LOG.warn(_('Volume %s does not have provider_location '
+                       'specified, skipping'), volume['name'])
+            return
+
+        self._ensure_share_mounted(volume['provider_location'])
+
+        mounted_path = self.local_path(volume)
+
+        self._execute('rm', '-f', mounted_path, run_as_root=True)
+
+    def ensure_export(self, ctx, volume):
+        """Synchronously recreates an export for a logical volume."""
+        self._ensure_share_mounted(volume['provider_location'])
+
+    def create_export(self, ctx, volume):
+        """Exports the volume. 
+        changes to the volume object to be persisted."""
+        pass
+
+    def remove_export(self, ctx, volume):
+        """Removes an export for a logical volume."""
+        pass
+
+    def initialize_connection(self, volume, connector):
+        """Allow connection to connector and return connection info."""
+        data = {'export': volume['provider_location'],
+                'name': volume['name']}
+        return {
+            'driver_volume_type': 'glusterfs',
+            'data': data
+        }
+
+    def terminate_connection(self, volume, connector, **kwargs):
+        """Disallow connection from connector."""
+        pass
+
+    def _do_create_volume(self, volume):
+        """Create a volume on given glusterfs_share.
+
+        :param volume: volume reference
+        """
+        volume_path = self.local_path(volume)
+        volume_size = volume['size']
+
+        if self.configuration.glusterfs_sparsed_volumes:
+            self._create_sparsed_file(volume_path, volume_size)
+        else:
+            self._create_regular_file(volume_path, volume_size)
+
+        self._set_rw_permissions_for_all(volume_path)
+
+    def _ensure_shares_mounted(self):
+        """Look for GlusterFS shares in the flags and try to mount them
+        locally."""
+        self._mounted_shares = []
+
+        for share in self._load_shares_config():
+            try:
+                self._ensure_share_mounted(share)
+                self._mounted_shares.append(share)
+            except Exception as exc:
+                LOG.warning(_('Exception during mounting %s') % (exc,))
+
+        LOG.debug('Available shares %s' % str(self._mounted_shares))
+
+    def _load_shares_config(self):
+        return [share.strip() for share
+                in open(self.configuration.glusterfs_shares_config)
+                if share and not share.startswith('#')]
+
+    def _ensure_share_mounted(self, glusterfs_share):
+        """Mount GlusterFS share.
+
+        :param glusterfs_share:
+        """
+        mount_path = self._get_mount_point_for_share(glusterfs_share)
+        self._mount_glusterfs(glusterfs_share, mount_path, ensure=True)
+
+    def _find_share(self, volume_size_for):
+        """Choose GlusterFS share among available ones for given volume size.
+
+        Current implementation looks for greatest capacity.
+
+        :param volume_size_for: int size in GB
+        """
+
+        if not self._mounted_shares:
+            raise exception.GlusterfsNoSharesMounted()
+
+        greatest_size = 0
+        greatest_share = None
+
+        for glusterfs_share in self._mounted_shares:
+            capacity = self._get_available_capacity(glusterfs_share)[0]
+            if capacity > greatest_size:
+                greatest_share = glusterfs_share
+                greatest_size = capacity
+
+        if volume_size_for * 1024 * 1024 * 1024 > greatest_size:
+            raise exception.GlusterfsNoSuitableShareFound(
+                volume_size=volume_size_for)
+        return greatest_share
+
+    def _get_mount_point_for_share(self, glusterfs_share):
+        """Return mount point for share.
+
+        :param glusterfs_share: example 172.18.194.100:/var/glusterfs
+        """
+        return os.path.join(self.configuration.glusterfs_mount_point_base,
+                            self._get_hash_str(glusterfs_share))
+
+    def _get_available_capacity(self, glusterfs_share):
+        """Calculate available space on the GlusterFS share.
+ :param glusterfs_share: example 172.18.194.100:/var/glusterfs + """ + mount_point = self._get_mount_point_for_share(glusterfs_share) + + out, _ = self._execute('df', '--portability', '--block-size', '1', + mount_point, run_as_root=True) + out = out.splitlines()[1] + + available = 0 + + size = int(out.split()[1]) + if self.configuration.glusterfs_disk_util == 'df': + available = int(out.split()[3]) + else: + out, _ = self._execute('du', '-sb', '--apparent-size', + '--exclude', '*snapshot*', mount_point, + run_as_root=True) + used = int(out.split()[0]) + available = size - used + + return available, size + + def _mount_glusterfs(self, glusterfs_share, mount_path, ensure=False): + """Mount GlusterFS share to mount path.""" + self._execute('mkdir', '-p', mount_path) + + try: + self._execute('mount', '-t', 'glusterfs', glusterfs_share, + mount_path, run_as_root=True) + except exception.ProcessExecutionError as exc: + if ensure and 'already mounted' in exc.stderr: + LOG.warn(_("%s is already mounted"), glusterfs_share) + else: + raise + + def get_volume_stats(self, refresh=False): + """Get volume stats. + + If 'refresh' is True, update the stats first.""" + if refresh or not self._stats: + self._update_volume_stats() + + return self._stats + + def _update_volume_stats(self): + """Retrieve stats info from volume group.""" + + data = {} + backend_name = self.configuration.safe_get('volume_backend_name') + data['volume_backend_name'] = backend_name or 'GlusterFS' + data['vendor_name'] = 'Open Source' + data['driver_version'] = VERSION + data['storage_protocol'] = 'glusterfs' + + self._ensure_shares_mounted() + + global_capacity = 0 + global_free = 0 + for nfs_share in self._mounted_shares: + free, capacity = self._get_available_capacity(nfs_share) + global_capacity += capacity + global_free += free + + data['total_capacity_gb'] = global_capacity / 1024.0 ** 3 + data['free_capacity_gb'] = global_free / 1024.0 ** 3 + data['reserved_percentage'] = 0 + data['QoS_support'] = False + self._stats = data diff --git a/cinder/volume/drivers/huawei/__init__.py b/cinder/volume/drivers/huawei/__init__.py new file mode 100644 index 0000000000..0f4b6d394e --- /dev/null +++ b/cinder/volume/drivers/huawei/__init__.py @@ -0,0 +1,15 @@ +# Copyright (c) 2012 Huawei Technologies Co., Ltd. +# Copyright (c) 2012 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
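As a quick illustration of the `df` parsing in `_get_available_capacity()` of cinder/volume/drivers/glusterfs.py above: the snippet below replays the same column extraction against a hypothetical `df --portability --block-size 1` output. The share name and byte counts are made up for the example.

    # Hypothetical output of: df --portability --block-size 1 <mount_point>
    sample = (
        "Filesystem 1-blocks Used Available Capacity Mounted on\n"
        "172.18.194.100:/var/glusterfs 10737418240 2147483648 8589934592"
        " 20% /mnt/gluster\n"
    )
    # Same parsing as the driver: skip the header row, split on whitespace,
    # total size is column 2 and free bytes column 4 (used when
    # glusterfs_disk_util == 'df').
    fields = sample.splitlines()[1].split()
    size = int(fields[1])       # total bytes: 10737418240 (10 GiB)
    available = int(fields[3])  # free bytes: 8589934592 (8 GiB)
    print(available, size)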
diff --git a/cinder/volume/drivers/huawei/cinder_huawei_conf.xml.sample b/cinder/volume/drivers/huawei/cinder_huawei_conf.xml.sample
new file mode 100644
index 0000000000..8d5a577bf8
--- /dev/null
+++ b/cinder/volume/drivers/huawei/cinder_huawei_conf.xml.sample
@@ -0,0 +1,34 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<config>
+    <Storage>
+        <ControllerIP0>x.x.x.x</ControllerIP0>
+        <ControllerIP1>x.x.x.x</ControllerIP1>
+        <UserName>xxxxxx</UserName>
+        <UserPassword>xxxxxx</UserPassword>
+    </Storage>
+    <LUN>
+        <!--LUN type: Thick or Thin-->
+        <LUNType>Thick</LUNType>
+        <!--Stripe unit size in KB-->
+        <StripUnitSize>64</StripUnitSize>
+        <!--Write type: 1 for write back, 2 for write through-->
+        <WriteType>1</WriteType>
+        <!--Mirror switch: 0 for disabled, 1 for enabled-->
+        <MirrorSwitch>1</MirrorSwitch>
+        <!--Prefetch type and value; type 3 (intelligent) is the default-->
+        <Prefetch Type="3" Value="0"/>
+        <!--Storage pools used for LUN creation-->
+        <StoragePool Name="xxxxxx"/>
+        <StoragePool Name="xxxxxx"/>
+    </LUN>
+    <iSCSI>
+        <!--Target IP used when an initiator is not listed below-->
+        <DefaultTargetIP>x.x.x.x</DefaultTargetIP>
+        <!--Per-initiator target IP mappings-->
+        <Initiator Name="xxxxxx" TargetIP="x.x.x.x"/>
+        <Initiator Name="xxxxxx" TargetIP="x.x.x.x"/>
+    </iSCSI>
+</config>
diff --git a/cinder/volume/drivers/huawei/huawei_iscsi.py b/cinder/volume/drivers/huawei/huawei_iscsi.py
new file mode 100644
index 0000000000..869552d030
--- /dev/null
+++ b/cinder/volume/drivers/huawei/huawei_iscsi.py
@@ -0,0 +1,1547 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+# Copyright (c) 2012 Huawei Technologies Co., Ltd.
+# Copyright (c) 2012 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Volume driver for HUAWEI T series and Dorado storage systems.
+"""
+import base64
+import os
+import paramiko
+import re
+import socket
+import threading
+import time
+
+from oslo.config import cfg
+from xml.etree import ElementTree as ET
+
+from cinder import exception
+from cinder.openstack.common import excutils
+from cinder.openstack.common import log as logging
+from cinder import utils
+from cinder.volume import driver
+
+LOG = logging.getLogger(__name__)
+
+huawei_opt = [
+    cfg.StrOpt('cinder_huawei_conf_file',
+               default='/etc/cinder/cinder_huawei_conf.xml',
+               help='config data for cinder huawei plugin')]
+
+HOST_GROUP_NAME = 'HostGroup_OpenStack'
+HOST_NAME_PREFIX = 'Host_'
+HOST_PORT_PREFIX = 'HostPort_'
+VOL_AND_SNAP_NAME_PREFIX = 'OpenStack_'
+READBUFFERSIZE = 8192
+
+
+class SSHConn(utils.SSHPool):
+    """Define a new class inherited from SSHPool.
+
+    This class rewrites method create() and defines a private method
+    ssh_read() which reads results of ssh commands.
+    """
+
+    def __init__(self, ip, port, conn_timeout, login, password,
+                 privatekey=None, *args, **kwargs):
+
+        super(SSHConn, self).__init__(ip, port, conn_timeout, login,
+                                      password, privatekey=privatekey,
+                                      *args, **kwargs)
+        self.lock = threading.Lock()
+
+    def create(self):
+        """Create an SSH client.
+
+        Because setting socket timeout to be None will cause client.close()
+        blocking, here we have to rewrite method create() and use the default
+        socket timeout value 0.1.
+ """ + try: + ssh = paramiko.SSHClient() + ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) + if self.password: + ssh.connect(self.ip, + port=self.port, + username=self.login, + password=self.password, + timeout=self.conn_timeout) + elif self.privatekey: + pkfile = os.path.expanduser(self.privatekey) + privatekey = paramiko.RSAKey.from_private_key_file(pkfile) + ssh.connect(self.ip, + port=self.port, + username=self.login, + pkey=privatekey, + timeout=self.conn_timeout) + else: + msg = _("Specify a password or private_key") + raise exception.CinderException(msg) + + if self.conn_timeout: + transport = ssh.get_transport() + transport.set_keepalive(self.conn_timeout) + return ssh + except Exception as e: + msg = _("Error connecting via ssh: %s") % e + LOG.error(msg) + raise paramiko.SSHException(msg) + + def ssh_read(self, channel, cmd, timeout): + """Get results of CLI commands.""" + result = '' + user = self.login + user_flg = user + ':/>$' + channel.settimeout(timeout) + while True: + try: + result = result + channel.recv(READBUFFERSIZE) + except socket.timeout: + raise exception.VolumeBackendAPIException(_('read timed out')) + else: + if re.search(cmd, result) and re.search(user_flg, result): + if not re.search('Welcome', result): + break + elif re.search(user + ':/>' + cmd, result): + break + elif re.search('(y/n)', result): + break + return '\r\n'.join(result.split('\r\n')[:-1]) + + +class HuaweiISCSIDriver(driver.ISCSIDriver): + """Huawei T series and Dorado iSCSI volume driver.""" + + def __init__(self, *args, **kwargs): + super(HuaweiISCSIDriver, self).__init__(*args, **kwargs) + self.configuration.append_config_values(huawei_opt) + self.device_type = {} + self.login_info = {} + self.hostgroup_id = None + self.ssh_pool = None + + def do_setup(self, context): + """Check config file.""" + LOG.debug(_('do_setup.')) + + self._check_conf_file() + + def check_for_setup_error(self): + """Try to connect with device and get device type.""" + LOG.debug(_('check_for_setup_error.')) + + self.login_info = self._get_login_info() + self.device_type = self._get_device_type() + if not self.device_type['type']: + err_msg = (_('check_for_setup_error: Can not get device type.')) + LOG.error(err_msg) + raise exception.VolumeBackendAPIException(data=err_msg) + + LOG.debug(_('check_for_setup_error: Device type is:%(type)s, ' + 'version is:%(version)s.') + % {'type': self.device_type['type'], + 'version': self.device_type['version']}) + + # Now only version V1 is supported. + if self.device_type['version'] != 'V100R': + err_msg = (_('check_for_setup_error: Product version not right. ' + 'Please make sure the product version is V1.')) + LOG.error(err_msg) + raise exception.VolumeBackendAPIException(data=err_msg) + + # Check whether storage pools are configured. + # Dorado2100 G2 needn't to configure this. 
+        if self.device_type['type'] != 'Dorado2100 G2':
+            root = self._read_xml()
+            pool_node = root.findall('LUN/StoragePool')
+            if not pool_node:
+                err_msg = (_('check_for_setup_error: Storage Pool must be '
+                             'configured.'))
+                LOG.error(err_msg)
+                raise exception.VolumeBackendAPIException(data=err_msg)
+
+    def create_volume(self, volume):
+        """Create a new volume."""
+        volume_name = self._name_translate(volume['name'])
+
+        LOG.debug(_('create_volume:volume name: %s.') % volume_name)
+
+        self.login_info = self._get_login_info()
+        if int(volume['size']) == 0:
+            volume_size = '100M'
+        else:
+            volume_size = '%sG' % volume['size']
+
+        self._create_volume(volume_name, volume_size)
+
+    def delete_volume(self, volume):
+        """Delete a volume."""
+        volume_name = self._name_translate(volume['name'])
+
+        LOG.debug(_('delete_volume: volume name: %s.') % volume_name)
+
+        self.login_info = self._get_login_info()
+        volume_id = self._find_lun(volume_name)
+        if volume_id is not None:
+            self._delete_volume(volume_name, volume_id)
+        else:
+            err_msg = (_('delete_volume:No need to delete volume. '
+                         'Volume %(name)s does not exist.')
+                       % {'name': volume['name']})
+            LOG.error(err_msg)
+
+    def create_export(self, context, volume):
+        """Driver entry point to get the export info for a new volume."""
+        volume_name = self._name_translate(volume['name'])
+
+        LOG.debug(_('create_export: volume name:%s') % volume['name'])
+
+        lun_id = self._find_lun(volume_name)
+        if lun_id is None:
+            err_msg = (_('create_export:Volume %(name)s does not exist.')
+                       % {'name': volume_name})
+            LOG.error(err_msg)
+            raise exception.VolumeBackendAPIException(data=err_msg)
+
+        return {'provider_location': lun_id}
+
+    def ensure_export(self, context, volume):
+        """Driver entry point to get the export info for an existing
+        volume."""
+        pass
+
+    def remove_export(self, context, volume_id):
+        """Driver entry point to remove an export for a volume."""
+        pass
+
+    def initialize_connection(self, volume, connector):
+        """Map a volume to a host and return target iSCSI information."""
+        initiator_name = connector['initiator']
+        volume_name = self._name_translate(volume['name'])
+
+        LOG.debug(_('initialize_connection: volume name: %(volume)s. '
+                    'initiator name: %(ini)s.')
+                  % {'volume': volume_name,
+                     'ini': initiator_name})
+
+        self.login_info = self._get_login_info()
+        # Get target iSCSI iqn.
+        iscsi_conf = self._get_iscsi_info()
+        target_ip = None
+        for ini in iscsi_conf['Initiator']:
+            if ini['Name'] == initiator_name:
+                target_ip = ini['TargetIP']
+                break
+        if not target_ip:
+            if not iscsi_conf['DefaultTargetIP']:
+                err_msg = (_('initialize_connection:Failed to find target ip '
+                             'for initiator:%(initiatorname)s, '
+                             'please check config file.')
+                           % {'initiatorname': initiator_name})
+                LOG.error(err_msg)
+                raise exception.VolumeBackendAPIException(data=err_msg)
+            target_ip = iscsi_conf['DefaultTargetIP']
+
+        (target_iqn, controller) = self._get_tgt_iqn(target_ip)
+        if not target_iqn:
+            err_msg = (_('initialize_connection:Failed to find target iSCSI '
+                         'iqn. Target IP:%(ip)s')
+                       % {'ip': target_ip})
+            LOG.error(err_msg)
+            raise exception.VolumeBackendAPIException(data=err_msg)
+
+        # Create hostgroup and host.
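+        # Objects created below (illustrative):
+        #   HostGroup_OpenStack             - one shared host group
+        #   Host_<hash(initiator_name)>     - one host per initiator
+        #   HostPort_<hash(initiator_name)> - iSCSI port carrying the IQN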
+        hostgroup_name = HOST_GROUP_NAME
+        self.hostgroup_id = self._find_hostgroup(hostgroup_name)
+        if self.hostgroup_id is None:
+            self._create_hostgroup(hostgroup_name)
+            self.hostgroup_id = self._find_hostgroup(hostgroup_name)
+
+        host_name = HOST_NAME_PREFIX + str(hash(initiator_name))
+        host_id = self._find_host_in_hostgroup(host_name, self.hostgroup_id)
+        if host_id is None:
+            self._add_host(host_name, self.hostgroup_id)
+            host_id = self._find_host_in_hostgroup(host_name,
+                                                   self.hostgroup_id)
+
+        # Create an initiator.
+        added = self._check_initiator(initiator_name)
+        if not added:
+            self._add_initiator(initiator_name)
+
+        # Add the initiator to host.
+        port_name = HOST_PORT_PREFIX + str(hash(initiator_name))
+        port_info = initiator_name
+        portadded = False
+        hostport_info = self._get_hostport_info(host_id)
+        if hostport_info:
+            for hostport in hostport_info:
+                if hostport['info'] == initiator_name:
+                    portadded = True
+                    break
+        if not portadded:
+            self._add_hostport(port_name, host_id, port_info)
+
+        LOG.debug(_('initialize_connection:host name: %(host)s, '
+                    'initiator name: %(ini)s, '
+                    'hostport name: %(port)s')
+                  % {'host': host_name,
+                     'ini': initiator_name,
+                     'port': port_name})
+
+        # Map a LUN to a host if not mapped.
+        lun_id = self._find_lun(volume_name)
+        if lun_id is None:
+            err_msg = (_('initialize_connection:Failed to find the '
+                         'given volume. '
+                         'volume name:%(volume)s.')
+                       % {'volume': volume_name})
+            raise exception.VolumeBackendAPIException(data=err_msg)
+
+        hostlun_id = None
+        map_info = self._get_map_info(host_id)
+        # Make sure the hostLUN ID starts from 1.
+        new_hostlun_id = 1
+        new_hostlunid_found = False
+        if map_info:
+            for map in map_info:
+                if map['devlunid'] == lun_id:
+                    hostlun_id = map['hostlunid']
+                    break
+                elif not new_hostlunid_found:
+                    if new_hostlun_id < int(map['hostlunid']):
+                        new_hostlunid_found = True
+                    else:
+                        new_hostlun_id = int(map['hostlunid']) + 1
+        # The LUN is not mapped to the host.
+        if not hostlun_id:
+            self._map_lun(lun_id, host_id, new_hostlun_id)
+            hostlun_id = self._get_hostlunid(host_id, lun_id)
+
+        # Change the LUN's owning controller for better performance.
+        if self._get_lun_controller(lun_id) != controller:
+            self._change_lun_controller(lun_id, controller)
+
+        # Return iSCSI properties.
+        properties = {}
+        properties['target_discovered'] = False
+        properties['target_portal'] = ('%s:%s' % (target_ip, '3260'))
+        properties['target_iqn'] = target_iqn
+        properties['target_lun'] = int(hostlun_id)
+        properties['volume_id'] = volume['id']
+        auth = volume['provider_auth']
+        if auth:
+            (auth_method, auth_username, auth_secret) = auth.split()
+
+            properties['auth_method'] = auth_method
+            properties['auth_username'] = auth_username
+            properties['auth_password'] = auth_secret
+
+        return {'driver_volume_type': 'iscsi', 'data': properties}
+
+    def terminate_connection(self, volume, connector, **kwargs):
+        """Delete map between a volume and a host."""
+        initiator_name = connector['initiator']
+        volume_name = self._name_translate(volume['name'])
+
+        LOG.debug(_('terminate_connection:volume name: %(volume)s, '
+                    'initiator name: %(ini)s.')
+                  % {'volume': volume_name,
+                     'ini': initiator_name})
+
+        self.login_info = self._get_login_info()
+        host_name = HOST_NAME_PREFIX + str(hash(initiator_name))
+        host_id = self._find_host_in_hostgroup(host_name, self.hostgroup_id)
+        if host_id is None:
+            err_msg = (_('terminate_connection:Host does not exist.
' + 'Host name:%(host)s.') + % {'host': host_name}) + LOG.error(err_msg) + raise exception.VolumeBackendAPIException(data=err_msg) + + # Delete host map. + lun_id = self._find_lun(volume_name) + if lun_id is None: + err_msg = (_('terminate_connection:volume does not exist. ' + 'volume name:%(volume)s') + % {'volume': volume_name}) + LOG.error(err_msg) + raise exception.VolumeBackendAPIException(data=err_msg) + + map_id = None + mapnum = 0 + map_info = self._get_map_info(host_id) + if map_info: + mapnum = len(map_info) + for map in map_info: + if map['devlunid'] == lun_id: + map_id = map['mapid'] + break + if map_id is not None: + self._delete_map(map_id) + mapnum = mapnum - 1 + else: + LOG.error(_('terminate_connection:No map between host ' + 'and volume. Host name:%(hostname)s, ' + 'volume name:%(volumename)s.') + % {'hostname': host_name, + 'volumename': volume_name}) + + # Delete host initiator when no LUN mapped to it. + portnum = 0 + hostportinfo = self._get_hostport_info(host_id) + if hostportinfo: + portnum = len(hostportinfo) + for hostport in hostportinfo: + if hostport['info'] == initiator_name and mapnum == 0: + self._delete_hostport(hostport['id']) + self._delete_initiator(initiator_name) + portnum = portnum - 1 + break + else: + LOG.error(_('terminate_connection:No initiator is added ' + 'to the host. Host name:%(hostname)s') + % {'hostname': host_name}) + + # Delete host when no initiator added to it. + if portnum == 0: + self._delete_host(host_id) + + def create_snapshot(self, snapshot): + """Create a snapshot.""" + snapshot_name = self._name_translate(snapshot['name']) + volume_name = self._name_translate(snapshot['volume_name']) + + LOG.debug(_('create_snapshot:snapshot name:%(snapshot)s, ' + 'volume name:%(volume)s.') + % {'snapshot': snapshot_name, + 'volume': volume_name}) + + self.login_info = self._get_login_info() + if self.device_type['type'] == 'Dorado2100 G2': + err_msg = (_('create_snapshot:Device does not support snapshot.')) + + LOG.error(err_msg) + raise exception.VolumeBackendAPIException(data=err_msg) + + if self._is_resource_pool_enough() is False: + err_msg = (_('create_snapshot:' + 'Resource pool needs 1GB valid size at least.')) + LOG.error(err_msg) + raise exception.VolumeBackendAPIException(data=err_msg) + + lun_id = self._find_lun(volume_name) + if lun_id is None: + err_msg = (_('create_snapshot:Volume does not exist. ' + 'Volume name:%(name)s') + % {'name': volume_name}) + LOG.error(err_msg) + raise exception.VolumeBackendAPIException(data=err_msg) + + self._create_snapshot(snapshot_name, lun_id) + snapshot_id = self._find_snapshot(snapshot_name) + if not snapshot_id: + err_msg = (_('create_snapshot:Snapshot does not exist. 
'
+                         'Snapshot name:%(name)s')
+                       % {'name': snapshot_name})
+            LOG.error(err_msg)
+            raise exception.VolumeBackendAPIException(data=err_msg)
+        self._active_snapshot(snapshot_id)
+
+    def delete_snapshot(self, snapshot):
+        """Delete a snapshot."""
+        snapshot_name = self._name_translate(snapshot['name'])
+        volume_name = self._name_translate(snapshot['volume_name'])
+
+        LOG.debug(_('delete_snapshot:snapshot name:%(snapshot)s, '
+                    'volume name:%(volume)s.')
+                  % {'snapshot': snapshot_name,
+                     'volume': volume_name})
+
+        self.login_info = self._get_login_info()
+        if self.device_type['type'] == 'Dorado2100 G2':
+            err_msg = (_('delete_snapshot:Device does not support snapshot.'))
+            LOG.error(err_msg)
+            raise exception.VolumeBackendAPIException(data=err_msg)
+
+        snapshot_id = self._find_snapshot(snapshot_name)
+        if snapshot_id is not None:
+            self._disable_snapshot(snapshot_id)
+            self._delete_snapshot(snapshot_id)
+        else:
+            err_msg = (_('delete_snapshot:Snapshot does not exist. '
+                         'snapshot name:%(snap)s')
+                       % {'snap': snapshot_name})
+            LOG.debug(err_msg)
+
+    def create_volume_from_snapshot(self, volume, snapshot):
+        """Create a volume from a snapshot.
+
+        We use LUNcopy to create a new LUN from snapshot.
+        """
+        snapshot_name = self._name_translate(snapshot['name'])
+        volume_name = self._name_translate(volume['name'])
+
+        LOG.debug(_('create_volume_from_snapshot:snapshot '
+                    'name:%(snapshot)s, '
+                    'volume name:%(volume)s.')
+                  % {'snapshot': snapshot_name,
+                     'volume': volume_name})
+
+        self.login_info = self._get_login_info()
+        if self.device_type['type'].find('Dorado') > -1:
+            err_msg = (_('create_volume_from_snapshot:Device does '
+                         'not support creating volume from snapshot. '
+                         'Volume name:%(volume)s, '
+                         'snapshot name:%(snapshot)s.')
+                       % {'volume': volume_name,
+                          'snapshot': snapshot_name})
+            LOG.error(err_msg)
+            raise exception.VolumeBackendAPIException(data=err_msg)
+
+        snapshot_id = self._find_snapshot(snapshot_name)
+        if snapshot_id is None:
+            err_msg = (_('create_volume_from_snapshot:Snapshot '
+                         'does not exist. Snapshot name:%(name)s')
+                       % {'name': snapshot_name})
+            LOG.error(err_msg)
+            raise exception.VolumeBackendAPIException(data=err_msg)
+
+        # Create a target LUN.
+        if int(volume['size']) == 0:
+            volume_size = '%sG' % snapshot['volume_size']
+        else:
+            volume_size = '%sG' % volume['size']
+
+        self._create_volume(volume_name, volume_size)
+        volume_id = self._find_lun(volume_name)
+        luncopy_name = volume_name
+        try:
+            self._create_luncopy(luncopy_name, snapshot_id, volume_id)
+            luncopy_id = self._find_luncopy(luncopy_name)
+            self._start_luncopy(luncopy_id)
+            self._wait_for_luncopy(luncopy_name)
+        # If LUNcopy failed, we should delete the target volume.
+        except Exception:
+            with excutils.save_and_reraise_exception():
+                self._delete_luncopy(luncopy_id)
+                self._delete_volume(volume_name, volume_id)
+
+        self._delete_luncopy(luncopy_id)
+
+    def get_volume_stats(self, refresh=False):
+        """Get volume status.
+
+        If 'refresh' is True, update the stats first.
+        """
+        if refresh:
+            self._update_volume_status()
+
+        return self._stats
+
+    def _check_conf_file(self):
+        """Check the config file, make sure the key elements are set."""
+        root = self._read_xml()
+        try:
+            IP1 = root.findtext('Storage/ControllerIP0')
+            IP2 = root.findtext('Storage/ControllerIP1')
+            username = root.findtext('Storage/UserName')
+            pwd = root.findtext('Storage/UserPassword')
+
+            isconfwrong = False
+            if ((not IP1 and not IP2) or
+                    (not username) or
+                    (not pwd)):
+                err_msg = (_('Config file is wrong. Controller IP, '
+                             'UserName and UserPassword must be set.'))
+                LOG.error(err_msg)
+                raise exception.InvalidInput(reason=err_msg)
+
+        except Exception as err:
+            LOG.error(_('_check_conf_file: %s') % str(err))
+            raise exception.VolumeBackendAPIException(data=err)
+
+    def _read_xml(self):
+        """Open xml file."""
+        filename = self.configuration.cinder_huawei_conf_file
+        try:
+            tree = ET.parse(filename)
+            root = tree.getroot()
+
+        except Exception as err:
+            LOG.error(_('_read_xml:%s') % err)
+            raise exception.VolumeBackendAPIException(data=err)
+        return root
+
+    def _get_login_info(self):
+        """Get login IP, username and password from config file."""
+        logininfo = {}
+        try:
+            filename = self.configuration.cinder_huawei_conf_file
+            tree = ET.parse(filename)
+            root = tree.getroot()
+            logininfo['ControllerIP0'] = root.findtext('Storage/ControllerIP0')
+            logininfo['ControllerIP1'] = root.findtext('Storage/ControllerIP1')
+
+            need_encode = False
+            for key in ['UserName', 'UserPassword']:
+                node = root.find('Storage/%s' % key)
+                node_text = node.text
+                if node_text.find('!$$$') == 0:
+                    logininfo[key] = base64.b64decode(node_text[4:])
+                else:
+                    logininfo[key] = node_text
+                    node.text = '!$$$' + base64.b64encode(node_text)
+                    need_encode = True
+            if need_encode:
+                try:
+                    tree.write(filename, 'UTF-8')
+                except Exception as err:
+                    LOG.error(_('Error writing login information to xml. %s')
+                              % err)
+
+        except Exception as err:
+            LOG.error(_('_get_login_info error. %s') % err)
+            raise exception.VolumeBackendAPIException(data=err)
+        return logininfo
+
+    def _get_lun_set_info(self):
+        """Get parameters from config file for creating LUN."""
+        # Default LUN set information
+        lunsetinfo = {'LUNType': 'Thick',
+                      'StripUnitSize': '64',
+                      'WriteType': '1',
+                      'MirrorSwitch': '1',
+                      'PrefetchType': '3',
+                      'PrefetchValue': '0',
+                      'PrefetchTimes': '0',
+                      'StoragePool': 'RAID_001'}
+
+        root = self._read_xml()
+        try:
+            luntype = root.findtext('LUN/LUNType')
+            if luntype in ['Thick', 'Thin']:
+                lunsetinfo['LUNType'] = luntype
+            elif luntype:
+                err_msg = (_('Config file is wrong. LUNType must be "Thin" '
+                             'or "Thick". LUNType:%(type)s')
+                           % {'type': luntype})
+                raise exception.VolumeBackendAPIException(data=err_msg)
+
+            # Here we do not judge whether the parameters are right.
+            # CLI will return error responses if the parameters are not
+            # right.
+            stripunitsize = root.findtext('LUN/StripUnitSize')
+            if stripunitsize:
+                lunsetinfo['StripUnitSize'] = stripunitsize
+            writetype = root.findtext('LUN/WriteType')
+            if writetype:
+                lunsetinfo['WriteType'] = writetype
+            mirrorswitch = root.findtext('LUN/MirrorSwitch')
+            if mirrorswitch:
+                lunsetinfo['MirrorSwitch'] = mirrorswitch
+
+            if self.device_type['type'] == 'Tseries':
+                pooltype = lunsetinfo['LUNType']
+                prefetch = root.find('LUN/Prefetch')
+                if prefetch and prefetch.attrib['Type']:
+                    lunsetinfo['PrefetchType'] = prefetch.attrib['Type']
+                    if lunsetinfo['PrefetchType'] == '1':
+                        lunsetinfo['PrefetchValue'] = prefetch.attrib['Value']
+                    elif lunsetinfo['PrefetchType'] == '2':
+                        lunsetinfo['PrefetchTimes'] = prefetch.attrib['Value']
+                else:
+                    LOG.debug(_('_get_lun_set_info:Use default prefetch type. '
+                                'Prefetch type:Intelligent.'))
+
+            # No need to set Prefetch type for Dorado.
+            elif self.device_type['type'] == 'Dorado5100':
+                pooltype = 'Thick'
+            elif self.device_type['type'] == 'Dorado2100 G2':
+                return lunsetinfo
+
+            poolsinfo = self._find_pool_info(pooltype)
+            if not poolsinfo:
+                err_msg = (_('_get_lun_set_info:No available pools! '
+                             'Please check whether storage pool is created.'))
+                LOG.error(err_msg)
+                raise exception.VolumeBackendAPIException(data=err_msg)
+
+            pools = root.findall('LUN/StoragePool')
+            lunsetinfo['StoragePool'] = \
+                self._get_maximum_pool(pools, poolsinfo, luntype)
+
+        except Exception as err:
+            LOG.error(_('_get_lun_set_info:%s') % err)
+            raise exception.VolumeBackendAPIException(data=err)
+
+        return lunsetinfo
+
+    def _find_pool_info(self, pooltype):
+        """Return pools information created in storage device."""
+        if pooltype == 'Thick':
+            cli_cmd = ('showrg')
+        else:
+            cli_cmd = ('showpool')
+
+        out = self._execute_cli(cli_cmd)
+
+        en = out.split('\r\n')
+        if len(en) <= 6:
+            return None
+
+        pools_list = []
+        for i in range(6, len(en) - 2):
+            r = en[i].split()
+            pools_list.append(r)
+        return pools_list
+
+    def _get_maximum_pool(self, poolinconf, poolindev, luntype):
+        """Get the maximum pool from config file.
+
+        According to the given pools' names in the config file,
+        we select the pool with the maximum free capacity.
+        """
+        maxpoolid = None
+        maxpoolsize = 0
+        if luntype == 'Thin':
+            nameindex = 1
+            sizeindex = 4
+        else:
+            nameindex = 5
+            sizeindex = 3
+
+        for pool in poolinconf:
+            poolname = pool.attrib['Name']
+            for pooldetail in poolindev:
+                if pooldetail[nameindex] == poolname:
+                    if int(float(pooldetail[sizeindex])) > maxpoolsize:
+                        maxpoolid = pooldetail[0]
+                        maxpoolsize = int(float(pooldetail[sizeindex]))
+                    break
+        if maxpoolid is not None:
+            return maxpoolid
+        else:
+            err_msg = (_('_get_maximum_pool:maxpoolid is None. '
+                         'Please check config file and make sure '
+                         'the "Name" in "StoragePool" is right.'))
+            raise exception.VolumeBackendAPIException(data=err_msg)
+
+    def _get_iscsi_info(self):
+        """Get iSCSI info from config file."""
+        iscsiinfo = {}
+        root = self._read_xml()
+        try:
+            iscsiinfo['DefaultTargetIP'] = \
+                root.findtext('iSCSI/DefaultTargetIP')
+            initiator_list = []
+            for dic in root.findall('iSCSI/Initiator'):
+                initiator_list.append(dic.attrib)
+            iscsiinfo['Initiator'] = initiator_list
+
+        except Exception as err:
+            LOG.error(_('_get_iscsi_info:%s') % str(err))
+
+        return iscsiinfo
+
+    def _execute_cli(self, cmd):
+        """Build SSH connection to execute CLI commands.
+
+        If the connection to the first controller times out,
+        try to connect to the other controller.
+        """
+        LOG.debug(_('CLI command:%s') % cmd)
+        connect_times = 0
+        ip0 = self.login_info['ControllerIP0']
+        ip1 = self.login_info['ControllerIP1']
+        user = self.login_info['UserName']
+        pwd = self.login_info['UserPassword']
+        if not self.ssh_pool:
+            self.ssh_pool = SSHConn(ip0, 22, 30, user, pwd)
+        ssh_client = None
+        while True:
+            if connect_times == 1:
+                # Switch to the other controller.
+                self.ssh_pool.lock.acquire()
+                if ssh_client:
+                    if ssh_client.server_ip == self.ssh_pool.ip:
+                        if self.ssh_pool.ip == ip0:
+                            self.ssh_pool.ip = ip1
+                        else:
+                            self.ssh_pool.ip = ip0
+                    # Create a new client.
+                    if ssh_client.chan:
+                        ssh_client.chan.close()
+                        ssh_client.chan = None
+                    ssh_client.server_ip = None
+                    ssh_client.close()
+                    ssh_client = None
+                    ssh_client = self.ssh_pool.create()
+                else:
+                    self.ssh_pool.ip = ip1
+                self.ssh_pool.lock.release()
+            try:
+                if not ssh_client:
+                    ssh_client = self.ssh_pool.get()
+                # "server_ip" shows the controller connecting with the
+                # ssh client.
+                if ('server_ip' not in ssh_client.__dict__ or
+                        not ssh_client.server_ip):
+                    self.ssh_pool.lock.acquire()
+                    ssh_client.server_ip = self.ssh_pool.ip
+                    self.ssh_pool.lock.release()
+                # An SSH client owns one "chan".
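+                # Flow of one CLI exchange (illustrative): the command is
+                # sent over the channel, output is read until either the
+                # CLI prompt ('<user>:/>') or a '(y/n)' confirmation
+                # appears; confirmations are answered with 'y' and reading
+                # resumes until the prompt returns.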
+ if ('chan' not in ssh_client.__dict__ or + not ssh_client.chan): + ssh_client.chan =\ + utils.create_channel(ssh_client, 600, 800) + + while True: + ssh_client.chan.send(cmd + '\n') + out = self.ssh_pool.ssh_read(ssh_client.chan, cmd, 20) + if out.find('(y/n)') > -1: + cmd = 'y' + else: + break + self.ssh_pool.put(ssh_client) + + index = out.find(user + ':/>') + if index > -1: + return out[index:] + else: + return out + + except Exception as err: + if connect_times < 1: + connect_times += 1 + continue + else: + if ssh_client: + self.ssh_pool.remove(ssh_client) + LOG.error(_('_execute_cli:%s') % err) + raise exception.VolumeBackendAPIException(data=err) + + def _name_translate(self, name): + """Form new names because of the 32-character limit on names.""" + newname = VOL_AND_SNAP_NAME_PREFIX + str(hash(name)) + + LOG.debug(_('_name_translate:Name in cinder: %(old)s, ' + 'new name in storage system: %(new)s') + % {'old': name, + 'new': newname}) + + return newname + + def _find_lun(self, name): + """Get the ID of a LUN with the given LUN name.""" + cli_cmd = ('showlun') + out = self._execute_cli(cli_cmd) + en = out.split('\r\n') + if len(en) <= 6: + return None + + if 'Dorado2100 G2' == self.device_type['type']: + d = 2 + elif 'Dorado5100' == self.device_type['type']: + d = 1 + else: + d = 0 + + for i in range(6, len(en) - 2): + r = en[i].replace('Not format', 'Notformat').split() + if r[6 - d] == name: + return r[0] + return None + + def _create_hostgroup(self, hostgroupname): + """Create a host group.""" + cli_cmd = ('createhostgroup -n %(name)s' + % {'name': hostgroupname}) + out = self._execute_cli(cli_cmd) + if not re.search('command operates successfully', out): + err_msg = (_('_create_hostgroup:Failed to Create hostgroup. ' + 'Hostgroup name: %(name)s. ' + 'out:%(out)s.') + % {'name': hostgroupname, + 'out': out}) + LOG.error(err_msg) + raise exception.VolumeBackendAPIException(data=err_msg) + + def _find_hostgroup(self, groupname): + """Get the given hostgroup ID.""" + cli_cmd = ('showhostgroup') + out = self._execute_cli(cli_cmd) + en = out.split('\r\n') + if len(en) <= 6: + return None + + for i in range(6, len(en) - 2): + r = en[i].split() + if r[1] == groupname: + return r[0] + return None + + def _add_host(self, hostname, hostgroupid): + """Add a new host.""" + cli_cmd = ('addhost -group %(groupid)s -n %(hostname)s -t 0' + % {'groupid': hostgroupid, + 'hostname': hostname}) + out = self._execute_cli(cli_cmd) + if not re.search('command operates successfully', out): + err_msg = (_('_add_host:Failed to add host to hostgroup. ' + 'host name:%(host)s ' + 'hostgroup id:%(hostgroup)s ' + 'out:%(out)s') + % {'host': hostname, + 'hostgroup': hostgroupid, + 'out': out}) + LOG.error(err_msg) + raise exception.VolumeBackendAPIException(data=err_msg) + + def _check_initiator(self, ininame): + """Check whether the initiator is already added.""" + cli_cmd = ('showiscsiini -ini %(name)s' + % {'name': ininame}) + out = self._execute_cli(cli_cmd) + if out.find('Initiator Information') > -1: + return True + else: + return False + + def _add_initiator(self, ininame): + """Add a new initiator to storage device.""" + cli_cmd = ('addiscsiini -n %(name)s' + % {'name': ininame}) + out = self._execute_cli(cli_cmd) + if not re.search('command operates successfully', out): + err_msg = (_('_add_initiator:Failed to add initiator. 
'
+                         'initiator name:%(name)s '
+                         'out:%(out)s')
+                       % {'name': ininame,
+                          'out': out})
+            LOG.error(err_msg)
+            raise exception.VolumeBackendAPIException(data=err_msg)
+
+    def _delete_initiator(self, ininame):
+        """Delete an initiator."""
+        cli_cmd = ('deliscsiini -n %(name)s'
+                   % {'name': ininame})
+        out = self._execute_cli(cli_cmd)
+        if not re.search('command operates successfully', out):
+            err_msg = (_('_delete_initiator:ERROR:Failed to delete '
+                         'initiator. '
+                         'initiator name:%(name)s '
+                         'out:%(out)s')
+                       % {'name': ininame,
+                          'out': out})
+            LOG.error(err_msg)
+
+    def _find_host_in_hostgroup(self, hostname, hostgroupid):
+        """Get the given host ID."""
+        cli_cmd = ('showhost -group %(groupid)s'
+                   % {'groupid': hostgroupid})
+        out = self._execute_cli(cli_cmd)
+        en = out.split('\r\n')
+        if len(en) < 6:
+            return None
+
+        for i in range(6, len(en) - 2):
+            r = en[i].split()
+            if r[1] == hostname:
+                return r[0]
+        return None
+
+    def _get_hostport_info(self, hostid):
+        """Get hostport details of the given host."""
+        cli_cmd = ('showhostport -host %(hostid)s'
+                   % {'hostid': hostid})
+        out = self._execute_cli(cli_cmd)
+        en = out.split('\r\n')
+        if len(en) < 6:
+            return None
+
+        hostportinfo = []
+        list_key = ['id', 'name', 'info', 'type', 'hostid',
+                    'linkstatus', 'multipathtype']
+        for i in range(6, len(en) - 2):
+            list_val = en[i].split()
+            hostport_dic = dict(map(None, list_key, list_val))
+            hostportinfo.append(hostport_dic)
+        return hostportinfo
+
+    def _add_hostport(self, portname, hostid, portinfo, multipathtype=0):
+        """Add a host port."""
+        cli_cmd = ('addhostport -host %(id)s -type 5 '
+                   '-info %(info)s -n %(name)s -mtype %(mtype)s'
+                   % {'id': hostid,
+                      'info': portinfo,
+                      'name': portname,
+                      'mtype': multipathtype})
+        out = self._execute_cli(cli_cmd)
+        if not re.search('command operates successfully', out):
+            err_msg = (_('_add_hostport:Failed to add hostport. '
+                         'port name:%(port)s '
+                         'port information:%(info)s '
+                         'host id:%(host)s '
+                         'out:%(out)s')
+                       % {'port': portname,
+                          'info': portinfo,
+                          'host': hostid,
+                          'out': out})
+            LOG.error(err_msg)
+            raise exception.VolumeBackendAPIException(data=err_msg)
+
+    def _delete_hostport(self, portid):
+        """Delete a host port."""
+        cli_cmd = ('delhostport -force -p %(portid)s'
+                   % {'portid': portid})
+        out = self._execute_cli(cli_cmd)
+        if not re.search('command operates successfully', out):
+            err_msg = (_('_delete_hostport:Failed to delete host port. '
+                         'port id:%(portid)s')
+                       % {'portid': portid})
+            LOG.error(err_msg)
+
+    def _get_tgt_iqn(self, iscsiip):
+        """Get target iSCSI iqn."""
+        LOG.debug(_('_get_tgt_iqn:iSCSI IP is %s.') % iscsiip)
+        cli_cmd = ('showiscsitgtname')
+        out = self._execute_cli(cli_cmd)
+        en = out.split('\r\n')
+        if len(en) < 4:
+            return (None, None)
+
+        index = en[4].find('iqn')
+        iqn_prefix = en[4][index:]
+        iqn_prefix = iqn_prefix.strip()
+        iscsiip_info = self._get_iscsi_ip_info(iscsiip)
+        if iscsiip_info:
+            if iscsiip_info['ctrid'] == 'A':
+                ctr = '0'
+            elif iscsiip_info['ctrid'] == 'B':
+                ctr = '1'
+
+            interface = '0' + iscsiip_info['interfaceid']
+            port = iscsiip_info['portid'].replace('P', '0')
+            iqn_suffix = ctr + '02' + interface + port
+            for i in range(0, len(iqn_suffix)):
+                if iqn_suffix[i] != '0':
+                    iqn_suffix = iqn_suffix[i:]
+                    break
+            if self.device_type['type'] == 'Tseries':
+                iqn = iqn_prefix + ':' + iqn_suffix + ':' \
+                    + iscsiip_info['ipaddress']
+            elif self.device_type['type'] == "Dorado2100 G2":
+                iqn = iqn_prefix + ":" + iscsiip_info['ipaddress'] + "-" \
+                    + iqn_suffix
+            else:
+                iqn = iqn_prefix + ':' + iscsiip_info['ipaddress']
+
+            LOG.debug(_('_get_tgt_iqn:iSCSI target iqn is:%s') % iqn)
+
+            return (iqn, iscsiip_info['ctrid'])
+        else:
+            return (None, None)
+
+    def _get_iscsi_ip_info(self, iscsiip):
+        """Get iSCSI IP information of storage device."""
+        cli_cmd = ('showiscsiip')
+        out = self._execute_cli(cli_cmd)
+        en = out.split('\r\n')
+        if len(en) < 6:
+            return None
+
+        iscsiIPinfo = {}
+        for i in range(6, len(en) - 2):
+            r = en[i].split()
+            if r[3] == iscsiip:
+                iscsiIPinfo['ctrid'] = r[0]
+                iscsiIPinfo['interfaceid'] = r[1]
+                iscsiIPinfo['portid'] = r[2]
+                iscsiIPinfo['ipaddress'] = r[3]
+                return iscsiIPinfo
+        return None
+
+    def _map_lun(self, lunid, hostid, new_hostlun_id):
+        """Map a lun to a host.
+
+        Here we give the hostlun ID which starts from 1.
+        """
+        cli_cmd = ('addhostmap -host %(hostid)s -devlun %(lunid)s '
+                   '-hostlun %(hostlunid)s'
+                   % {'hostid': hostid,
+                      'lunid': lunid,
+                      'hostlunid': new_hostlun_id})
+        out = self._execute_cli(cli_cmd)
+        if not re.search('command operates successfully', out):
+            err_msg = (_('_map_lun:Failed to add hostmap. '
+                         'hostid:%(host)s '
+                         'lunid:%(lun)s '
+                         'hostlunid:%(hostlunid)s '
+                         'out:%(out)s')
+                       % {'host': hostid,
+                          'lun': lunid,
+                          'hostlunid': new_hostlun_id,
+                          'out': out})
+            LOG.error(err_msg)
+            raise exception.VolumeBackendAPIException(data=err_msg)
+
+    def _get_hostlunid(self, hostid, lunid):
+        """Get the hostLUN ID of a LUN according to host ID and LUN ID."""
+        mapinfo = self._get_map_info(hostid)
+        if mapinfo:
+            for map in mapinfo:
+                if map['devlunid'] == lunid:
+                    return map['hostlunid']
+        return None
+
+    def _delete_map(self, mapid, attempts=1):
+        """Remove the map."""
+        cli_cmd = ('delhostmap -force -map %(mapid)s'
+                   % {'mapid': mapid})
+        while attempts >= 0:
+            attempts -= 1
+            out = self._execute_cli(cli_cmd)
+
+            # We retry deleting the host map 10s later if there are
+            # IOs accessing the system.
+            if re.search('command operates successfully', out):
+                break
+            else:
+                if re.search('there are IOs accessing the system', out):
+                    time.sleep(10)
+                    LOG.debug(_('_delete_map:There are IOs accessing '
+                                'the system. Retry to delete host map. '
+                                'map id:%(mapid)s')
+                              % {'mapid': mapid})
+                    continue
+                else:
+                    err_msg = (_('_delete_map:Failed to delete host map.'
+                                 ' mapid:%(mapid)s '
+                                 'out:%(out)s')
+                               % {'mapid': mapid,
+                                  'out': out})
+                    LOG.error(err_msg)
+                    raise exception.VolumeBackendAPIException(data=err_msg)
+
+    def _delete_host(self, hostid):
+        """Delete a host."""
+        cli_cmd = ('delhost -force -host %(hostid)s'
+                   % {'hostid': hostid})
+        out = self._execute_cli(cli_cmd)
+        if not re.search('command operates successfully', out):
+            err_msg = (_('_delete_host: Failed to delete host. '
+                         'host id:%(hostid)s '
+                         'out:%(out)s')
+                       % {'hostid': hostid,
+                          'out': out})
+            LOG.error(err_msg)
+            raise exception.VolumeBackendAPIException(data=err_msg)
+
+    def _get_map_info(self, hostid):
+        """Get map information of the given host.
+
+        This method returns a map information list. Every item in the list
+        is a dictionary. The dictionary includes three keys: mapid,
+        devlunid, hostlunid. These items are sorted by hostlunid value
+        in ascending order.
+        """
+        cli_cmd = ('showhostmap -host %(hostid)s'
+                   % {'hostid': hostid})
+        out = self._execute_cli(cli_cmd)
+        en = out.split('\r\n')
+        if len(en) <= 6:
+            return None
+
+        mapinfo = []
+        list_tmp = []
+        list_key = ['mapid', 'devlunid', 'hostlunid']
+        for i in range(6, len(en) - 2):
+            list_tmp = en[i].split()
+            list_val = [list_tmp[0], list_tmp[2], list_tmp[4]]
+            dic = dict(map(None, list_key, list_val))
+            inserted = False
+            mapinfo_length = len(mapinfo)
+            if mapinfo_length == 0:
+                mapinfo.append(dic)
+                continue
+            for index in range(0, mapinfo_length):
+                if (int(mapinfo[mapinfo_length - index - 1]['hostlunid']) <
+                        int(dic['hostlunid'])):
+                    mapinfo.insert(mapinfo_length - index, dic)
+                    inserted = True
+                    break
+            if not inserted:
+                mapinfo.insert(0, dic)
+        return mapinfo
+
+    def _get_device_type(self):
+        """Get the storage device type and product version."""
+        cli_cmd = ('showsys')
+        out = self._execute_cli(cli_cmd)
+        en = out.split('\r\n')
+        if len(en) <= 6:
+            return None
+
+        for line in en:
+            if re.search('Device Type', line):
+                if re.search('T$', line):
+                    device_type = 'Tseries'
+                elif re.search('Dorado2100 G2$', line):
+                    device_type = 'Dorado2100 G2'
+                elif re.search('Dorado5100$', line):
+                    device_type = 'Dorado5100'
+                else:
+                    device_type = None
+                continue
+
+            if re.search('Product Version', line):
+                if re.search('V100R+', line):
+                    product_version = 'V100R'
+                else:
+                    product_version = None
+                break
+
+        r = {'type': device_type, 'version': product_version}
+        return r
+
+    def _active_snapshot(self, snapshotid):
+        """Activate a snapshot."""
+        cli_cmd = ('actvsnapshot -snapshot %(snapshotid)s'
+                   % {'snapshotid': snapshotid})
+        out = self._execute_cli(cli_cmd)
+        if not re.search('command operates successfully', out):
+            err_msg = (_('_active_snapshot:Failed to activate snapshot. '
+                         'snapshot id:%(name)s. '
+                         'out:%(out)s')
+                       % {'name': snapshotid,
+                          'out': out})
+            LOG.error(err_msg)
+            raise exception.VolumeBackendAPIException(data=err_msg)
+
+    def _disable_snapshot(self, snapshotid):
+        """Disable a snapshot."""
+        cli_cmd = ('disablesnapshot -snapshot %(snapshotid)s'
+                   % {'snapshotid': snapshotid})
+        out = self._execute_cli(cli_cmd)
+        if not re.search('command operates successfully', out):
+            err_msg = (_('_disable_snapshot:Failed to disable snapshot. '
+                         'snapshot id:%(id)s. '
+                         'out:%(out)s')
+                       % {'id': snapshotid,
+                          'out': out})
+            LOG.error(err_msg)
+            raise exception.VolumeBackendAPIException(data=err_msg)
+
+    def _delete_snapshot(self, snapshotid):
+        """Delete a snapshot."""
+        cli_cmd = ('delsnapshot -snapshot %(snapshotid)s'
+                   % {'snapshotid': snapshotid})
+        out = self._execute_cli(cli_cmd)
+        if not re.search('command operates successfully', out):
+            err_msg = (_('_delete_snapshot:Failed to delete snapshot. '
+                         'snapshot id:%(id)s. '
+                         'out:%(out)s')
+                       % {'id': snapshotid,
+                          'out': out})
+            LOG.error(err_msg)
+            raise exception.VolumeBackendAPIException(data=err_msg)
+
+    def _create_volume(self, name, size):
+        """Create a new volume with the given name and size."""
+        lunsetinfo = self._get_lun_set_info()
+        cli_cmd = ('createlun -n %(name)s -lunsize %(size)s '
+                   '-wrtype %(wrtype)s '
+                   % {'name': name,
+                      'size': size,
+                      'wrtype': lunsetinfo['WriteType']})
+
+        # If write type is "write through", no need to set mirror switch.
+        if lunsetinfo['WriteType'] != '2':
+            cli_cmd = cli_cmd + ('-mirrorsw %(mirrorsw)s '
+                                 % {'mirrorsw': lunsetinfo['MirrorSwitch']})
+
+        # Differences exist between "Thin" and "Thick" LUN CLI commands.
+        luntype = lunsetinfo['LUNType']
+        if luntype == 'Thin':
+            dorado2100g2_luntype = '2'
+            Tseries = ('-pool %(pool)s '
+                       % {'pool': lunsetinfo['StoragePool']})
+        else:
+            dorado2100g2_luntype = '3'
+            Tseries = ('-rg %(raidgroup)s -susize %(susize)s '
+                       % {'raidgroup': lunsetinfo['StoragePool'],
+                          'susize': lunsetinfo['StripUnitSize']})
+
+        prefetch_value_or_times = ''
+        pretype = '-pretype %s ' % lunsetinfo['PrefetchType']
+        # If constant prefetch, we should set prefetch value.
+        if lunsetinfo['PrefetchType'] == '1':
+            prefetch_value_or_times = '-value %s' % lunsetinfo['PrefetchValue']
+        # If variable prefetch, we should set prefetch multiple.
+        elif lunsetinfo['PrefetchType'] == '2':
+            prefetch_value_or_times = '-times %s' % lunsetinfo['PrefetchTimes']
+
+        if self.device_type['type'] == 'Tseries':
+            cli_cmd = cli_cmd + Tseries + pretype + prefetch_value_or_times
+
+        elif self.device_type['type'] == 'Dorado5100':
+            cli_cmd = cli_cmd + ('-rg %(raidgroup)s -susize %(susize)s'
+                                 % {'raidgroup': lunsetinfo['StoragePool'],
+                                    'susize': lunsetinfo['StripUnitSize']})
+
+        elif self.device_type['type'] == 'Dorado2100 G2':
+            cli_cmd = cli_cmd + ('-type %(type)s'
+                                 % {'type': dorado2100g2_luntype})
+
+        out = self._execute_cli(cli_cmd)
+        if not re.search('command operates successfully', out):
+            err_msg = (_('_create_volume:Failed to Create volume. '
+                         'volume name:%(name)s. '
+                         'out:%(out)s')
+                       % {'name': name,
+                          'out': out})
+            LOG.error(err_msg)
+            raise exception.VolumeBackendAPIException(data=err_msg)
+
+    def _delete_volume(self, name, lunid):
+        """Delete a volume."""
+        cli_cmd = ('dellun -force -lun %s' % (lunid))
+        out = self._execute_cli(cli_cmd)
+        if not re.search('command operates successfully', out):
+            err_msg = (_('_delete_volume:Failed to delete volume. '
+                         'Volume name:%(name)s '
+                         'out:%(out)s')
+                       % {'name': name,
+                          'out': out})
+            LOG.error(err_msg)
+            raise exception.VolumeBackendAPIException(data=err_msg)
+
+    def _create_luncopy(self, luncopyname, srclunid, tgtlunid):
+        """Create a LUNcopy."""
+        cli_cmd = ('createluncopy -n %(name)s -l 4 -slun %(srclunid)s '
+                   '-tlun %(tgtlunid)s'
+                   % {'name': luncopyname,
+                      'srclunid': srclunid,
+                      'tgtlunid': tgtlunid})
+        out = self._execute_cli(cli_cmd)
+        if not re.search('command operates successfully', out):
+            err_msg = (_('_create_luncopy:Failed to Create LUNcopy. '
+                         'LUNcopy name:%(name)s '
+                         'out:%(out)s')
+                       % {'name': luncopyname,
+                          'out': out})
+            LOG.error(err_msg)
+            raise exception.VolumeBackendAPIException(data=err_msg)
+
+    def _start_luncopy(self, luncopyid):
+        """Start a LUNcopy."""
+        cli_cmd = ('chgluncopystatus -luncopy %(luncopyid)s -start'
+                   % {'luncopyid': luncopyid})
+        out = self._execute_cli(cli_cmd)
+        if not re.search('command operates successfully', out):
+            err_msg = (_('_start_luncopy:Failed to start LUNcopy. '
+                         'LUNcopy id:%(luncopyid)s '
+                         'out:%(out)s')
+                       % {'luncopyid': luncopyid,
+                          'out': out})
+            LOG.error(err_msg)
+            raise exception.VolumeBackendAPIException(data=err_msg)
+
+    def _find_luncopy(self, luncopyname):
+        """Get the given LUNcopy's ID."""
+        cli_cmd = ('showluncopy')
+        out = self._execute_cli(cli_cmd)
+        en = out.split('\r\n')
+        if len(en) <= 6:
+            return None
+
+        for i in range(6, len(en) - 2):
+            r = en[i].split()
+            if r[0] == luncopyname:
+                luncopyid = r[1]
+                return luncopyid
+        return None
+
+    def _wait_for_luncopy(self, luncopyname):
+        """Wait for LUNcopy to complete."""
+        while True:
+            luncopy_info = self._get_luncopy_info(luncopyname)
+            if luncopy_info['state'] == 'Complete':
+                break
+            elif luncopy_info['status'] != 'Normal':
+                err_msg = (_('_wait_for_luncopy:LUNcopy status is not normal. '
+                             'LUNcopy name:%(luncopyname)s')
+                           % {'luncopyname': luncopyname})
+                LOG.error(err_msg)
+                raise exception.VolumeBackendAPIException(data=err_msg)
+
+            time.sleep(10)
+
+    def _get_luncopy_info(self, luncopyname):
+        """Get LUNcopy information."""
+        cli_cmd = ('showluncopy')
+        out = self._execute_cli(cli_cmd)
+        en = out.split('\r\n')
+        if len(en) <= 6:
+            return None
+
+        luncopyinfo = {}
+        for i in range(6, len(en) - 2):
+            r = en[i].split()
+            if r[0] == luncopyname:
+                luncopyinfo['name'] = r[0]
+                luncopyinfo['id'] = r[1]
+                luncopyinfo['state'] = r[3]
+                luncopyinfo['status'] = r[4]
+                return luncopyinfo
+        return None
+
+    def _delete_luncopy(self, luncopyid):
+        """Delete a LUNcopy."""
+        cli_cmd = ('delluncopy -luncopy %(id)s'
+                   % {'id': luncopyid})
+        out = self._execute_cli(cli_cmd)
+        if not re.search('command operates successfully', out):
+            err_msg = (_('_delete_luncopy:Failed to delete LUNcopy. '
+                         'LUNcopy id:%(luncopyid)s '
+                         'out:%(out)s')
+                       % {'luncopyid': luncopyid,
+                          'out': out})
+            LOG.error(err_msg)
+            raise exception.VolumeBackendAPIException(data=err_msg)
+
+    def _create_snapshot(self, snapshotname, srclunid):
+        """Create a snapshot with snapshot name and source LUN ID."""
+        cli_cmd = ('createsnapshot -lun %(lunid)s -n %(snapname)s'
+                   % {'lunid': srclunid,
+                      'snapname': snapshotname})
+        out = self._execute_cli(cli_cmd)
+        if not re.search('command operates successfully', out):
+            err_msg = (_('_create_snapshot:Failed to Create snapshot. '
+                         'Snapshot name:%(name)s '
+                         'out:%(out)s')
+                       % {'name': snapshotname,
+                          'out': out})
+            LOG.error(err_msg)
+            raise exception.VolumeBackendAPIException(data=err_msg)
+
+    def _find_snapshot(self, snapshotname):
+        """Get the given snapshot ID."""
+        cli_cmd = ('showsnapshot')
+        out = self._execute_cli(cli_cmd)
+        en = out.split('\r\n')
+        if len(en) <= 6:
+            return None
+
+        for i in range(6, len(en) - 2):
+            r = en[i].split()
+            if r[0] == snapshotname:
+                return r[1]
+        return None
+
+    def _get_lun_controller(self, lun_id):
+        cli_cmd = ('showlun -lun %s' % lun_id)
+        out = self._execute_cli(cli_cmd)
+        en = out.split('\r\n')
+        if len(en) <= 4:
+            return None
+
+        if "Dorado2100 G2" == self.device_type['type']:
+            return en[10].split()[3]
+        else:
+            return en[12].split()[3]
+
+    def _change_lun_controller(self, lun_id, controller):
+        cli_cmd = ('chglun -lun %s -c %s' % (lun_id, controller))
+        out = self._execute_cli(cli_cmd)
+        if not re.search('command operates successfully', out):
+            err_msg = (_('_change_lun_controller:Failed to change lun owning '
+                         'controller. lun id:%(lunid)s. '
+                         'new controller:%(controller)s. '
+                         'out:%(out)s')
+                       % {'lunid': lun_id,
+                          'controller': controller,
+                          'out': out})
+            LOG.error(err_msg)
+            raise exception.VolumeBackendAPIException(data=err_msg)
+
+    def _is_resource_pool_enough(self):
+        """Check whether resource pools' valid size is more than 1G."""
+        cli_cmd = ('showrespool')
+        out = self._execute_cli(cli_cmd)
+        en = re.split('\r\n', out)
+        if len(en) <= 6:
+            LOG.error(_('_is_resource_pool_enough:Resource pool for snapshot '
+                        'has not been added.'))
+            return False
+        resource_pools = []
+        list_key = ['pool id', 'size', 'usage', 'valid size',
+                    'alarm threshold']
+        for i in range(6, len(en) - 2):
+            list_val = en[i].split()
+            dic = dict(map(None, list_key, list_val))
+            resource_pools.append(dic)
+
+        for pool in resource_pools:
+            if float(pool['valid size']) < 1024.0:
+                return False
+        return True
+
+    def _update_volume_status(self):
+        """Retrieve status info from volume group."""
+
+        LOG.debug(_("Updating volume status"))
+        data = {}
+        backend_name = self.configuration.safe_get('volume_backend_name')
+        data["volume_backend_name"] = backend_name or 'HuaweiISCSIDriver'
+        data['vendor_name'] = 'Huawei'
+        data['driver_version'] = '1.0'
+        data['storage_protocol'] = 'iSCSI'
+
+        data['total_capacity_gb'] = 'infinite'
+        data['free_capacity_gb'] = self._get_free_capacity()
+        data['reserved_percentage'] = 0
+
+        self._stats = data
+
+    def _get_free_capacity(self):
+        """Get total free capacity of pools."""
+        self.login_info = self._get_login_info()
+        root = self._read_xml()
+        lun_type = root.findtext('LUN/LUNType')
+        if self.device_type['type'] == 'Dorado2100 G2':
+            lun_type = 'Thin'
+        elif (self.device_type['type'] == 'Dorado5100' or not lun_type):
+            lun_type = 'Thick'
+        poolinfo_dev = self._find_pool_info(lun_type)
+        pools_conf = root.findall('LUN/StoragePool')
+        total_free_capacity = 0.0
+        for poolinfo in poolinfo_dev:
+            if self.device_type['type'] == 'Dorado2100 G2':
+                total_free_capacity += float(poolinfo[2])
+                continue
+            for pool in pools_conf:
+                if ((self.device_type['type'] == 'Dorado5100') and
+                        (poolinfo[5] == pool.attrib['Name'])):
+                    total_free_capacity += float(poolinfo[3])
+                    break
+                else:
+                    if ((lun_type == 'Thick') and
+                            (poolinfo[5] == pool.attrib['Name'])):
+                        total_free_capacity += float(poolinfo[3])
+                        break
+                    elif poolinfo[1] == pool.attrib['Name']:
+                        total_free_capacity += float(poolinfo[4])
+                        break
+
+        return total_free_capacity / 1024
diff --git a/cinder/volume/drivers/lvm.py b/cinder/volume/drivers/lvm.py
new file mode 100644
index 0000000000..7317c2901b
--- /dev/null
+++ b/cinder/volume/drivers/lvm.py
@@ -0,0 +1,688 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Driver for Linux servers running LVM.
+
+"""
+
+import math
+import os
+import re
+
+from oslo.config import cfg
+
+from cinder.brick.iscsi import iscsi
+from cinder import exception
+from cinder import flags
+from cinder.image import image_utils
+from cinder.openstack.common import log as logging
+from cinder import utils
+from cinder.volume import driver
+
+LOG = logging.getLogger(__name__)
+
+volume_opts = [
+    cfg.StrOpt('volume_group',
+               default='cinder-volumes',
+               help='Name for the VG that will contain exported volumes'),
+    cfg.StrOpt('volume_clear',
+               default='zero',
+               help='Method used to wipe old volumes (valid options are: '
+                    'none, zero, shred)'),
+    cfg.IntOpt('volume_clear_size',
+               default=0,
+               help='Size in MiB to wipe at start of old volumes. 0 => all'),
+    cfg.StrOpt('volume_dd_blocksize',
+               default='1M',
+               help='The default block size used when clearing volumes'),
+    cfg.StrOpt('pool_size',
+               default=None,
+               help='Size of thin provisioning pool '
+                    '(None uses entire cinder VG)'),
+    cfg.IntOpt('lvm_mirrors',
+               default=0,
+               help='If set, create lvms with multiple mirrors. Note that '
+                    'this requires lvm_mirrors + 2 pvs with available space'),
+]
+
+FLAGS = flags.FLAGS
+FLAGS.register_opts(volume_opts)
+
+
+class LVMVolumeDriver(driver.VolumeDriver):
+    """Executes commands relating to Volumes."""
+
+    VERSION = '1.0'
+
+    def __init__(self, *args, **kwargs):
+        super(LVMVolumeDriver, self).__init__(*args, **kwargs)
+        self.configuration.append_config_values(volume_opts)
+
+    def check_for_setup_error(self):
+        """Raises an error if prerequisites aren't met."""
+        out, err = self._execute('vgs', '--noheadings', '-o', 'name',
+                                 run_as_root=True)
+        volume_groups = out.split()
+        if self.configuration.volume_group not in volume_groups:
+            exception_message = (_("volume group %s doesn't exist")
+                                 % self.configuration.volume_group)
+            raise exception.VolumeBackendAPIException(data=exception_message)
+
+    def _create_volume(self, volume_name, sizestr):
+        cmd = ['lvcreate', '-L', sizestr, '-n', volume_name,
+               self.configuration.volume_group]
+        if self.configuration.lvm_mirrors:
+            cmd += ['-m', self.configuration.lvm_mirrors, '--nosync']
+            terras = int(sizestr[:-1]) / 1024.0
+            if terras >= 1.5:
+                rsize = int(2 ** math.ceil(math.log(terras) / math.log(2)))
+                # NOTE(vish): Next power of two for region size. See:
+                #             http://red.ht/U2BPOD
+                cmd += ['-R', str(rsize)]
+
+        self._try_execute(*cmd, run_as_root=True)
+
+    def _copy_volume(self, srcstr, deststr, size_in_g, clearing=False):
+        # Use O_DIRECT to avoid thrashing the system buffer cache
+        extra_flags = ['iflag=direct', 'oflag=direct']
+
+        # Check whether O_DIRECT is supported
+        try:
+            self._execute('dd', 'count=0', 'if=%s' % srcstr,
+                          'of=%s' % deststr,
+                          *extra_flags, run_as_root=True)
+        except exception.ProcessExecutionError:
+            extra_flags = []
+
+        # If the volume is being unprovisioned then
+        # request the data is persisted before returning,
+        # so that it's not discarded from the cache.
+        if clearing and not extra_flags:
+            extra_flags.append('conv=fdatasync')
+
+        # Perform the copy
+        self._execute('dd', 'if=%s' % srcstr, 'of=%s' % deststr,
+                      'count=%d' % (size_in_g * 1024),
+                      'bs=%s' % self.configuration.volume_dd_blocksize,
+                      *extra_flags, run_as_root=True)
+
+    def _volume_not_present(self, volume_name):
+        path_name = '%s/%s' % (self.configuration.volume_group, volume_name)
+        try:
+            self._try_execute('lvdisplay', path_name, run_as_root=True)
+        except Exception as e:
+            # If the volume isn't present
+            return True
+        return False
+
+    def _delete_volume(self, volume, size_in_g):
+        """Deletes a logical volume."""
+        # zero out old volumes to prevent data leaking between users
+        # TODO(ja): reclaiming space should be done lazy and low priority
+        dev_path = self.local_path(volume)
+        if os.path.exists(dev_path):
+            self.clear_volume(volume)
+
+        self._try_execute('lvremove', '-f', "%s/%s" %
+                          (self.configuration.volume_group,
+                           self._escape_snapshot(volume['name'])),
+                          run_as_root=True)
+
+    def _sizestr(self, size_in_g):
+        if int(size_in_g) == 0:
+            return '100M'
+        return '%sG' % size_in_g
+
+    # Linux LVM reserves names that start with 'snapshot', so such
+    # volume names can't be created. Mangle them.
+    def _escape_snapshot(self, snapshot_name):
+        if not snapshot_name.startswith('snapshot'):
+            return snapshot_name
+        return '_' + snapshot_name
+
+    def create_volume(self, volume):
+        """Creates a logical volume. Can optionally return a dictionary of
+        changes to the volume object to be persisted."""
+        self._create_volume(volume['name'], self._sizestr(volume['size']))
+
+    def create_volume_from_snapshot(self, volume, snapshot):
+        """Creates a volume from a snapshot."""
+        self._create_volume(volume['name'], self._sizestr(volume['size']))
+        self._copy_volume(self.local_path(snapshot), self.local_path(volume),
+                          snapshot['volume_size'])
+
+    def delete_volume(self, volume):
+        """Deletes a logical volume."""
+        if self._volume_not_present(volume['name']):
+            # If the volume isn't present, then don't attempt to delete
+            return True
+
+        # TODO(yamahata): lvm can't delete origin volume only without
+        # deleting derived snapshots. Can we do something fancy?
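+        # Illustrative: 'lvdisplay -C -o Attr' prints the LV attribute
+        # string; a leading 'o' or 'O' marks an origin LV that still has
+        # snapshots, and such volumes are reported busy below instead of
+        # being deleted.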
+ out, err = self._execute('lvdisplay', '--noheading', + '-C', '-o', 'Attr', + '%s/%s' % (self.configuration.volume_group, + volume['name']), + run_as_root=True) + # fake_execute returns None resulting unit test error + if out: + out = out.strip() + if (out[0] == 'o') or (out[0] == 'O'): + raise exception.VolumeIsBusy(volume_name=volume['name']) + + self._delete_volume(volume, volume['size']) + + def clear_volume(self, volume): + """unprovision old volumes to prevent data leaking between users.""" + + vol_path = self.local_path(volume) + size_in_g = volume.get('size') + size_in_m = self.configuration.volume_clear_size + + if not size_in_g: + LOG.warning(_("Size for volume: %s not found, " + "skipping secure delete.") % volume['name']) + return + + if self.configuration.volume_clear == 'none': + return + + LOG.info(_("Performing secure delete on volume: %s") % volume['id']) + + if self.configuration.volume_clear == 'zero': + if size_in_m == 0: + return self._copy_volume('/dev/zero', + vol_path, size_in_g, + clearing=True) + else: + clear_cmd = ['shred', '-n0', '-z', '-s%dMiB' % size_in_m] + elif self.configuration.volume_clear == 'shred': + clear_cmd = ['shred', '-n3'] + if size_in_m: + clear_cmd.append('-s%dMiB' % size_in_m) + else: + LOG.error(_("Error unrecognized volume_clear option: %s"), + self.configuration.volume_clear) + return + + clear_cmd.append(vol_path) + self._execute(*clear_cmd, run_as_root=True) + + def create_snapshot(self, snapshot): + """Creates a snapshot.""" + orig_lv_name = "%s/%s" % (self.configuration.volume_group, + snapshot['volume_name']) + self._try_execute('lvcreate', '-L', + self._sizestr(snapshot['volume_size']), + '--name', self._escape_snapshot(snapshot['name']), + '--snapshot', orig_lv_name, run_as_root=True) + + def delete_snapshot(self, snapshot): + """Deletes a snapshot.""" + if self._volume_not_present(self._escape_snapshot(snapshot['name'])): + # If the snapshot isn't present, then don't attempt to delete + LOG.warning(_("snapshot: %s not found, " + "skipping delete operations") % snapshot['name']) + return True + + # TODO(yamahata): zeroing out the whole snapshot triggers COW. + # it's quite slow. 
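For reference, clear_volume above maps the volume_clear option to a wipe command. A condensed sketch of that decision table (wipe_command is an illustrative name; paths are placeholders):

def wipe_command(method, vol_path, size_in_m):
    """Return the command clear_volume would run, or None to skip."""
    if method == 'none':
        return None
    if method == 'zero' and size_in_m == 0:
        return None  # driver dd-copies /dev/zero over the whole LV instead
    if method == 'zero':
        return ['shred', '-n0', '-z', '-s%dMiB' % size_in_m, vol_path]
    if method == 'shred':
        cmd = ['shred', '-n3']
        if size_in_m:
            cmd.append('-s%dMiB' % size_in_m)
        return cmd + [vol_path]
    raise ValueError('unrecognized volume_clear option: %s' % method)

# e.g. wipe_command('shred', '/dev/mapper/cinder--volumes-vol1', 0)
#      -> ['shred', '-n3', '/dev/mapper/cinder--volumes-vol1']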
+        self._delete_volume(snapshot, snapshot['volume_size'])
+
+    def local_path(self, volume):
+        # NOTE(vish): stops deprecation warning
+        escaped_group = self.configuration.volume_group.replace('-', '--')
+        escaped_name = self._escape_snapshot(volume['name']).replace('-', '--')
+        return "/dev/mapper/%s-%s" % (escaped_group, escaped_name)
+
+    def copy_image_to_volume(self, context, volume, image_service, image_id):
+        """Fetch the image from image_service and write it to the volume."""
+        image_utils.fetch_to_raw(context,
+                                 image_service,
+                                 image_id,
+                                 self.local_path(volume))
+
+    def copy_volume_to_image(self, context, volume, image_service, image_meta):
+        """Copy the volume to the specified image."""
+        image_utils.upload_volume(context,
+                                  image_service,
+                                  image_meta,
+                                  self.local_path(volume))
+
+    def create_cloned_volume(self, volume, src_vref):
+        """Creates a clone of the specified volume."""
+        LOG.info(_('Creating clone of volume: %s') % src_vref['id'])
+        volume_name = FLAGS.volume_name_template % src_vref['id']
+        temp_id = 'tmp-snap-%s' % src_vref['id']
+        temp_snapshot = {'volume_name': volume_name,
+                         'size': src_vref['size'],
+                         'volume_size': src_vref['size'],
+                         'name': 'clone-snap-%s' % src_vref['id'],
+                         'id': temp_id}
+        self.create_snapshot(temp_snapshot)
+        self._create_volume(volume['name'], self._sizestr(volume['size']))
+        try:
+            self._copy_volume(self.local_path(temp_snapshot),
+                              self.local_path(volume),
+                              src_vref['size'])
+        finally:
+            self.delete_snapshot(temp_snapshot)
+
+    def clone_image(self, volume, image_location):
+        return False
+
+    def backup_volume(self, context, backup, backup_service):
+        """Create a new backup from an existing volume."""
+        volume = self.db.volume_get(context, backup['volume_id'])
+        volume_path = self.local_path(volume)
+        with utils.temporary_chown(volume_path):
+            with utils.file_open(volume_path) as volume_file:
+                backup_service.backup(backup, volume_file)
+
+    def restore_backup(self, context, backup, volume, backup_service):
+        """Restore an existing backup to a new or existing volume."""
+        volume_path = self.local_path(volume)
+        with utils.temporary_chown(volume_path):
+            with utils.file_open(volume_path, 'wb') as volume_file:
+                backup_service.restore(backup, volume['id'], volume_file)
+
+
+class LVMISCSIDriver(LVMVolumeDriver, driver.ISCSIDriver):
+    """Executes commands relating to ISCSI volumes.
+
+    We make use of model provider properties as follows:
+
+    ``provider_location``
+      if present, contains the iSCSI target information in the same
+      format as an ietadm discovery
+      i.e. '<ip>:<port>,<portal> <target IQN>'
+
+    ``provider_auth``
+      if present, contains a space-separated triple:
+      '<auth method> <auth username> <auth password>'.
+      `CHAP` is the only auth_method in use at the moment.
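The two model properties described above are plain strings, so they parse with split(). A sketch, using the '<ip>:<port>,<target_id> <iqn> <lun>' shape that _iscsi_location below actually emits (function names are illustrative):

def parse_provider_location(loc):
    # e.g. '10.0.0.5:3260,1 iqn.2010-10.org.openstack:volume-abc 1'
    portal, iqn, lun = loc.split()
    ip_port, target_id = portal.split(',')
    return {'portal': ip_port, 'target_id': target_id,
            'iqn': iqn, 'lun': int(lun)}

def parse_provider_auth(auth):
    # e.g. 'CHAP username password'
    method, username, secret = auth.split()
    return method, username, secret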
+ """ + + def __init__(self, *args, **kwargs): + self.tgtadm = iscsi.get_target_admin() + super(LVMISCSIDriver, self).__init__(*args, **kwargs) + + def set_execute(self, execute): + super(LVMISCSIDriver, self).set_execute(execute) + self.tgtadm.set_execute(execute) + + def ensure_export(self, context, volume): + """Synchronously recreates an export for a logical volume.""" + # NOTE(jdg): tgtadm doesn't use the iscsi_targets table + # TODO(jdg): In the future move all of the dependent stuff into the + # cooresponding target admin class + + if isinstance(self.tgtadm, iscsi.LioAdm): + try: + volume_info = self.db.volume_get(context, volume['id']) + (auth_method, + auth_user, + auth_pass) = volume_info['provider_auth'].split(' ', 3) + chap_auth = self._iscsi_authentication(auth_method, + auth_user, + auth_pass) + except exception.NotFound: + LOG.debug("volume_info:", volume_info) + LOG.info(_("Skipping ensure_export. No iscsi_target " + "provision for volume: %s"), volume['id']) + return + + iscsi_name = "%s%s" % (FLAGS.iscsi_target_prefix, volume['name']) + volume_path = "/dev/%s/%s" % (FLAGS.volume_group, volume['name']) + iscsi_target = 1 + + self.tgtadm.create_iscsi_target(iscsi_name, iscsi_target, + 0, volume_path, chap_auth, + check_exit_code=False) + return + + if not isinstance(self.tgtadm, iscsi.TgtAdm): + try: + iscsi_target = self.db.volume_get_iscsi_target_num( + context, + volume['id']) + except exception.NotFound: + LOG.info(_("Skipping ensure_export. No iscsi_target " + "provisioned for volume: %s"), volume['id']) + return + else: + iscsi_target = 1 # dummy value when using TgtAdm + + chap_auth = None + + # Check for https://bugs.launchpad.net/cinder/+bug/1065702 + old_name = None + volume_name = volume['name'] + if (volume['provider_location'] is not None and + volume['name'] not in volume['provider_location']): + + msg = _('Detected inconsistency in provider_location id') + LOG.debug(msg) + old_name = self._fix_id_migration(context, volume) + if 'in-use' in volume['status']: + volume_name = old_name + old_name = None + + iscsi_name = "%s%s" % (self.configuration.iscsi_target_prefix, + volume_name) + volume_path = "/dev/%s/%s" % (self.configuration.volume_group, + volume_name) + + # NOTE(jdg): For TgtAdm case iscsi_name is the ONLY param we need + # should clean this all up at some point in the future + self.tgtadm.create_iscsi_target(iscsi_name, iscsi_target, + 0, volume_path, chap_auth, + check_exit_code=False, + old_name=old_name) + + def _fix_id_migration(self, context, volume): + """Fix provider_location and dev files to address bug 1065702. + + For volumes that the provider_location has NOT been updated + and are not currently in-use we'll create a new iscsi target + and remove the persist file. + + If the volume is in-use, we'll just stick with the old name + and when detach is called we'll feed back into ensure_export + again if necessary and fix things up then. 
+ + Details at: https://bugs.launchpad.net/cinder/+bug/1065702 + """ + + model_update = {} + pattern = re.compile(r":|\s") + fields = pattern.split(volume['provider_location']) + old_name = fields[3] + + volume['provider_location'] = \ + volume['provider_location'].replace(old_name, volume['name']) + model_update['provider_location'] = volume['provider_location'] + + self.db.volume_update(context, volume['id'], model_update) + + start = os.getcwd() + os.chdir('/dev/%s' % self.configuration.volume_group) + + try: + (out, err) = self._execute('readlink', old_name) + except exception.ProcessExecutionError: + link_path = '/dev/%s/%s' % (self.configuration.volume_group, + old_name) + LOG.debug(_('Symbolic link %s not found') % link_path) + os.chdir(start) + return + + rel_path = out.rstrip() + self._execute('ln', + '-s', + rel_path, volume['name'], + run_as_root=True) + os.chdir(start) + return old_name + + def _ensure_iscsi_targets(self, context, host): + """Ensure that target ids have been created in datastore.""" + # NOTE(jdg): tgtadm doesn't use the iscsi_targets table + # TODO(jdg): In the future move all of the dependent stuff into the + # cooresponding target admin class + if not isinstance(self.tgtadm, iscsi.TgtAdm): + host_iscsi_targets = self.db.iscsi_target_count_by_host(context, + host) + if host_iscsi_targets >= self.configuration.iscsi_num_targets: + return + + # NOTE(vish): Target ids start at 1, not 0. + target_end = self.configuration.iscsi_num_targets + 1 + for target_num in xrange(1, target_end): + target = {'host': host, 'target_num': target_num} + self.db.iscsi_target_create_safe(context, target) + + def create_export(self, context, volume): + """Creates an export for a logical volume.""" + + iscsi_name = "%s%s" % (self.configuration.iscsi_target_prefix, + volume['name']) + volume_path = "/dev/%s/%s" % (self.configuration.volume_group, + volume['name']) + model_update = {} + + # TODO(jdg): In the future move all of the dependent stuff into the + # cooresponding target admin class + if not isinstance(self.tgtadm, iscsi.TgtAdm): + lun = 0 + self._ensure_iscsi_targets(context, volume['host']) + iscsi_target = self.db.volume_allocate_iscsi_target(context, + volume['id'], + volume['host']) + else: + lun = 1 # For tgtadm the controller is lun 0, dev starts at lun 1 + iscsi_target = 0 # NOTE(jdg): Not used by tgtadm + + # Use the same method to generate the username and the password. + chap_username = utils.generate_username() + chap_password = utils.generate_password() + chap_auth = self._iscsi_authentication('IncomingUser', chap_username, + chap_password) + # NOTE(jdg): For TgtAdm case iscsi_name is the ONLY param we need + # should clean this all up at some point in the future + tid = self.tgtadm.create_iscsi_target(iscsi_name, + iscsi_target, + 0, + volume_path, + chap_auth) + model_update['provider_location'] = self._iscsi_location( + self.configuration.iscsi_ip_address, tid, iscsi_name, lun) + model_update['provider_auth'] = self._iscsi_authentication( + 'CHAP', chap_username, chap_password) + return model_update + + def remove_export(self, context, volume): + """Removes an export for a logical volume.""" + # NOTE(jdg): tgtadm doesn't use the iscsi_targets table + # TODO(jdg): In the future move all of the dependent stuff into the + # cooresponding target admin class + + if isinstance(self.tgtadm, iscsi.LioAdm): + try: + iscsi_target = self.db.volume_get_iscsi_target_num( + context, + volume['id']) + except exception.NotFound: + LOG.info(_("Skipping remove_export. 
No iscsi_target " + "provisioned for volume: %s"), volume['id']) + return + + self.tgtadm.remove_iscsi_target(iscsi_target, 0, volume['id']) + + return + + elif not isinstance(self.tgtadm, iscsi.TgtAdm): + try: + iscsi_target = self.db.volume_get_iscsi_target_num( + context, + volume['id']) + except exception.NotFound: + LOG.info(_("Skipping remove_export. No iscsi_target " + "provisioned for volume: %s"), volume['id']) + return + else: + iscsi_target = 0 + + try: + + # NOTE: provider_location may be unset if the volume hasn't + # been exported + location = volume['provider_location'].split(' ') + iqn = location[1] + + # ietadm show will exit with an error + # this export has already been removed + self.tgtadm.show_target(iscsi_target, iqn=iqn) + + except Exception as e: + LOG.info(_("Skipping remove_export. No iscsi_target " + "is presently exported for volume: %s"), volume['id']) + return + + self.tgtadm.remove_iscsi_target(iscsi_target, 0, volume['id']) + + def get_volume_stats(self, refresh=False): + """Get volume status. + + If 'refresh' is True, run update the stats first.""" + if refresh: + self._update_volume_status() + + return self._stats + + def _update_volume_status(self): + """Retrieve status info from volume group.""" + + LOG.debug(_("Updating volume status")) + data = {} + + # Note(zhiteng): These information are driver/backend specific, + # each driver may define these values in its own config options + # or fetch from driver specific configuration file. + backend_name = self.configuration.safe_get('volume_backend_name') + data["volume_backend_name"] = backend_name or 'LVM_iSCSI' + data["vendor_name"] = 'Open Source' + data["driver_version"] = self.VERSION + data["storage_protocol"] = 'iSCSI' + + data['total_capacity_gb'] = 0 + data['free_capacity_gb'] = 0 + data['reserved_percentage'] = self.configuration.reserved_percentage + data['QoS_support'] = False + + try: + out, err = self._execute('vgs', '--noheadings', '--nosuffix', + '--unit=G', '-o', 'name,size,free', + self.configuration.volume_group, + run_as_root=True) + except exception.ProcessExecutionError as exc: + LOG.error(_("Error retrieving volume status: "), exc.stderr) + out = False + + if out: + volume = out.split() + data['total_capacity_gb'] = float(volume[1].replace(',', '.')) + data['free_capacity_gb'] = float(volume[2].replace(',', '.')) + + self._stats = data + + def _iscsi_location(self, ip, target, iqn, lun=None): + return "%s:%s,%s %s %s" % (ip, self.configuration.iscsi_port, + target, iqn, lun) + + def _iscsi_authentication(self, chap, name, password): + return "%s %s %s" % (chap, name, password) + + +class ThinLVMVolumeDriver(LVMISCSIDriver): + """Subclass for thin provisioned LVM's.""" + + VERSION = '1.0' + + def __init__(self, *args, **kwargs): + super(ThinLVMVolumeDriver, self).__init__(*args, **kwargs) + + def check_for_setup_error(self): + """Returns an error if prerequisites aren't met""" + out, err = self._execute('lvs', '--option', + 'name', '--noheadings', + run_as_root=True) + pool_name = "%s-pool" % FLAGS.volume_group + if pool_name not in out: + if not FLAGS.pool_size: + out, err = self._execute('vgs', FLAGS.volume_group, + '--noheadings', '--options', + 'name,size', run_as_root=True) + size = re.sub(r'[\.][\d][\d]', '', out.split()[1]) + else: + size = "%s" % FLAGS.pool_size + + pool_path = '%s/%s' % (FLAGS.volume_group, pool_name) + out, err = self._execute('lvcreate', '-T', '-L', size, + pool_path, run_as_root=True) + + def _do_lvm_snapshot(self, src_lvm_name, dest_vref, 
is_cinder_snap=True): + if is_cinder_snap: + new_name = self._escape_snapshot(dest_vref['name']) + else: + new_name = dest_vref['name'] + + self._try_execute('lvcreate', '-s', '-n', new_name, + src_lvm_name, run_as_root=True) + + def create_volume(self, volume): + """Creates a logical volume. Can optionally return a Dictionary of + changes to the volume object to be persisted.""" + sizestr = self._sizestr(volume['size']) + vg_name = ("%s/%s-pool" % (FLAGS.volume_group, FLAGS.volume_group)) + self._try_execute('lvcreate', '-T', '-V', sizestr, '-n', + volume['name'], vg_name, run_as_root=True) + + def delete_volume(self, volume): + """Deletes a logical volume.""" + if self._volume_not_present(volume['name']): + return True + self._try_execute('lvremove', '-f', "%s/%s" % + (FLAGS.volume_group, + self._escape_snapshot(volume['name'])), + run_as_root=True) + + def create_cloned_volume(self, volume, src_vref): + """Creates a clone of the specified volume.""" + LOG.info(_('Creating clone of volume: %s') % src_vref['id']) + orig_lv_name = "%s/%s" % (FLAGS.volume_group, src_vref['name']) + self._do_lvm_snapshot(orig_lv_name, volume, False) + + def create_snapshot(self, snapshot): + """Creates a snapshot of a volume.""" + orig_lv_name = "%s/%s" % (FLAGS.volume_group, snapshot['volume_name']) + self._do_lvm_snapshot(orig_lv_name, snapshot) + + def get_volume_stats(self, refresh=False): + """Get volume status. + If 'refresh' is True, run update the stats first.""" + if refresh: + self._update_volume_status() + + return self._stats + + def _update_volume_status(self): + """Retrieve status info from volume group.""" + + LOG.debug(_("Updating volume status")) + data = {} + + backend_name = self.configuration.safe_get('volume_backend_name') + data["volume_backend_name"] = backend_name or self.__class__.__name__ + data["vendor_name"] = 'Open Source' + data["driver_version"] = self.VERSION + data["storage_protocol"] = 'iSCSI' + data['reserved_percentage'] = self.configuration.reserved_percentage + data['QoS_support'] = False + data['total_capacity_gb'] = 'infinite' + data['free_capacity_gb'] = 'infinite' + self._stats = data diff --git a/cinder/volume/drivers/netapp/__init__.py b/cinder/volume/drivers/netapp/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/cinder/volume/drivers/netapp/api.py b/cinder/volume/drivers/netapp/api.py new file mode 100644 index 0000000000..241a22daa0 --- /dev/null +++ b/cinder/volume/drivers/netapp/api.py @@ -0,0 +1,410 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2012 NetApp, Inc. +# Copyright (c) 2012 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +NetApp api for ONTAP and OnCommand DFM. + +Contains classes required to issue api calls to ONTAP and OnCommand DFM. 
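The ThinLVMVolumeDriver above leans on LVM thin pools: the pool is an ordinary LV, and each volume is a virtual-size LV inside it. A sketch of the underlying shell commands, assuming the default VG name cinder-volumes (VG and POOL here mirror FLAGS.volume_group and the "-pool" suffix; sizes are placeholders):

import subprocess

VG = 'cinder-volumes'            # FLAGS.volume_group in the driver
POOL = '%s-pool' % VG            # thin pool LV created at setup time

def create_thin_pool(size='10G'):
    # lvcreate -T -L <size> cinder-volumes/cinder-volumes-pool
    subprocess.check_call(['lvcreate', '-T', '-L', size,
                           '%s/%s' % (VG, POOL)])

def create_thin_volume(name, virtual_size='1G'):
    # -V allocates a virtual size; blocks are only consumed on write.
    subprocess.check_call(['lvcreate', '-T', '-V', virtual_size,
                           '-n', name, '%s/%s' % (VG, POOL)])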
+""" + +from lxml import etree +import urllib2 + +from cinder.openstack.common import log as logging + +LOG = logging.getLogger(__name__) + + +class NaServer(object): + """Encapsulates server connection logic.""" + + TRANSPORT_TYPE_HTTP = 'http' + TRANSPORT_TYPE_HTTPS = 'https' + SERVER_TYPE_FILER = 'filer' + SERVER_TYPE_DFM = 'dfm' + URL_FILER = 'servlets/netapp.servlets.admin.XMLrequest_filer' + URL_DFM = 'apis/XMLrequest' + NETAPP_NS = 'http://www.netapp.com/filer/admin' + STYLE_LOGIN_PASSWORD = 'basic_auth' + STYLE_CERTIFICATE = 'certificate_auth' + + def __init__(self, host, server_type=SERVER_TYPE_FILER, + transport_type=TRANSPORT_TYPE_HTTP, + style=STYLE_LOGIN_PASSWORD, username=None, + password=None): + self._host = host + self.set_server_type(server_type) + self.set_transport_type(transport_type) + self.set_style(style) + self._username = username + self._password = password + self._refresh_conn = True + + def get_transport_type(self): + """Get the transport type protocol.""" + return self._protocol + + def set_transport_type(self, transport_type): + """Set the transport type protocol for api. + + Supports http and https transport types. + """ + if transport_type.lower() not in ( + NaServer.TRANSPORT_TYPE_HTTP, + NaServer.TRANSPORT_TYPE_HTTPS): + raise ValueError('Unsupported transport type') + self._protocol = transport_type.lower() + if self._protocol == NaServer.TRANSPORT_TYPE_HTTP: + if self._server_type == NaServer.SERVER_TYPE_FILER: + self.set_port(80) + else: + self.set_port(8088) + else: + if self._server_type == NaServer.SERVER_TYPE_FILER: + self.set_port(443) + else: + self.set_port(8488) + self._refresh_conn = True + + def get_style(self): + """Get the authorization style for communicating with the server.""" + return self._auth_style + + def set_style(self, style): + """Set the authorization style for communicating with the server. + + Supports basic_auth for now. Certificate_auth mode to be done. + """ + if style.lower() not in (NaServer.STYLE_LOGIN_PASSWORD, + NaServer.STYLE_CERTIFICATE): + raise ValueError('Unsupported authentication style') + self._auth_style = style.lower() + + def get_server_type(self): + """Get the target server type.""" + return self._server_type + + def set_server_type(self, server_type): + """Set the target server type. + + Supports filer and dfm server types. + """ + if server_type.lower() not in (NaServer.SERVER_TYPE_FILER, + NaServer.SERVER_TYPE_DFM): + raise ValueError('Unsupported server type') + self._server_type = server_type.lower() + if self._server_type == NaServer.SERVER_TYPE_FILER: + self._url = NaServer.URL_FILER + else: + self._url = NaServer.URL_DFM + self._ns = NaServer.NETAPP_NS + self._refresh_conn = True + + def set_api_version(self, major, minor): + """Set the api version.""" + try: + self._api_major_version = int(major) + self._api_minor_version = int(minor) + self._api_version = str(major) + "." 
+                + str(minor)
+        except ValueError:
+            raise ValueError('Major and minor versions must be integers')
+        self._refresh_conn = True
+
+    def get_api_version(self):
+        """Gets the api version."""
+        if hasattr(self, '_api_version'):
+            return self._api_version
+        return None
+
+    def set_port(self, port):
+        """Set the server communication port."""
+        try:
+            int(port)
+        except ValueError:
+            raise ValueError('Port must be integer')
+        self._port = str(port)
+        self._refresh_conn = True
+
+    def get_port(self):
+        """Get the server communication port."""
+        return self._port
+
+    def set_timeout(self, seconds):
+        """Sets the timeout in seconds."""
+        try:
+            self._timeout = int(seconds)
+        except ValueError:
+            raise ValueError('timeout in seconds must be integer')
+
+    def get_timeout(self):
+        """Gets the timeout in seconds if set."""
+        if hasattr(self, '_timeout'):
+            return self._timeout
+        return None
+
+    def get_vfiler(self):
+        """Get the vfiler to use in tunneling."""
+        return self._vfiler
+
+    def set_vfiler(self, vfiler):
+        """Set the vfiler to use if tunneling gets enabled."""
+        self._vfiler = vfiler
+
+    def get_vserver(self):
+        """Get the vserver to use in tunneling."""
+        return self._vserver
+
+    def set_vserver(self, vserver):
+        """Set the vserver to use if tunneling gets enabled."""
+        self._vserver = vserver
+
+    def set_username(self, username):
+        """Set the user name for authentication."""
+        self._username = username
+        self._refresh_conn = True
+
+    def set_password(self, password):
+        """Set the password for authentication."""
+        self._password = password
+        self._refresh_conn = True
+
+    def invoke_elem(self, na_element, enable_tunneling=False):
+        """Invoke the api on the server."""
+        if na_element and not isinstance(na_element, NaElement):
+            raise ValueError('NaElement must be supplied to invoke api')
+        request = self._create_request(na_element, enable_tunneling)
+        if not hasattr(self, '_opener') or not self._opener \
+                or self._refresh_conn:
+            self._build_opener()
+        try:
+            if hasattr(self, '_timeout'):
+                response = self._opener.open(request, timeout=self._timeout)
+            else:
+                response = self._opener.open(request)
+        except urllib2.HTTPError as e:
+            raise NaApiError(e.code, e.msg)
+        except Exception as e:
+            raise NaApiError('Unexpected error', e)
+        xml = response.read()
+        return self._get_result(xml)
+
+    def invoke_successfully(self, na_element, enable_tunneling=False):
+        """Invokes api and checks execution status as success.
+
+        Need to set enable_tunneling to True explicitly to achieve it.
+        This helps to use same connection instance to enable or disable
+        tunneling. The vserver or vfiler should be set before this call
+        otherwise tunneling remains disabled.
+ """ + result = self.invoke_elem(na_element, enable_tunneling) + if result.has_attr('status') and result.get_attr('status') == 'passed': + return result + code = result.get_attr('errno')\ + or result.get_child_content('errorno')\ + or 'ESTATUSFAILED' + msg = result.get_attr('reason')\ + or result.get_child_content('reason')\ + or 'Execution status is failed due to unknown reason' + raise NaApiError(code, msg) + + def _create_request(self, na_element, enable_tunneling=False): + """Creates request in the desired format.""" + netapp_elem = NaElement('netapp') + netapp_elem.add_attr('xmlns', self._ns) + if hasattr(self, '_api_version'): + netapp_elem.add_attr('version', self._api_version) + if enable_tunneling: + self._enable_tunnel_request(netapp_elem) + netapp_elem.add_child_elem(na_element) + request_d = netapp_elem.to_string() + request = urllib2.Request( + self._get_url(), data=request_d, + headers={'Content-Type': 'text/xml', 'charset': 'utf-8'}) + return request + + def _enable_tunnel_request(self, netapp_elem): + """Enables vserver or vfiler tunneling.""" + if hasattr(self, '_vfiler') and self._vfiler: + if hasattr(self, '_api_major_version') and \ + hasattr(self, '_api_minor_version') and \ + self._api_major_version >= 1 and \ + self._api_minor_version >= 7: + netapp_elem.add_attr('vfiler', self._vfiler) + else: + raise ValueError('ontapi version has to be atleast 1.7' + ' to send request to vfiler') + if hasattr(self, '_vserver') and self._vserver: + if hasattr(self, '_api_major_version') and \ + hasattr(self, '_api_minor_version') and \ + self._api_major_version >= 1 and \ + self._api_minor_version >= 15: + netapp_elem.add_attr('vfiler', self._vserver) + else: + raise ValueError('ontapi version has to be atleast 1.15' + ' to send request to vserver') + + def _parse_response(self, response): + """Get the NaElement for the response.""" + if not response: + raise NaApiError('No response received') + xml = etree.XML(response) + return NaElement(xml) + + def _get_result(self, response): + """Gets the call result.""" + processed_response = self._parse_response(response) + return processed_response.get_child_by_name('results') + + def _get_url(self): + return '%s://%s:%s/%s' % (self._protocol, self._host, self._port, + self._url) + + def _build_opener(self): + if self._auth_style == NaServer.STYLE_LOGIN_PASSWORD: + auth_handler = self._create_basic_auth_handler() + else: + auth_handler = self._create_certificate_auth_handler() + opener = urllib2.build_opener(auth_handler) + self._opener = opener + + def _create_basic_auth_handler(self): + password_man = urllib2.HTTPPasswordMgrWithDefaultRealm() + password_man.add_password(None, self._get_url(), self._username, + self._password) + auth_handler = urllib2.HTTPBasicAuthHandler(password_man) + return auth_handler + + def _create_certificate_auth_handler(self): + raise NotImplementedError() + + +class NaElement(object): + """Class wraps basic building block for NetApp api request.""" + + def __init__(self, name): + """Name of the element or etree.Element.""" + if isinstance(name, etree._Element): + self._element = name + else: + self._element = etree.Element(name) + + def get_name(self): + """Returns the tag name of the element.""" + return self._element.tag + + def set_content(self, text): + """Set the text for the element.""" + self._element.text = text + + def get_content(self): + """Get the text for the element.""" + return self._element.text + + def add_attr(self, name, value): + """Add the attribute to the element.""" + 
        self._element.set(name, value)
+
+    def add_attrs(self, **attrs):
+        """Add multiple attributes to the element."""
+        for attr in attrs.keys():
+            self._element.set(attr, attrs.get(attr))
+
+    def add_child_elem(self, na_element):
+        """Add the child element to the element."""
+        if isinstance(na_element, NaElement):
+            self._element.append(na_element._element)
+            return
+        raise ValueError('Can only add NaElement children')
+
+    def get_child_by_name(self, name):
+        """Get the child element by the tag name."""
+        for child in self._element.iterchildren():
+            if child.tag == name or etree.QName(child.tag).localname == name:
+                return NaElement(child)
+        return None
+
+    def get_child_content(self, name):
+        """Get the content of the child."""
+        for child in self._element.iterchildren():
+            if child.tag == name or etree.QName(child.tag).localname == name:
+                return child.text
+        return None
+
+    def get_children(self):
+        """Get the children for the element."""
+        return [NaElement(el) for el in self._element.iterchildren()]
+
+    def has_attr(self, name):
+        """Checks whether element has attribute."""
+        attributes = self._element.attrib or {}
+        return name in attributes.keys()
+
+    def get_attr(self, name):
+        """Get the attribute with the given name."""
+        attributes = self._element.attrib or {}
+        return attributes.get(name)
+
+    def get_attr_names(self):
+        """Returns the list of attribute names."""
+        attributes = self._element.attrib or {}
+        return attributes.keys()
+
+    def add_new_child(self, name, content, convert=False):
+        """Add child with tag name and content.
+
+        Convert replaces entity refs to chars."""
+        child = NaElement(name)
+        if convert:
+            content = NaElement._convert_entity_refs(content)
+        child.set_content(content)
+        self.add_child_elem(child)
+
+    @staticmethod
+    def _convert_entity_refs(text):
+        """Converts entity refs to chars to handle etree auto conversions."""
+        text = text.replace("&lt;", "<")
+        text = text.replace("&gt;", ">")
+        return text
+
+    @staticmethod
+    def create_node_with_children(node, **children):
+        """Creates and returns named node with children."""
+        parent = NaElement(node)
+        for child in children.keys():
+            parent.add_new_child(child, children.get(child, None))
+        return parent
+
+    def add_node_with_children(self, node, **children):
+        """Creates named node with children."""
+        parent = NaElement.create_node_with_children(node, **children)
+        self.add_child_elem(parent)
+
+    def to_string(self, pretty=False, method='xml', encoding='UTF-8'):
+        """Prints the element to string."""
+        return etree.tostring(self._element, method=method, encoding=encoding,
+                              pretty_print=pretty)
+
+
+class NaApiError(Exception):
+    """Base exception class for NetApp api errors."""
+
+    def __init__(self, code='unknown', message='unknown'):
+        self.code = code
+        self.message = message
+
+    def __str__(self, *args, **kwargs):
+        return 'NetApp api failed. Reason - %s:%s' % (self.code, self.message)
diff --git a/cinder/volume/drivers/netapp/iscsi.py b/cinder/volume/drivers/netapp/iscsi.py
new file mode 100644
index 0000000000..7fe24b6f79
--- /dev/null
+++ b/cinder/volume/drivers/netapp/iscsi.py
@@ -0,0 +1,2528 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2012 NetApp, Inc.
+# Copyright (c) 2012 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Volume driver for NetApp storage systems. + +This driver requires NetApp OnCommand 5.0 and one or more Data +ONTAP 7-mode storage systems with installed iSCSI licenses. + +""" + +import time +import uuid + +from oslo.config import cfg +import suds +from suds import client +from suds.sax import text + +from cinder import exception +from cinder.openstack.common import log as logging +from cinder import utils +from cinder.volume import driver +from cinder.volume.drivers.netapp.api import NaApiError +from cinder.volume.drivers.netapp.api import NaElement +from cinder.volume.drivers.netapp.api import NaServer +from cinder.volume import volume_types + +LOG = logging.getLogger(__name__) + +netapp_opts = [ + cfg.StrOpt('netapp_wsdl_url', + default=None, + help='URL of the WSDL file for the DFM/Webservice server'), + cfg.StrOpt('netapp_login', + default=None, + help='User name for the DFM/Controller server'), + cfg.StrOpt('netapp_password', + default=None, + help='Password for the DFM/Controller server', + secret=True), + cfg.StrOpt('netapp_server_hostname', + default=None, + help='Hostname for the DFM/Controller server'), + cfg.IntOpt('netapp_server_port', + default=8088, + help='Port number for the DFM/Controller server'), + cfg.StrOpt('netapp_storage_service', + default=None, + help=('Storage service to use for provisioning ' + '(when volume_type=None)')), + cfg.StrOpt('netapp_storage_service_prefix', + default=None, + help=('Prefix of storage service name to use for ' + 'provisioning (volume_type name will be appended)')), + cfg.StrOpt('netapp_vfiler', + default=None, + help='Vfiler to use for provisioning'), + cfg.StrOpt('netapp_transport_type', + default='http', + help='Transport type protocol'), + cfg.StrOpt('netapp_vserver', + default='openstack', + help='Cluster vserver to use for provisioning'), + cfg.FloatOpt('netapp_size_multiplier', + default=1.2, + help='Volume size multiplier to ensure while creation'), + cfg.StrOpt('netapp_volume_list', + default='', + help='Comma separated eligible volumes for provisioning on' + ' 7 mode'), ] + + +class DfmDataset(object): + def __init__(self, id, name, project, type): + self.id = id + self.name = name + self.project = project + self.type = type + + +class DfmLun(object): + def __init__(self, dataset, lunpath, id): + self.dataset = dataset + self.lunpath = lunpath + self.id = id + + +class NetAppISCSIDriver(driver.ISCSIDriver): + """NetApp iSCSI volume driver.""" + + IGROUP_PREFIX = 'openstack-' + DATASET_PREFIX = 'OpenStack_' + DATASET_METADATA_PROJECT_KEY = 'OpenStackProject' + DATASET_METADATA_VOL_TYPE_KEY = 'OpenStackVolType' + + def __init__(self, *args, **kwargs): + super(NetAppISCSIDriver, self).__init__(*args, **kwargs) + self.configuration.append_config_values(netapp_opts) + self.discovered_luns = [] + self.discovered_datasets = [] + self.lun_table = {} + + def _check_fail(self, request, response): + """Utility routine to handle checking ZAPI failures.""" + if 'failed' == response.Status: + name = request.Name + reason = response.Reason + msg = _('API %(name)s failed: %(reason)s') + raise 
exception.VolumeBackendAPIException(data=msg % locals()) + + def _create_client(self, **kwargs): + """Instantiate a web services client. + + This method creates a "suds" client to make web services calls to the + DFM server. Note that the WSDL file is quite large and may take + a few seconds to parse. + """ + wsdl_url = kwargs['wsdl_url'] + LOG.debug(_('Using WSDL: %s') % wsdl_url) + if kwargs['cache']: + self.client = client.Client(wsdl_url, username=kwargs['login'], + password=kwargs['password']) + else: + self.client = client.Client(wsdl_url, username=kwargs['login'], + password=kwargs['password'], + cache=None) + soap_url = 'http://%s:%s/apis/soap/v1' % (kwargs['hostname'], + kwargs['port']) + LOG.debug(_('Using DFM server: %s') % soap_url) + self.client.set_options(location=soap_url) + + def _set_storage_service(self, storage_service): + """Set the storage service to use for provisioning.""" + LOG.debug(_('Using storage service: %s') % storage_service) + self.storage_service = storage_service + + def _set_storage_service_prefix(self, storage_service_prefix): + """Set the storage service prefix to use for provisioning.""" + LOG.debug(_('Using storage service prefix: %s') % + storage_service_prefix) + self.storage_service_prefix = storage_service_prefix + + def _set_vfiler(self, vfiler): + """Set the vfiler to use for provisioning.""" + LOG.debug(_('Using vfiler: %s') % vfiler) + self.vfiler = vfiler + + def _check_flags(self): + """Ensure that the flags we care about are set.""" + required_flags = ['netapp_wsdl_url', 'netapp_login', 'netapp_password', + 'netapp_server_hostname', 'netapp_server_port'] + for flag in required_flags: + if not getattr(self.configuration, flag, None): + raise exception.InvalidInput(reason=_('%s is not set') % flag) + if not (self.configuration.netapp_storage_service or + self.configuration.netapp_storage_service_prefix): + raise exception.InvalidInput( + reason=_('Either ' + 'netapp_storage_service or ' + 'netapp_storage_service_prefix must ' + 'be set')) + + def do_setup(self, context): + """Setup the NetApp Volume driver. + + Called one time by the manager after the driver is loaded. + Validate the flags we care about and setup the suds (web services) + client. + """ + self._check_flags() + self._create_client( + wsdl_url=self.configuration.netapp_wsdl_url, + login=self.configuration.netapp_login, + password=self.configuration.netapp_password, + hostname=self.configuration.netapp_server_hostname, + port=self.configuration.netapp_server_port, cache=True) + self._set_storage_service(self.configuration.netapp_storage_service) + self._set_storage_service_prefix( + self.configuration.netapp_storage_service_prefix) + self._set_vfiler(self.configuration.netapp_vfiler) + + def check_for_setup_error(self): + """Check that the driver is working and can communicate. + + Invoke a web services API to make sure we can talk to the server. + Also perform the discovery of datasets and LUNs from DFM. 
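_check_flags above only verifies that values are present. A sketch of the same guard as a reusable helper (option names copied from netapp_opts above; check_flags is an illustrative name, and exception.InvalidInput is the driver's own error type):

REQUIRED = ['netapp_wsdl_url', 'netapp_login', 'netapp_password',
            'netapp_server_hostname', 'netapp_server_port']

def check_flags(configuration):
    missing = [f for f in REQUIRED if not getattr(configuration, f, None)]
    if missing:
        raise exception.InvalidInput(
            reason=_('%s is not set') % ', '.join(missing))
    if not (configuration.netapp_storage_service or
            configuration.netapp_storage_service_prefix):
        raise exception.InvalidInput(
            reason=_('Either netapp_storage_service or '
                     'netapp_storage_service_prefix must be set'))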
+ """ + self.client.service.DfmAbout() + LOG.debug(_("Connected to DFM server")) + self._discover_luns() + + def _get_datasets(self): + """Get the list of datasets from DFM.""" + server = self.client.service + res = server.DatasetListInfoIterStart(IncludeMetadata=True) + tag = res.Tag + datasets = [] + try: + while True: + res = server.DatasetListInfoIterNext(Tag=tag, Maximum=100) + if not res.Datasets: + break + datasets.extend(res.Datasets.DatasetInfo) + finally: + server.DatasetListInfoIterEnd(Tag=tag) + return datasets + + def _discover_dataset_luns(self, dataset, volume): + """Discover all of the LUNs in a dataset.""" + server = self.client.service + res = server.DatasetMemberListInfoIterStart( + DatasetNameOrId=dataset.id, + IncludeExportsInfo=True, + IncludeIndirect=True, + MemberType='lun_path') + tag = res.Tag + suffix = None + if volume: + suffix = '/' + volume + try: + while True: + res = server.DatasetMemberListInfoIterNext(Tag=tag, + Maximum=100) + if (not hasattr(res, 'DatasetMembers') or + not res.DatasetMembers): + break + for member in res.DatasetMembers.DatasetMemberInfo: + if suffix and not member.MemberName.endswith(suffix): + continue + # MemberName is the full LUN path in this format: + # host:/volume/qtree/lun + lun = DfmLun(dataset, member.MemberName, member.MemberId) + self.discovered_luns.append(lun) + finally: + server.DatasetMemberListInfoIterEnd(Tag=tag) + + def _discover_luns(self): + """Discover the LUNs from DFM. + + Discover all of the OpenStack-created datasets and LUNs in the DFM + database. + """ + datasets = self._get_datasets() + self.discovered_datasets = [] + self.discovered_luns = [] + for dataset in datasets: + if not dataset.DatasetName.startswith(self.DATASET_PREFIX): + continue + if (not hasattr(dataset, 'DatasetMetadata') or + not dataset.DatasetMetadata): + continue + project = None + type = None + for field in dataset.DatasetMetadata.DfmMetadataField: + if field.FieldName == self.DATASET_METADATA_PROJECT_KEY: + project = field.FieldValue + elif field.FieldName == self.DATASET_METADATA_VOL_TYPE_KEY: + type = field.FieldValue + if not project: + continue + ds = DfmDataset(dataset.DatasetId, dataset.DatasetName, + project, type) + self.discovered_datasets.append(ds) + self._discover_dataset_luns(ds, None) + dataset_count = len(self.discovered_datasets) + lun_count = len(self.discovered_luns) + msg = _("Discovered %(dataset_count)s datasets and %(lun_count)s LUNs") + LOG.debug(msg % locals()) + self.lun_table = {} + + def _get_job_progress(self, job_id): + """Get progress of one running DFM job. + + Obtain the latest progress report for the job and return the + list of progress events. + """ + server = self.client.service + res = server.DpJobProgressEventListIterStart(JobId=job_id) + tag = res.Tag + event_list = [] + try: + while True: + res = server.DpJobProgressEventListIterNext(Tag=tag, + Maximum=100) + if not hasattr(res, 'ProgressEvents'): + break + event_list += res.ProgressEvents.DpJobProgressEventInfo + finally: + server.DpJobProgressEventListIterEnd(Tag=tag) + return event_list + + def _wait_for_job(self, job_id): + """Wait until a job terminates. + + Poll the job until it completes or an error is detected. Return the + final list of progress events if it completes successfully. 
+ """ + while True: + events = self._get_job_progress(job_id) + for event in events: + if event.EventStatus == 'error': + msg = _('Job failed: %s') % (event.ErrorMessage) + raise exception.VolumeBackendAPIException(data=msg) + if event.EventType == 'job-end': + return events + time.sleep(5) + + def _dataset_name(self, project, ss_type): + """Return the dataset name for a given project and volume type.""" + _project = project.replace(' ', '_').replace('-', '_') + dataset_name = self.DATASET_PREFIX + _project + if not ss_type: + return dataset_name + _type = ss_type.replace(' ', '_').replace('-', '_') + return dataset_name + '_' + _type + + def _get_dataset(self, dataset_name): + """Lookup a dataset by name in the list of discovered datasets.""" + for dataset in self.discovered_datasets: + if dataset.name == dataset_name: + return dataset + return None + + def _create_dataset(self, dataset_name, project, ss_type): + """Create a new dataset using the storage service. + + The export settings are set to create iSCSI LUNs aligned for Linux. + Returns the ID of the new dataset. + """ + if ss_type and not self.storage_service_prefix: + msg = _('Attempt to use volume_type without specifying ' + 'netapp_storage_service_prefix flag.') + raise exception.VolumeBackendAPIException(data=msg) + if not (ss_type or self.storage_service): + msg = _('You must set the netapp_storage_service flag in order to ' + 'create volumes with no volume_type.') + raise exception.VolumeBackendAPIException(data=msg) + storage_service = self.storage_service + if ss_type: + storage_service = self.storage_service_prefix + ss_type + + factory = self.client.factory + + lunmap = factory.create('DatasetLunMappingInfo') + lunmap.IgroupOsType = 'linux' + export = factory.create('DatasetExportInfo') + export.DatasetExportProtocol = 'iscsi' + export.DatasetLunMappingInfo = lunmap + detail = factory.create('StorageSetInfo') + detail.DpNodeName = 'Primary data' + detail.DatasetExportInfo = export + if hasattr(self, 'vfiler') and self.vfiler: + detail.ServerNameOrId = self.vfiler + details = factory.create('ArrayOfStorageSetInfo') + details.StorageSetInfo = [detail] + field1 = factory.create('DfmMetadataField') + field1.FieldName = self.DATASET_METADATA_PROJECT_KEY + field1.FieldValue = project + field2 = factory.create('DfmMetadataField') + field2.FieldName = self.DATASET_METADATA_VOL_TYPE_KEY + field2.FieldValue = ss_type + metadata = factory.create('ArrayOfDfmMetadataField') + metadata.DfmMetadataField = [field1, field2] + + res = self.client.service.StorageServiceDatasetProvision( + StorageServiceNameOrId=storage_service, + DatasetName=dataset_name, + AssumeConfirmation=True, + StorageSetDetails=details, + DatasetMetadata=metadata) + + ds = DfmDataset(res.DatasetId, dataset_name, project, ss_type) + self.discovered_datasets.append(ds) + return ds + + @utils.synchronized('netapp_dfm', external=True) + def _provision(self, name, description, project, ss_type, size): + """Provision a LUN through provisioning manager. + + The LUN will be created inside a dataset associated with the project. + If the dataset doesn't already exist, we create it using the storage + service specified in the cinder conf. 
+ """ + dataset_name = self._dataset_name(project, ss_type) + dataset = self._get_dataset(dataset_name) + if not dataset: + dataset = self._create_dataset(dataset_name, project, ss_type) + + info = self.client.factory.create('ProvisionMemberRequestInfo') + info.Name = name + if description: + info.Description = description + info.Size = size + info.MaximumSnapshotSpace = 2 * long(size) + + server = self.client.service + lock_id = server.DatasetEditBegin(DatasetNameOrId=dataset.id) + try: + server.DatasetProvisionMember(EditLockId=lock_id, + ProvisionMemberRequestInfo=info) + res = server.DatasetEditCommit(EditLockId=lock_id, + AssumeConfirmation=True) + except (suds.WebFault, Exception): + server.DatasetEditRollback(EditLockId=lock_id) + msg = _('Failed to provision dataset member') + raise exception.VolumeBackendAPIException(data=msg) + + lun_id = None + lunpath = None + + for info in res.JobIds.JobInfo: + events = self._wait_for_job(info.JobId) + for event in events: + if event.EventType != 'lun-create': + continue + lunpath = event.ProgressLunInfo.LunName + lun_id = event.ProgressLunInfo.LunPathId + + if not lun_id: + msg = _('No LUN was created by the provision job') + raise exception.VolumeBackendAPIException(data=msg) + + lun = DfmLun(dataset, lunpath, lun_id) + self.discovered_luns.append(lun) + self.lun_table[name] = lun + + def _get_ss_type(self, volume): + """Get the storage service type for a volume.""" + id = volume['volume_type_id'] + if not id: + return None + volume_type = volume_types.get_volume_type(None, id) + if not volume_type: + return None + return volume_type['name'] + + @utils.synchronized('netapp_dfm', external=True) + def _remove_destroy(self, name, project): + """Remove the LUN from the dataset, also destroying it. + + Remove the LUN from the dataset and destroy the actual LUN and Qtree + on the storage system. 
+ """ + try: + lun = self._lookup_lun_for_volume(name, project) + lun_details = self._get_lun_details(lun.id) + except exception.VolumeBackendAPIException: + msg = _("No entry in LUN table for volume %(name)s.") + LOG.debug(msg % locals()) + return + + member = self.client.factory.create('DatasetMemberParameter') + member.ObjectNameOrId = lun.id + members = self.client.factory.create('ArrayOfDatasetMemberParameter') + members.DatasetMemberParameter = [member] + + server = self.client.service + lock_id = server.DatasetEditBegin(DatasetNameOrId=lun.dataset.id) + try: + server.DatasetRemoveMember(EditLockId=lock_id, Destroy=True, + DatasetMemberParameters=members) + res = server.DatasetEditCommit(EditLockId=lock_id, + AssumeConfirmation=True) + except (suds.WebFault, Exception): + server.DatasetEditRollback(EditLockId=lock_id) + msg = _('Failed to remove and delete dataset LUN member') + raise exception.VolumeBackendAPIException(data=msg) + + for info in res.JobIds.JobInfo: + self._wait_for_job(info.JobId) + + # Note: it's not possible to delete Qtree & his LUN in one transaction + member.ObjectNameOrId = lun_details.QtreeId + lock_id = server.DatasetEditBegin(DatasetNameOrId=lun.dataset.id) + try: + server.DatasetRemoveMember(EditLockId=lock_id, Destroy=True, + DatasetMemberParameters=members) + server.DatasetEditCommit(EditLockId=lock_id, + AssumeConfirmation=True) + except (suds.WebFault, Exception): + server.DatasetEditRollback(EditLockId=lock_id) + msg = _('Failed to remove and delete dataset Qtree member') + raise exception.VolumeBackendAPIException(data=msg) + + def create_volume(self, volume): + """Driver entry point for creating a new volume.""" + default_size = '104857600' # 100 MB + gigabytes = 1073741824L # 2^30 + name = volume['name'] + project = volume['project_id'] + display_name = volume['display_name'] + display_description = volume['display_description'] + description = None + if display_name: + if display_description: + description = display_name + "\n" + display_description + else: + description = display_name + elif display_description: + description = display_description + if int(volume['size']) == 0: + size = default_size + else: + size = str(int(volume['size']) * gigabytes) + ss_type = self._get_ss_type(volume) + self._provision(name, description, project, ss_type, size) + + def _lookup_lun_for_volume(self, name, project): + """Lookup the LUN that corresponds to the give volume. + + Initial lookups involve a table scan of all of the discovered LUNs, + but later lookups are done instantly from the hashtable. 
+ """ + if name in self.lun_table: + return self.lun_table[name] + lunpath_suffix = '/' + name + for lun in self.discovered_luns: + if lun.dataset.project != project: + continue + if lun.lunpath.endswith(lunpath_suffix): + self.lun_table[name] = lun + return lun + msg = _("No entry in LUN table for volume %s") % (name) + raise exception.VolumeBackendAPIException(data=msg) + + def delete_volume(self, volume): + """Driver entry point for destroying existing volumes.""" + name = volume['name'] + project = volume['project_id'] + self._remove_destroy(name, project) + + def _get_lun_details(self, lun_id): + """Given the ID of a LUN, get the details about that LUN.""" + server = self.client.service + res = server.LunListInfoIterStart(ObjectNameOrId=lun_id) + tag = res.Tag + try: + res = server.LunListInfoIterNext(Tag=tag, Maximum=1) + if hasattr(res, 'Luns') and res.Luns.LunInfo: + return res.Luns.LunInfo[0] + finally: + server.LunListInfoIterEnd(Tag=tag) + msg = _('Failed to get LUN details for LUN ID %s') + raise exception.VolumeBackendAPIException(data=msg % lun_id) + + def _get_host_details(self, host_id): + """Given the ID of a host, get the details about it. + + A "host" is a storage system here. + """ + server = self.client.service + res = server.HostListInfoIterStart(ObjectNameOrId=host_id) + tag = res.Tag + try: + res = server.HostListInfoIterNext(Tag=tag, Maximum=1) + if hasattr(res, 'Hosts') and res.Hosts.HostInfo: + return res.Hosts.HostInfo[0] + finally: + server.HostListInfoIterEnd(Tag=tag) + msg = _('Failed to get host details for host ID %s') + raise exception.VolumeBackendAPIException(data=msg % host_id) + + def _get_iqn_for_host(self, host_id): + """Get the iSCSI Target Name for a storage system.""" + request = self.client.factory.create('Request') + request.Name = 'iscsi-node-get-name' + response = self.client.service.ApiProxy(Target=host_id, + Request=request) + self._check_fail(request, response) + return response.Results['node-name'][0] + + def _api_elem_is_empty(self, elem): + """Return true if the API element should be considered empty. + + Helper routine to figure out if a list returned from a proxy API + is empty. This is necessary because the API proxy produces nasty + looking XML. + """ + if type(elem) is not list: + return True + if 0 == len(elem): + return True + child = elem[0] + if isinstance(child, text.Text): + return True + if type(child) is str: + return True + return False + + def _get_target_portal_for_host(self, host_id, host_address): + """Get iSCSI target portal for a storage system. + + Get the iSCSI Target Portal details for a particular IP address + on a storage system. + """ + request = self.client.factory.create('Request') + request.Name = 'iscsi-portal-list-info' + response = self.client.service.ApiProxy(Target=host_id, + Request=request) + self._check_fail(request, response) + portal = {} + portals = response.Results['iscsi-portal-list-entries'] + if self._api_elem_is_empty(portals): + return portal + portal_infos = portals[0]['iscsi-portal-list-entry-info'] + for portal_info in portal_infos: + portal['address'] = portal_info['ip-address'][0] + portal['port'] = portal_info['ip-port'][0] + portal['portal'] = portal_info['tpgroup-tag'][0] + if host_address == portal['address']: + break + return portal + + def _get_export(self, volume): + """Get the iSCSI export details for a volume. + + Looks up the LUN in DFM based on the volume and project name, then get + the LUN's ID. 
We store that value in the database instead of the iSCSI
+        details because we will not have the true iSCSI details until masking
+        time (when initialize_connection() is called).
+        """
+        name = volume['name']
+        project = volume['project_id']
+        lun = self._lookup_lun_for_volume(name, project)
+        return {'provider_location': lun.id}
+
+    def ensure_export(self, context, volume):
+        """Driver entry point to get the export info for an existing volume."""
+        return self._get_export(volume)
+
+    def create_export(self, context, volume):
+        """Driver entry point to get the export info for a new volume."""
+        return self._get_export(volume)
+
+    def remove_export(self, context, volume):
+        """Driver entry point to remove an export for a volume.
+
+        Since exporting is idempotent in this driver, we have nothing
+        to do for unexporting.
+        """
+        pass
+
+    def _find_igroup_for_initiator(self, host_id, initiator_name):
+        """Get the igroup for an initiator.
+
+        Look for an existing igroup (initiator group) on the storage system
+        containing a given iSCSI initiator and return the name of the igroup.
+        """
+        request = self.client.factory.create('Request')
+        request.Name = 'igroup-list-info'
+        response = self.client.service.ApiProxy(Target=host_id,
+                                                Request=request)
+        self._check_fail(request, response)
+        igroups = response.Results['initiator-groups']
+        if self._api_elem_is_empty(igroups):
+            return None
+        igroup_infos = igroups[0]['initiator-group-info']
+        for igroup_info in igroup_infos:
+            if ('iscsi' != igroup_info['initiator-group-type'][0] or
+                    'linux' != igroup_info['initiator-group-os-type'][0]):
+                continue
+            igroup_name = igroup_info['initiator-group-name'][0]
+            if not igroup_name.startswith(self.IGROUP_PREFIX):
+                continue
+            initiators = igroup_info['initiators'][0]['initiator-info']
+            for initiator in initiators:
+                if initiator_name == initiator['initiator-name'][0]:
+                    return igroup_name
+        return None
+
+    def _create_igroup(self, host_id, initiator_name):
+        """Create a new igroup.
+
+        Create a new igroup (initiator group) on the storage system to hold
+        the given iSCSI initiator. The group will only have 1 member and will
+        be named "openstack-${initiator_name}".
+        """
+        igroup_name = self.IGROUP_PREFIX + initiator_name
+        request = self.client.factory.create('Request')
+        request.Name = 'igroup-create'
+        igroup_create_xml = (
+            '<initiator-group-name>%s</initiator-group-name>'
+            '<initiator-group-type>iscsi</initiator-group-type>'
+            '<os-type>linux</os-type><initiator-group-os-type>linux</initiator-group-os-type>')
+        request.Args = text.Raw(igroup_create_xml % igroup_name)
+        response = self.client.service.ApiProxy(Target=host_id,
+                                                Request=request)
+        self._check_fail(request, response)
+        request = self.client.factory.create('Request')
+        request.Name = 'igroup-add'
+        igroup_add_xml = (
+            '<initiator-group-name>%s</initiator-group-name>'
+            '<initiator>%s</initiator>')
+        request.Args = text.Raw(igroup_add_xml % (igroup_name, initiator_name))
+        response = self.client.service.ApiProxy(Target=host_id,
+                                                Request=request)
+        self._check_fail(request, response)
+        return igroup_name
+
+    def _get_lun_mappping(self, host_id, lunpath, igroup_name):
+        """Get the mapping between a LUN and an igroup.
+
+        Check if a given LUN is already mapped to the given igroup (initiator
+        group). If the LUN is mapped, also return the LUN number for the
+        mapping.
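Igroups are keyed to the initiator name with a fixed prefix, as the docstring above notes; for example (the initiator IQN here is illustrative):

IGROUP_PREFIX = 'openstack-'
initiator = 'iqn.1993-08.org.debian:01:3b2f1c9d'
igroup_name = IGROUP_PREFIX + initiator
# -> 'openstack-iqn.1993-08.org.debian:01:3b2f1c9d'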
+ """ + request = self.client.factory.create('Request') + request.Name = 'lun-map-list-info' + request.Args = text.Raw('%s' % (lunpath)) + response = self.client.service.ApiProxy(Target=host_id, + Request=request) + self._check_fail(request, response) + igroups = response.Results['initiator-groups'] + if self._api_elem_is_empty(igroups): + return {'mapped': False} + igroup_infos = igroups[0]['initiator-group-info'] + for igroup_info in igroup_infos: + if igroup_name == igroup_info['initiator-group-name'][0]: + return {'mapped': True, 'lun_num': igroup_info['lun-id'][0]} + return {'mapped': False} + + def _map_initiator(self, host_id, lunpath, igroup_name): + """Map a LUN to an igroup. + + Map the given LUN to the given igroup (initiator group). Return the LUN + number that the LUN was mapped to (the filer will choose the lowest + available number). + """ + request = self.client.factory.create('Request') + request.Name = 'lun-map' + lun_map_xml = ('%s' + '%s') + request.Args = text.Raw(lun_map_xml % (igroup_name, lunpath)) + response = self.client.service.ApiProxy(Target=host_id, + Request=request) + self._check_fail(request, response) + return response.Results['lun-id-assigned'][0] + + def _unmap_initiator(self, host_id, lunpath, igroup_name): + """Unmap the given LUN from the given igroup (initiator group).""" + request = self.client.factory.create('Request') + request.Name = 'lun-unmap' + lun_unmap_xml = ('%s' + '%s') + request.Args = text.Raw(lun_unmap_xml % (igroup_name, lunpath)) + response = self.client.service.ApiProxy(Target=host_id, + Request=request) + self._check_fail(request, response) + + def _ensure_initiator_mapped(self, host_id, lunpath, initiator_name): + """Ensure that a LUN is mapped to a particular initiator. + + Check if a LUN is mapped to a given initiator already and create + the mapping if it is not. A new igroup will be created if needed. + Returns the LUN number for the mapping between the LUN and initiator + in both cases. + """ + lunpath = '/vol/' + lunpath + igroup_name = self._find_igroup_for_initiator(host_id, initiator_name) + if not igroup_name: + igroup_name = self._create_igroup(host_id, initiator_name) + + mapping = self._get_lun_mappping(host_id, lunpath, igroup_name) + if mapping['mapped']: + return mapping['lun_num'] + return self._map_initiator(host_id, lunpath, igroup_name) + + def _ensure_initiator_unmapped(self, host_id, lunpath, initiator_name): + """Ensure that a LUN is not mapped to a particular initiator. + + Check if a LUN is mapped to a given initiator and remove the + mapping if it is. This does not destroy the igroup. + """ + lunpath = '/vol/' + lunpath + igroup_name = self._find_igroup_for_initiator(host_id, initiator_name) + if not igroup_name: + return + + mapping = self._get_lun_mappping(host_id, lunpath, igroup_name) + if mapping['mapped']: + self._unmap_initiator(host_id, lunpath, igroup_name) + + def initialize_connection(self, volume, connector): + """Driver entry point to attach a volume to an instance. + + Do the LUN masking on the storage system so the initiator can access + the LUN on the target. Also return the iSCSI properties so the + initiator can find the LUN. This implementation does not call + _get_iscsi_properties() to get the properties because cannot store the + LUN number in the database. We only find out what the LUN number will + be during this method call so we construct the properties dictionary + ourselves. 
+ """ + initiator_name = connector['initiator'] + lun_id = volume['provider_location'] + if not lun_id: + msg = _("No LUN ID for volume %s") % volume['name'] + raise exception.VolumeBackendAPIException(data=msg) + lun = self._get_lun_details(lun_id) + lun_num = self._ensure_initiator_mapped(lun.HostId, lun.LunPath, + initiator_name) + host = self._get_host_details(lun.HostId) + portal = self._get_target_portal_for_host(host.HostId, + host.HostAddress) + if not portal: + msg = _('Failed to get target portal for filer: %s') + raise exception.VolumeBackendAPIException(data=msg % host.HostName) + + iqn = self._get_iqn_for_host(host.HostId) + if not iqn: + msg = _('Failed to get target IQN for filer: %s') + raise exception.VolumeBackendAPIException(data=msg % host.HostName) + + properties = {} + properties['target_discovered'] = False + (address, port) = (portal['address'], portal['port']) + properties['target_portal'] = '%s:%s' % (address, port) + properties['target_iqn'] = iqn + properties['target_lun'] = lun_num + properties['volume_id'] = volume['id'] + + auth = volume['provider_auth'] + if auth: + (auth_method, auth_username, auth_secret) = auth.split() + + properties['auth_method'] = auth_method + properties['auth_username'] = auth_username + properties['auth_password'] = auth_secret + + return { + 'driver_volume_type': 'iscsi', + 'data': properties, + } + + def terminate_connection(self, volume, connector, **kwargs): + """Driver entry point to unattach a volume from an instance. + + Unmask the LUN on the storage system so the given intiator can no + longer access it. + """ + initiator_name = connector['initiator'] + lun_id = volume['provider_location'] + if not lun_id: + msg = _('No LUN ID for volume %s') % volume['name'] + raise exception.VolumeBackendAPIException(data=msg) + lun = self._get_lun_details(lun_id) + self._ensure_initiator_unmapped(lun.HostId, lun.LunPath, + initiator_name) + + def _is_clone_done(self, host_id, clone_op_id, volume_uuid): + """Check the status of a clone operation. + + Return True if done, False otherwise. + """ + request = self.client.factory.create('Request') + request.Name = 'clone-list-status' + clone_list_status_xml = ( + '' + '%s' + '%s' + '') + request.Args = text.Raw(clone_list_status_xml % (clone_op_id, + volume_uuid)) + response = self.client.service.ApiProxy(Target=host_id, + Request=request) + self._check_fail(request, response) + if isinstance(response.Results, text.Text): + return False + status = response.Results['status'] + if self._api_elem_is_empty(status): + return False + ops_info = status[0]['ops-info'][0] + state = ops_info['clone-state'][0] + return 'completed' == state + + def _clone_lun(self, host_id, src_path, dest_path, snap): + """Create a clone of a NetApp LUN. + + The clone initially consumes no space and is not space reserved. 
+ """ + request = self.client.factory.create('Request') + request.Name = 'clone-start' + clone_start_xml = ( + '%s%s' + '%s') + if snap: + no_snap = 'false' + else: + no_snap = 'true' + request.Args = text.Raw(clone_start_xml % (src_path, no_snap, + dest_path)) + response = self.client.service.ApiProxy(Target=host_id, + Request=request) + self._check_fail(request, response) + clone_id = response.Results['clone-id'][0] + clone_id_info = clone_id['clone-id-info'][0] + clone_op_id = clone_id_info['clone-op-id'][0] + volume_uuid = clone_id_info['volume-uuid'][0] + while not self._is_clone_done(host_id, clone_op_id, volume_uuid): + time.sleep(5) + + def _refresh_dfm_luns(self, host_id): + """Refresh the LUN list for one filer in DFM.""" + server = self.client.service + refresh_started_at = time.time() + monitor_names = self.client.factory.create('ArrayOfMonitorName') + monitor_names.MonitorName = ['file_system', 'lun'] + server.DfmObjectRefresh(ObjectNameOrId=host_id, + MonitorNames=monitor_names) + + max_wait = 10 * 60 # 10 minutes + + while True: + if time.time() - refresh_started_at > max_wait: + msg = _('Failed to get LUN list. Is the DFM host' + ' time-synchronized with Cinder host?') + raise exception.VolumeBackendAPIException(msg) + + LOG.info('Refreshing LUN list on DFM...') + time.sleep(15) + res = server.DfmMonitorTimestampList(HostNameOrId=host_id) + timestamps = dict((t.MonitorName, t.LastMonitoringTimestamp) + for t in res.DfmMonitoringTimestamp) + ts_fs = timestamps['file_system'] + ts_lun = timestamps['lun'] + + if ts_fs > refresh_started_at and ts_lun > refresh_started_at: + return # both monitor jobs finished + elif ts_fs == 0 or ts_lun == 0: + pass # lun or file_system is still in progress, wait + else: + monitor_names.MonitorName = [] + if ts_fs <= refresh_started_at: + monitor_names.MonitorName.append('file_system') + if ts_lun <= refresh_started_at: + monitor_names.MonitorName.append('lun') + LOG.debug('Rerunning refresh for monitors: ' + + str(monitor_names.MonitorName)) + server.DfmObjectRefresh(ObjectNameOrId=host_id, + MonitorNames=monitor_names) + + def _destroy_lun(self, host_id, lun_path): + """Destroy a LUN on the filer.""" + request = self.client.factory.create('Request') + request.Name = 'lun-offline' + path_xml = '%s' + request.Args = text.Raw(path_xml % lun_path) + response = self.client.service.ApiProxy(Target=host_id, + Request=request) + self._check_fail(request, response) + request = self.client.factory.create('Request') + request.Name = 'lun-destroy' + request.Args = text.Raw(path_xml % lun_path) + response = self.client.service.ApiProxy(Target=host_id, + Request=request) + self._check_fail(request, response) + + def _resize_volume(self, host_id, vol_name, new_size): + """Resize the volume by the amount requested.""" + request = self.client.factory.create('Request') + request.Name = 'volume-size' + volume_size_xml = ( + '%s%s') + request.Args = text.Raw(volume_size_xml % (vol_name, new_size)) + response = self.client.service.ApiProxy(Target=host_id, + Request=request) + self._check_fail(request, response) + + def _create_qtree(self, host_id, vol_name, qtree_name): + """Create a qtree the filer.""" + request = self.client.factory.create('Request') + request.Name = 'qtree-create' + qtree_create_xml = ( + '0755%s%s') + request.Args = text.Raw(qtree_create_xml % (vol_name, qtree_name)) + response = self.client.service.ApiProxy(Target=host_id, + Request=request) + self._check_fail(request, response) + + def create_snapshot(self, snapshot): + """Driver entry 
point for creating a snapshot. + + This driver implements snapshots by using efficient single-file + (LUN) cloning. + """ + vol_name = snapshot['volume_name'] + snapshot_name = snapshot['name'] + project = snapshot['project_id'] + lun = self._lookup_lun_for_volume(vol_name, project) + lun_id = lun.id + lun = self._get_lun_details(lun_id) + extra_gb = snapshot['volume_size'] + new_size = '+%dg' % extra_gb + self._resize_volume(lun.HostId, lun.VolumeName, new_size) + # LunPath is the partial LUN path in this format: volume/qtree/lun + lun_path = str(lun.LunPath) + lun_name = lun_path[lun_path.rfind('/') + 1:] + qtree_path = '/vol/%s/%s' % (lun.VolumeName, lun.QtreeName) + src_path = '%s/%s' % (qtree_path, lun_name) + dest_path = '%s/%s' % (qtree_path, snapshot_name) + self._clone_lun(lun.HostId, src_path, dest_path, True) + + def delete_snapshot(self, snapshot): + """Driver entry point for deleting a snapshot.""" + vol_name = snapshot['volume_name'] + snapshot_name = snapshot['name'] + project = snapshot['project_id'] + lun = self._lookup_lun_for_volume(vol_name, project) + lun_id = lun.id + lun = self._get_lun_details(lun_id) + lun_path = '/vol/%s/%s/%s' % (lun.VolumeName, lun.QtreeName, + snapshot_name) + self._destroy_lun(lun.HostId, lun_path) + extra_gb = snapshot['volume_size'] + new_size = '-%dg' % extra_gb + self._resize_volume(lun.HostId, lun.VolumeName, new_size) + + def create_volume_from_snapshot(self, volume, snapshot): + """Driver entry point for creating a new volume from a snapshot. + + Many would call this "cloning" and in fact we use cloning to implement + this feature. + """ + vol_size = volume['size'] + snap_size = snapshot['volume_size'] + if vol_size != snap_size: + msg = _('Cannot create volume of size %(vol_size)s from ' + 'snapshot of size %(snap_size)s') + raise exception.VolumeBackendAPIException(data=msg % locals()) + vol_name = snapshot['volume_name'] + snapshot_name = snapshot['name'] + project = snapshot['project_id'] + lun = self._lookup_lun_for_volume(vol_name, project) + lun_id = lun.id + dataset = lun.dataset + old_type = dataset.type + new_type = self._get_ss_type(volume) + if new_type != old_type: + msg = _('Cannot create volume of type %(new_type)s from ' + 'snapshot of type %(old_type)s') + raise exception.VolumeBackendAPIException(data=msg % locals()) + lun = self._get_lun_details(lun_id) + extra_gb = vol_size + new_size = '+%dg' % extra_gb + self._resize_volume(lun.HostId, lun.VolumeName, new_size) + clone_name = volume['name'] + self._create_qtree(lun.HostId, lun.VolumeName, clone_name) + src_path = '/vol/%s/%s/%s' % (lun.VolumeName, lun.QtreeName, + snapshot_name) + dest_path = '/vol/%s/%s/%s' % (lun.VolumeName, clone_name, clone_name) + self._clone_lun(lun.HostId, src_path, dest_path, False) + self._refresh_dfm_luns(lun.HostId) + self._discover_dataset_luns(dataset, clone_name) + + def create_cloned_volume(self, volume, src_vref): + """Creates a clone of the specified volume.""" + vol_size = volume['size'] + src_vol_size = src_vref['size'] + if vol_size != src_vol_size: + msg = _('Cannot create clone of size %(vol_size)s from ' + 'volume of size %(src_vol_size)s') + raise exception.VolumeBackendAPIException(data=msg % locals()) + src_vol_name = src_vref['name'] + project = src_vref['project_id'] + lun = self._lookup_lun_for_volume(src_vol_name, project) + lun_id = lun.id + dataset = lun.dataset + old_type = dataset.type + new_type = self._get_ss_type(volume) + if new_type != old_type: + msg = _('Cannot create clone of type %(new_type)s from ' + 
                    'volume of type %(old_type)s')
+            raise exception.VolumeBackendAPIException(data=msg % locals())
+        lun = self._get_lun_details(lun_id)
+        extra_gb = vol_size
+        new_size = '+%dg' % extra_gb
+        self._resize_volume(lun.HostId, lun.VolumeName, new_size)
+        clone_name = volume['name']
+        self._create_qtree(lun.HostId, lun.VolumeName, clone_name)
+        src_path = '/vol/%s/%s/%s' % (lun.VolumeName, lun.QtreeName,
+                                      src_vol_name)
+        dest_path = '/vol/%s/%s/%s' % (lun.VolumeName, clone_name, clone_name)
+        self._clone_lun(lun.HostId, src_path, dest_path, False)
+        self._refresh_dfm_luns(lun.HostId)
+        self._discover_dataset_luns(dataset, clone_name)
+
+    def get_volume_stats(self, refresh=False):
+        """Get volume status.
+
+        If 'refresh' is True, update the stats first."""
+        if refresh:
+            self._update_volume_status()
+
+        return self._stats
+
+    def _update_volume_status(self):
+        """Retrieve status info from volume group."""
+
+        LOG.debug(_("Updating volume status"))
+        data = {}
+        backend_name = self.configuration.safe_get('volume_backend_name')
+        data["volume_backend_name"] = backend_name or 'NetApp_iSCSI_7mode'
+        data["vendor_name"] = 'NetApp'
+        data["driver_version"] = '1.0'
+        data["storage_protocol"] = 'iSCSI'
+
+        data['total_capacity_gb'] = 'infinite'
+        data['free_capacity_gb'] = 'infinite'
+        data['reserved_percentage'] = 0
+        data['QoS_support'] = False
+        self._stats = data
+
+
+class NetAppLun(object):
+    """Represents a LUN on NetApp storage."""
+
+    def __init__(self, handle, name, size, metadata_dict):
+        self.handle = handle
+        self.name = name
+        self.size = size
+        self.metadata = metadata_dict or {}
+
+    def get_metadata_property(self, prop):
+        """Get the metadata property of a LUN."""
+        if prop in self.metadata:
+            return self.metadata[prop]
+        name = self.name
+        msg = _("No metadata property %(prop)s defined for the LUN %(name)s")
+        LOG.debug(msg % locals())
+
+    def __str__(self, *args, **kwargs):
+        return 'NetApp Lun[handle:%s, name:%s, size:%s, metadata:%s]'\
+               % (self.handle, self.name, self.size, self.metadata)
+
+
+class NetAppCmodeISCSIDriver(driver.ISCSIDriver):
+    """NetApp C-mode iSCSI volume driver."""
+
+    def __init__(self, *args, **kwargs):
+        super(NetAppCmodeISCSIDriver, self).__init__(*args, **kwargs)
+        self.configuration.append_config_values(netapp_opts)
+        self.lun_table = {}
+
+    def _create_client(self, **kwargs):
+        """Instantiate a web services client.
+
+        This method creates a "suds" client to make web services calls to the
+        DFM server. Note that the WSDL file is quite large and may take
+        a few seconds to parse.
+        """
+        wsdl_url = kwargs['wsdl_url']
+        LOG.debug(_('Using WSDL: %s') % wsdl_url)
+        if kwargs['cache']:
+            self.client = client.Client(wsdl_url, username=kwargs['login'],
+                                        password=kwargs['password'])
+        else:
+            self.client = client.Client(wsdl_url, username=kwargs['login'],
+                                        password=kwargs['password'],
+                                        cache=None)
+
+    def _check_flags(self):
+        """Ensure that the flags we care about are set."""
+        required_flags = ['netapp_wsdl_url', 'netapp_login', 'netapp_password',
+                          'netapp_server_hostname', 'netapp_server_port']
+        for flag in required_flags:
+            if not getattr(self.configuration, flag, None):
+                msg = _('%s is not set') % flag
+                raise exception.InvalidInput(data=msg)
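+    # The flags checked above map onto cinder.conf entries along these
+    # lines (values are placeholders, not defaults):
+    #
+    #   netapp_wsdl_url = http://dfm.example.com:8088/dfm.wsdl
+    #   netapp_login = admin
+    #   netapp_password = secret
+    #   netapp_server_hostname = dfm.example.com
+    #   netapp_server_port = 8088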
+ """ + self._check_flags() + self._create_client( + wsdl_url=self.configuration.netapp_wsdl_url, + login=self.configuration.netapp_login, + password=self.configuration.netapp_password, + hostname=self.configuration.netapp_server_hostname, + port=self.configuration.netapp_server_port, cache=True) + + def check_for_setup_error(self): + """Check that the driver is working and can communicate. + + Discovers the LUNs on the NetApp server. + """ + self.lun_table = {} + luns = self.client.service.ListLuns() + for lun in luns: + meta_dict = {} + if hasattr(lun, 'Metadata'): + meta_dict = self._create_dict_from_meta(lun.Metadata) + discovered_lun = NetAppLun(lun.Handle, + lun.Name, + lun.Size, + meta_dict) + self._add_lun_to_table(discovered_lun) + LOG.debug(_("Success getting LUN list from server")) + + def create_volume(self, volume): + """Driver entry point for creating a new volume.""" + default_size = '104857600' # 100 MB + gigabytes = 1073741824L # 2^30 + name = volume['name'] + if int(volume['size']) == 0: + size = default_size + else: + size = str(int(volume['size']) * gigabytes) + extra_args = {} + extra_args['OsType'] = 'linux' + extra_args['QosType'] = self._get_qos_type(volume) + extra_args['Container'] = volume['project_id'] + extra_args['Display'] = volume['display_name'] + extra_args['Description'] = volume['display_description'] + extra_args['SpaceReserved'] = True + server = self.client.service + metadata = self._create_metadata_list(extra_args) + lun = server.ProvisionLun(Name=name, Size=size, + Metadata=metadata) + LOG.debug(_("Created LUN with name %s") % name) + self._add_lun_to_table( + NetAppLun(lun.Handle, + lun.Name, + lun.Size, + self._create_dict_from_meta(lun.Metadata))) + + def delete_volume(self, volume): + """Driver entry point for destroying existing volumes.""" + name = volume['name'] + handle = self._get_lun_handle(name) + if not handle: + msg = _("No entry in LUN table for volume %(name)s.") + LOG.warn(msg % locals()) + return + self.client.service.DestroyLun(Handle=handle) + LOG.debug(_("Destroyed LUN %s") % handle) + self.lun_table.pop(name) + + def ensure_export(self, context, volume): + """Driver entry point to get the export info for an existing volume.""" + handle = self._get_lun_handle(volume['name']) + return {'provider_location': handle} + + def create_export(self, context, volume): + """Driver entry point to get the export info for a new volume.""" + handle = self._get_lun_handle(volume['name']) + return {'provider_location': handle} + + def remove_export(self, context, volume): + """Driver exntry point to remove an export for a volume. + + Since exporting is idempotent in this driver, we have nothing + to do for unexporting. + """ + pass + + def initialize_connection(self, volume, connector): + """Driver entry point to attach a volume to an instance. + + Do the LUN masking on the storage system so the initiator can access + the LUN on the target. Also return the iSCSI properties so the + initiator can find the LUN. This implementation does not call + _get_iscsi_properties() to get the properties because cannot store the + LUN number in the database. We only find out what the LUN number will + be during this method call so we construct the properties dictionary + ourselves. 
+ """ + initiator_name = connector['initiator'] + handle = volume['provider_location'] + server = self.client.service + server.MapLun(Handle=handle, InitiatorType="iscsi", + InitiatorName=initiator_name) + msg = _("Mapped LUN %(handle)s to the initiator %(initiator_name)s") + LOG.debug(msg % locals()) + + target_details_list = server.GetLunTargetDetails( + Handle=handle, + InitiatorType="iscsi", + InitiatorName=initiator_name) + msg = _("Succesfully fetched target details for LUN %(handle)s and " + "initiator %(initiator_name)s") + LOG.debug(msg % locals()) + + if not target_details_list: + msg = _('Failed to get LUN target details for the LUN %s') + raise exception.VolumeBackendAPIException(data=msg % handle) + target_details = target_details_list[0] + if not target_details.Address and target_details.Port: + msg = _('Failed to get target portal for the LUN %s') + raise exception.VolumeBackendAPIException(data=msg % handle) + iqn = target_details.Iqn + if not iqn: + msg = _('Failed to get target IQN for the LUN %s') + raise exception.VolumeBackendAPIException(data=msg % handle) + + properties = {} + properties['target_discovered'] = False + (address, port) = (target_details.Address, target_details.Port) + properties['target_portal'] = '%s:%s' % (address, port) + properties['target_iqn'] = iqn + properties['target_lun'] = target_details.LunNumber + properties['volume_id'] = volume['id'] + + auth = volume['provider_auth'] + if auth: + (auth_method, auth_username, auth_secret) = auth.split() + properties['auth_method'] = auth_method + properties['auth_username'] = auth_username + properties['auth_password'] = auth_secret + + return { + 'driver_volume_type': 'iscsi', + 'data': properties, + } + + def terminate_connection(self, volume, connector, **kwargs): + """Driver entry point to unattach a volume from an instance. + + Unmask the LUN on the storage system so the given intiator can no + longer access it. + """ + initiator_name = connector['initiator'] + handle = volume['provider_location'] + self.client.service.UnmapLun(Handle=handle, InitiatorType="iscsi", + InitiatorName=initiator_name) + msg = _("Unmapped LUN %(handle)s from the initiator " + "%(initiator_name)s") + LOG.debug(msg % locals()) + + def create_snapshot(self, snapshot): + """Driver entry point for creating a snapshot. + + This driver implements snapshots by using efficient single-file + (LUN) cloning. + """ + vol_name = snapshot['volume_name'] + snapshot_name = snapshot['name'] + lun = self.lun_table[vol_name] + extra_args = {'SpaceReserved': False} + self._clone_lun(lun.handle, snapshot_name, extra_args) + + def delete_snapshot(self, snapshot): + """Driver entry point for deleting a snapshot.""" + name = snapshot['name'] + handle = self._get_lun_handle(name) + if not handle: + msg = _("No entry in LUN table for snapshot %(name)s.") + LOG.warn(msg % locals()) + return + self.client.service.DestroyLun(Handle=handle) + LOG.debug(_("Destroyed LUN %s") % handle) + self.lun_table.pop(snapshot['name']) + + def create_volume_from_snapshot(self, volume, snapshot): + """Driver entry point for creating a new volume from a snapshot. + + Many would call this "cloning" and in fact we use cloning to implement + this feature. 
+ """ + vol_size = volume['size'] + snap_size = snapshot['volume_size'] + if vol_size != snap_size: + msg = _('Cannot create volume of size %(vol_size)s from ' + 'snapshot of size %(snap_size)s') + raise exception.VolumeBackendAPIException(data=msg % locals()) + snapshot_name = snapshot['name'] + lun = self.lun_table[snapshot_name] + new_name = volume['name'] + extra_args = {} + extra_args['OsType'] = 'linux' + extra_args['QosType'] = self._get_qos_type(volume) + extra_args['Container'] = volume['project_id'] + extra_args['Display'] = volume['display_name'] + extra_args['Description'] = volume['display_description'] + extra_args['SpaceReserved'] = True + self._clone_lun(lun.handle, new_name, extra_args) + + def _get_qos_type(self, volume): + """Get the storage service type for a volume.""" + type_id = volume['volume_type_id'] + if not type_id: + return None + volume_type = volume_types.get_volume_type(None, type_id) + if not volume_type: + return None + return volume_type['name'] + + def _add_lun_to_table(self, lun): + """Adds LUN to cache table.""" + if not isinstance(lun, NetAppLun): + msg = _("Object is not a NetApp LUN.") + raise exception.VolumeBackendAPIException(data=msg) + self.lun_table[lun.name] = lun + + def _clone_lun(self, handle, new_name, extra_args): + """Clone LUN with the given handle to the new name.""" + server = self.client.service + metadata = self._create_metadata_list(extra_args) + lun = server.CloneLun(Handle=handle, NewName=new_name, + Metadata=metadata) + LOG.debug(_("Cloned LUN with new name %s") % new_name) + self._add_lun_to_table( + NetAppLun(lun.Handle, + lun.Name, + lun.Size, + self._create_dict_from_meta(lun.Metadata))) + + def _create_metadata_list(self, extra_args): + """Creates metadata from kwargs.""" + metadata = [] + for key in extra_args.keys(): + meta = self.client.factory.create("Metadata") + meta.Key = key + meta.Value = extra_args[key] + metadata.append(meta) + return metadata + + def _get_lun_handle(self, name): + """Get the details for a LUN from our cache table.""" + if name not in self.lun_table: + LOG.warn(_("Could not find handle for LUN named %s") % name) + return None + return self.lun_table[name].handle + + def _create_dict_from_meta(self, metadata): + """Creates dictionary from metadata array.""" + meta_dict = {} + if not metadata: + return meta_dict + for meta in metadata: + meta_dict[meta.Key] = meta.Value + return meta_dict + + def create_cloned_volume(self, volume, src_vref): + """Creates a clone of the specified volume.""" + vol_size = volume['size'] + src_vol = self.lun_table[src_vref['name']] + src_vol_size = src_vref['size'] + if vol_size != src_vol_size: + msg = _('Cannot clone volume of size %(vol_size)s from ' + 'src volume of size %(src_vol_size)s') + raise exception.VolumeBackendAPIException(data=msg % locals()) + new_name = volume['name'] + extra_args = {} + extra_args['OsType'] = 'linux' + extra_args['QosType'] = self._get_qos_type(volume) + extra_args['Container'] = volume['project_id'] + extra_args['Display'] = volume['display_name'] + extra_args['Description'] = volume['display_description'] + extra_args['SpaceReserved'] = True + self._clone_lun(src_vol.handle, new_name, extra_args) + + def get_volume_stats(self, refresh=False): + """Get volume status. + + If 'refresh' is True, run update the stats first. 
+ """ + if refresh: + self._update_volume_status() + + return self._stats + + def _update_volume_status(self): + """Retrieve status info from volume group.""" + + LOG.debug(_("Updating volume status")) + data = {} + backend_name = self.configuration.safe_get('volume_backend_name') + data["volume_backend_name"] = backend_name or 'NetApp_iSCSI_Cluster' + data["vendor_name"] = 'NetApp' + data["driver_version"] = '1.0' + data["storage_protocol"] = 'iSCSI' + + data['total_capacity_gb'] = 'infinite' + data['free_capacity_gb'] = 'infinite' + data['reserved_percentage'] = 100 + data['QoS_support'] = False + self._stats = data + + +class NetAppDirectISCSIDriver(driver.ISCSIDriver): + """NetApp Direct iSCSI volume driver.""" + + IGROUP_PREFIX = 'openstack-' + required_flags = ['netapp_transport_type', 'netapp_login', + 'netapp_password', 'netapp_server_hostname', + 'netapp_server_port'] + + def __init__(self, *args, **kwargs): + super(NetAppDirectISCSIDriver, self).__init__(*args, **kwargs) + self.configuration.append_config_values(netapp_opts) + self.lun_table = {} + + def _create_client(self, **kwargs): + """Instantiate a client for NetApp server. + + This method creates NetApp server client for api communication. + """ + host_filer = kwargs['hostname'] + LOG.debug(_('Using NetApp filer: %s') % host_filer) + self.client = NaServer(host=host_filer, + server_type=NaServer.SERVER_TYPE_FILER, + transport_type=kwargs['transport_type'], + style=NaServer.STYLE_LOGIN_PASSWORD, + username=kwargs['login'], + password=kwargs['password']) + + def _do_custom_setup(self): + """Does custom setup depending on the type of filer.""" + raise NotImplementedError() + + def _check_flags(self): + """Ensure that the flags we care about are set.""" + required_flags = self.required_flags + for flag in required_flags: + if not getattr(self.configuration, flag, None): + msg = _('%s is not set') % flag + raise exception.InvalidInput(data=msg) + + def do_setup(self, context): + """Setup the NetApp Volume driver. + + Called one time by the manager after the driver is loaded. + Validate the flags we care about and setup NetApp + client. + """ + self._check_flags() + self._create_client( + transport_type=self.configuration.netapp_transport_type, + login=self.configuration.netapp_login, + password=self.configuration.netapp_password, + hostname=self.configuration.netapp_server_hostname, + port=self.configuration.netapp_server_port) + self._do_custom_setup() + + def check_for_setup_error(self): + """Check that the driver is working and can communicate. + + Discovers the LUNs on the NetApp server. 
+ """ + self.lun_table = {} + self._get_lun_list() + LOG.debug(_("Success getting LUN list from server")) + + def create_volume(self, volume): + """Driver entry point for creating a new volume.""" + default_size = '104857600' # 100 MB + gigabytes = 1073741824L # 2^30 + name = volume['name'] + if int(volume['size']) == 0: + size = default_size + else: + size = str(int(volume['size']) * gigabytes) + metadata = {} + metadata['OsType'] = 'linux' + metadata['SpaceReserved'] = 'true' + self._create_lun_on_eligible_vol(name, size, metadata) + LOG.debug(_("Created LUN with name %s") % name) + handle = self._create_lun_handle(metadata) + self._add_lun_to_table(NetAppLun(handle, name, size, metadata)) + + def delete_volume(self, volume): + """Driver entry point for destroying existing volumes.""" + name = volume['name'] + metadata = self._get_lun_attr(name, 'metadata') + if not metadata: + msg = _("No entry in LUN table for volume/snapshot %(name)s.") + LOG.warn(msg % locals()) + return + lun_destroy = NaElement.create_node_with_children( + 'lun-destroy', + **{'path': metadata['Path'], + 'force': 'true'}) + self.client.invoke_successfully(lun_destroy, True) + LOG.debug(_("Destroyed LUN %s") % name) + self.lun_table.pop(name) + + def ensure_export(self, context, volume): + """Driver entry point to get the export info for an existing volume.""" + handle = self._get_lun_attr(volume['name'], 'handle') + return {'provider_location': handle} + + def create_export(self, context, volume): + """Driver entry point to get the export info for a new volume.""" + handle = self._get_lun_attr(volume['name'], 'handle') + return {'provider_location': handle} + + def remove_export(self, context, volume): + """Driver exntry point to remove an export for a volume. + + Since exporting is idempotent in this driver, we have nothing + to do for unexporting. + """ + pass + + def initialize_connection(self, volume, connector): + """Driver entry point to attach a volume to an instance. + + Do the LUN masking on the storage system so the initiator can access + the LUN on the target. Also return the iSCSI properties so the + initiator can find the LUN. This implementation does not call + _get_iscsi_properties() to get the properties because cannot store the + LUN number in the database. We only find out what the LUN number will + be during this method call so we construct the properties dictionary + ourselves. 
+ """ + initiator_name = connector['initiator'] + name = volume['name'] + lun_id = self._map_lun(name, initiator_name, 'iscsi', None) + msg = _("Mapped LUN %(name)s to the initiator %(initiator_name)s") + LOG.debug(msg % locals()) + iqn = self._get_iscsi_service_details() + target_details_list = self._get_target_details() + msg = _("Succesfully fetched target details for LUN %(name)s and " + "initiator %(initiator_name)s") + LOG.debug(msg % locals()) + + if not target_details_list: + msg = _('Failed to get LUN target details for the LUN %s') + raise exception.VolumeBackendAPIException(data=msg % name) + target_details = None + for tgt_detail in target_details_list: + if tgt_detail.get('interface-enabled', 'true') == 'true': + target_details = tgt_detail + break + if not target_details: + target_details = target_details_list[0] + + if not target_details['address'] and target_details['port']: + msg = _('Failed to get target portal for the LUN %s') + raise exception.VolumeBackendAPIException(data=msg % name) + if not iqn: + msg = _('Failed to get target IQN for the LUN %s') + raise exception.VolumeBackendAPIException(data=msg % name) + + properties = {} + properties['target_discovered'] = False + (address, port) = (target_details['address'], target_details['port']) + properties['target_portal'] = '%s:%s' % (address, port) + properties['target_iqn'] = iqn + properties['target_lun'] = lun_id + properties['volume_id'] = volume['id'] + + auth = volume['provider_auth'] + if auth: + (auth_method, auth_username, auth_secret) = auth.split() + properties['auth_method'] = auth_method + properties['auth_username'] = auth_username + properties['auth_password'] = auth_secret + + return { + 'driver_volume_type': 'iscsi', + 'data': properties, + } + + def create_snapshot(self, snapshot): + """Driver entry point for creating a snapshot. + + This driver implements snapshots by using efficient single-file + (LUN) cloning. + """ + vol_name = snapshot['volume_name'] + snapshot_name = snapshot['name'] + lun = self.lun_table[vol_name] + self._clone_lun(lun.name, snapshot_name, 'false') + + def delete_snapshot(self, snapshot): + """Driver entry point for deleting a snapshot.""" + self.delete_volume(snapshot) + LOG.debug(_("Snapshot %s deletion successful") % snapshot['name']) + + def create_volume_from_snapshot(self, volume, snapshot): + """Driver entry point for creating a new volume from a snapshot. + + Many would call this "cloning" and in fact we use cloning to implement + this feature. + """ + vol_size = volume['size'] + snap_size = snapshot['volume_size'] + if vol_size != snap_size: + msg = _('Cannot create volume of size %(vol_size)s from ' + 'snapshot of size %(snap_size)s') + raise exception.VolumeBackendAPIException(data=msg % locals()) + snapshot_name = snapshot['name'] + new_name = volume['name'] + self._clone_lun(snapshot_name, new_name, 'true') + + def terminate_connection(self, volume, connector, **kwargs): + """Driver entry point to unattach a volume from an instance. + + Unmask the LUN on the storage system so the given intiator can no + longer access it. 
+ """ + initiator_name = connector['initiator'] + name = volume['name'] + metadata = self._get_lun_attr(name, 'metadata') + path = metadata['Path'] + self._unmap_lun(path, initiator_name) + msg = _("Unmapped LUN %(name)s from the initiator " + "%(initiator_name)s") + LOG.debug(msg % locals()) + + def _get_ontapi_version(self): + """Gets the supported ontapi version.""" + ontapi_version = NaElement('system-get-ontapi-version') + res = self.client.invoke_successfully(ontapi_version, False) + major = res.get_child_content('major-version') + minor = res.get_child_content('minor-version') + return (major, minor) + + def _create_lun_on_eligible_vol(self, name, size, metadata): + """Creates an actual lun on filer.""" + req_size = float(size) *\ + float(self.configuration.netapp_size_multiplier) + volume = self._get_avl_volume_by_size(req_size) + if not volume: + msg = _('Failed to get vol with required size for volume: %s') + raise exception.VolumeBackendAPIException(data=msg % name) + path = '/vol/%s/%s' % (volume['name'], name) + lun_create = NaElement.create_node_with_children( + 'lun-create-by-size', + **{'path': path, 'size': size, + 'ostype': metadata['OsType'], + 'space-reservation-enabled': + metadata['SpaceReserved']}) + self.client.invoke_successfully(lun_create, True) + metadata['Path'] = '/vol/%s/%s' % (volume['name'], name) + metadata['Volume'] = volume['name'] + metadata['Qtree'] = None + + def _get_avl_volume_by_size(self, size): + """Get the available volume by size.""" + raise NotImplementedError() + + def _get_iscsi_service_details(self): + """Returns iscsi iqn.""" + raise NotImplementedError() + + def _get_target_details(self): + """Gets the target portal details.""" + raise NotImplementedError() + + def _create_lun_handle(self, metadata): + """Returns lun handle based on filer type.""" + raise NotImplementedError() + + def _get_lun_list(self): + """Gets the list of luns on filer.""" + raise NotImplementedError() + + def _extract_and_populate_luns(self, api_luns): + """Extracts the luns from api. + + Populates in the lun table. + """ + for lun in api_luns: + meta_dict = self._create_lun_meta(lun) + path = lun.get_child_content('path') + (rest, splitter, name) = path.rpartition('/') + handle = self._create_lun_handle(meta_dict) + size = lun.get_child_content('size') + discovered_lun = NetAppLun(handle, name, + size, meta_dict) + self._add_lun_to_table(discovered_lun) + + def _is_naelement(self, elem): + """Checks if element is NetApp element.""" + if not isinstance(elem, NaElement): + raise ValueError('Expects NaElement') + + def _map_lun(self, name, initiator, initiator_type='iscsi', lun_id=None): + """Maps lun to the initiator and returns lun id assigned.""" + metadata = self._get_lun_attr(name, 'metadata') + os = metadata['OsType'] + path = metadata['Path'] + if self._check_allowed_os(os): + os = os + else: + os = 'default' + igroup_name = self._get_or_create_igroup(initiator, + initiator_type, os) + lun_map = NaElement.create_node_with_children( + 'lun-map', **{'path': path, + 'initiator-group': igroup_name}) + if lun_id: + lun_map.add_new_child('lun-id', lun_id) + try: + result = self.client.invoke_successfully(lun_map, True) + return result.get_child_content('lun-id-assigned') + except NaApiError as e: + code = e.code + message = e.message + msg = _('Error mapping lun. 
+    def _map_lun(self, name, initiator, initiator_type='iscsi', lun_id=None):
+        """Maps lun to the initiator and returns lun id assigned."""
+        metadata = self._get_lun_attr(name, 'metadata')
+        os = metadata['OsType']
+        path = metadata['Path']
+        if not self._check_allowed_os(os):
+            os = 'default'
+        igroup_name = self._get_or_create_igroup(initiator,
+                                                 initiator_type, os)
+        lun_map = NaElement.create_node_with_children(
+            'lun-map', **{'path': path,
+                          'initiator-group': igroup_name})
+        if lun_id:
+            lun_map.add_new_child('lun-id', lun_id)
+        try:
+            result = self.client.invoke_successfully(lun_map, True)
+            return result.get_child_content('lun-id-assigned')
+        except NaApiError as e:
+            code = e.code
+            message = e.message
+            msg = _('Error mapping lun. Code :%(code)s, Message:%(message)s')
+            LOG.warn(msg % locals())
+            (igroup, lun_id) = self._find_mapped_lun_igroup(path, initiator)
+            if lun_id is not None:
+                return lun_id
+            else:
+                raise e
+
+    def _unmap_lun(self, path, initiator):
+        """Unmaps a lun from given initiator."""
+        (igroup_name, lun_id) = self._find_mapped_lun_igroup(path, initiator)
+        lun_unmap = NaElement.create_node_with_children(
+            'lun-unmap',
+            **{'path': path,
+               'initiator-group': igroup_name})
+        try:
+            self.client.invoke_successfully(lun_unmap, True)
+        except NaApiError as e:
+            msg = _("Error unmapping lun. Code :%(code)s, Message:%(message)s")
+            code = e.code
+            message = e.message
+            LOG.warn(msg % locals())
+            # if the lun is already unmapped
+            if e.code == '13115' or e.code == '9016':
+                pass
+            else:
+                raise e
+
+    def _find_mapped_lun_igroup(self, path, initiator, os=None):
+        """Find the igroup for mapped lun with initiator."""
+        raise NotImplementedError()
+
+    def _get_or_create_igroup(self, initiator, initiator_type='iscsi',
+                              os='default'):
+        """Checks for an igroup for an initiator.
+
+        Creates igroup if not found.
+        """
+        igroups = self._get_igroup_by_initiator(initiator=initiator)
+        igroup_name = None
+        for igroup in igroups:
+            if igroup['initiator-group-os-type'] == os:
+                if igroup['initiator-group-type'] == initiator_type or \
+                        igroup['initiator-group-type'] == 'mixed':
+                    if igroup['initiator-group-name'].startswith(
+                            self.IGROUP_PREFIX):
+                        igroup_name = igroup['initiator-group-name']
+                        break
+        if not igroup_name:
+            igroup_name = self.IGROUP_PREFIX + str(uuid.uuid4())
+            self._create_igroup(igroup_name, initiator_type, os)
+        self._add_igroup_initiator(igroup_name, initiator)
+        return igroup_name
+
+    def _get_igroup_by_initiator(self, initiator):
+        """Get igroups by initiator."""
+        raise NotImplementedError()
+
+    def _check_allowed_os(self, os):
+        """Checks if the os type supplied is NetApp supported."""
+        if os in ['linux', 'aix', 'hpux', 'windows', 'solaris',
+                  'netware', 'vmware', 'openvms', 'xen', 'hyper_v']:
+            return True
+        else:
+            return False
+
+    def _create_igroup(self, igroup, igroup_type='iscsi', os_type='default'):
+        """Creates igroup with specified args."""
+        igroup_create = NaElement.create_node_with_children(
+            'igroup-create',
+            **{'initiator-group-name': igroup,
+               'initiator-group-type': igroup_type,
+               'os-type': os_type})
+        self.client.invoke_successfully(igroup_create, True)
+
+    def _add_igroup_initiator(self, igroup, initiator):
+        """Adds initiators to the specified igroup."""
+        igroup_add = NaElement.create_node_with_children(
+            'igroup-add',
+            **{'initiator-group-name': igroup,
+               'initiator': initiator})
+        self.client.invoke_successfully(igroup_add, True)
+
+    def _get_qos_type(self, volume):
+        """Get the storage service type for a volume."""
+        type_id = volume['volume_type_id']
+        if not type_id:
+            return None
+        volume_type = volume_types.get_volume_type(None, type_id)
+        if not volume_type:
+            return None
+        return volume_type['name']
+
+    def _add_lun_to_table(self, lun):
+        """Adds LUN to cache table."""
+        if not isinstance(lun, NetAppLun):
+            msg = _("Object is not a NetApp LUN.")
+            raise exception.VolumeBackendAPIException(data=msg)
+        self.lun_table[lun.name] = lun
+
+    def _clone_lun(self, name, new_name, space_reserved):
+        """Clone LUN with the given name to the new name."""
+        raise NotImplementedError()
+
+    def _get_lun_by_args(self, **args):
+        """Retrieves lun with specified args."""
+        raise NotImplementedError()
+    def _get_lun_attr(self, name, attr):
+        """Get the attributes for a LUN from our cache table."""
+        if name not in self.lun_table or not hasattr(
+                self.lun_table[name], attr):
+            LOG.warn(_("Could not find attribute for LUN named %s") % name)
+            return None
+        return getattr(self.lun_table[name], attr)
+
+    def _create_lun_meta(self, lun):
+        raise NotImplementedError()
+
+    def create_cloned_volume(self, volume, src_vref):
+        """Creates a clone of the specified volume."""
+        vol_size = volume['size']
+        src_vol = self.lun_table[src_vref['name']]
+        src_vol_size = src_vref['size']
+        if vol_size != src_vol_size:
+            msg = _('Cannot clone volume of size %(vol_size)s from '
+                    'src volume of size %(src_vol_size)s')
+            raise exception.VolumeBackendAPIException(data=msg % locals())
+        new_name = volume['name']
+        self._clone_lun(src_vol.name, new_name, 'true')
+
+    def get_volume_stats(self, refresh=False):
+        """Get volume status.
+
+        If 'refresh' is True, update the stats first."""
+        if refresh:
+            self._update_volume_status()
+
+        return self._stats
+
+    def _update_volume_status(self):
+        """Retrieve status info from volume group."""
+        raise NotImplementedError()
+
+
+class NetAppDirectCmodeISCSIDriver(NetAppDirectISCSIDriver):
+    """NetApp C-mode iSCSI volume driver."""
+
+    def __init__(self, *args, **kwargs):
+        super(NetAppDirectCmodeISCSIDriver, self).__init__(*args, **kwargs)
+
+    def _do_custom_setup(self):
+        """Does custom setup for ontap cluster."""
+        self.vserver = self.configuration.netapp_vserver
+        # We set vserver in client permanently.
+        # To use tunneling enable_tunneling while invoking api
+        self.client.set_vserver(self.vserver)
+        # Default values to run first api
+        self.client.set_api_version(1, 15)
+        (major, minor) = self._get_ontapi_version()
+        self.client.set_api_version(major, minor)
+
+    def _get_avl_volume_by_size(self, size):
+        """Get the available volume by size."""
+        tag = None
+        while True:
+            vol_request = self._create_avl_vol_request(self.vserver, tag)
+            res = self.client.invoke_successfully(vol_request)
+            tag = res.get_child_content('next-tag')
+            attr_list = res.get_child_by_name('attributes-list')
+            vols = attr_list.get_children()
+            for vol in vols:
+                vol_space = vol.get_child_by_name('volume-space-attributes')
+                avl_size = vol_space.get_child_content('size-available')
+                if float(avl_size) >= float(size):
+                    avl_vol = dict()
+                    vol_id = vol.get_child_by_name('volume-id-attributes')
+                    avl_vol['name'] = vol_id.get_child_content('name')
+                    avl_vol['vserver'] = vol_id.get_child_content(
+                        'owning-vserver-name')
+                    avl_vol['size-available'] = avl_size
+                    return avl_vol
+            if tag is None:
+                break
+        return None
+
+    def _create_avl_vol_request(self, vserver, tag=None):
+        vol_get_iter = NaElement('volume-get-iter')
+        vol_get_iter.add_new_child('max-records', '100')
+        if tag:
+            vol_get_iter.add_new_child('tag', tag, True)
+        query = NaElement('query')
+        vol_get_iter.add_child_elem(query)
+        vol_attrs = NaElement('volume-attributes')
+        query.add_child_elem(vol_attrs)
+        if vserver:
+            vol_attrs.add_node_with_children(
+                'volume-id-attributes',
+                **{"owning-vserver-name": vserver})
+        vol_attrs.add_node_with_children(
+            'volume-state-attributes',
+            **{"is-vserver-root": "false", "state": "online"})
+        desired_attrs = NaElement('desired-attributes')
+        vol_get_iter.add_child_elem(desired_attrs)
+        des_vol_attrs = NaElement('volume-attributes')
+        desired_attrs.add_child_elem(des_vol_attrs)
+        des_vol_attrs.add_node_with_children(
+            'volume-id-attributes',
+            **{"name": None, "owning-vserver-name": None})
+        des_vol_attrs.add_node_with_children(
+            'volume-space-attributes',
+ 
**{"size-available": None}) + des_vol_attrs.add_node_with_children('volume-state-attributes', + **{"is-cluster-volume": None, + "is-vserver-root": None, + "state": None}) + return vol_get_iter + + def _get_target_details(self): + """Gets the target portal details.""" + iscsi_if_iter = NaElement('iscsi-interface-get-iter') + result = self.client.invoke_successfully(iscsi_if_iter, True) + tgt_list = [] + if result.get_child_content('num-records')\ + and int(result.get_child_content('num-records')) >= 1: + attr_list = result.get_child_by_name('attributes-list') + iscsi_if_list = attr_list.get_children() + for iscsi_if in iscsi_if_list: + d = dict() + d['address'] = iscsi_if.get_child_content('ip-address') + d['port'] = iscsi_if.get_child_content('ip-port') + d['tpgroup-tag'] = iscsi_if.get_child_content('tpgroup-tag') + d['interface-enabled'] = iscsi_if.get_child_content( + 'is-interface-enabled') + tgt_list.append(d) + return tgt_list + + def _get_iscsi_service_details(self): + """Returns iscsi iqn.""" + iscsi_service_iter = NaElement('iscsi-service-get-iter') + result = self.client.invoke_successfully(iscsi_service_iter, True) + if result.get_child_content('num-records') and\ + int(result.get_child_content('num-records')) >= 1: + attr_list = result.get_child_by_name('attributes-list') + iscsi_service = attr_list.get_child_by_name('iscsi-service-info') + return iscsi_service.get_child_content('node-name') + LOG.debug(_('No iscsi service found for vserver %s') % (self.vserver)) + return None + + def _create_lun_handle(self, metadata): + """Returns lun handle based on filer type.""" + return '%s:%s' % (self.vserver, metadata['Path']) + + def _get_lun_list(self): + """Gets the list of luns on filer. + + Gets the luns from cluster with vserver. + """ + tag = None + while True: + api = NaElement('lun-get-iter') + api.add_new_child('max-records', '100') + if tag: + api.add_new_child('tag', tag, True) + lun_info = NaElement('lun-info') + lun_info.add_new_child('vserver', self.vserver) + query = NaElement('query') + query.add_child_elem(lun_info) + api.add_child_elem(query) + result = self.client.invoke_successfully(api) + if result.get_child_by_name('num-records') and\ + int(result.get_child_content('num-records')) >= 1: + attr_list = result.get_child_by_name('attributes-list') + self._extract_and_populate_luns(attr_list.get_children()) + tag = result.get_child_content('next-tag') + if tag is None: + break + + def _find_mapped_lun_igroup(self, path, initiator, os=None): + """Find the igroup for mapped lun with initiator.""" + initiator_igroups = self._get_igroup_by_initiator(initiator=initiator) + lun_maps = self._get_lun_map(path) + if initiator_igroups and lun_maps: + for igroup in initiator_igroups: + igroup_name = igroup['initiator-group-name'] + if igroup_name.startswith(self.IGROUP_PREFIX): + for lun_map in lun_maps: + if lun_map['initiator-group'] == igroup_name: + return (igroup_name, lun_map['lun-id']) + return (None, None) + + def _get_lun_map(self, path): + """Gets the lun map by lun path.""" + tag = None + map_list = [] + while True: + lun_map_iter = NaElement('lun-map-get-iter') + lun_map_iter.add_new_child('max-records', '100') + if tag: + lun_map_iter.add_new_child('tag', tag, True) + query = NaElement('query') + lun_map_iter.add_child_elem(query) + query.add_node_with_children('lun-map-info', **{'path': path}) + result = self.client.invoke_successfully(lun_map_iter, True) + tag = result.get_child_content('next-tag') + if result.get_child_content('num-records') and \ + 
                    int(result.get_child_content('num-records')) >= 1:
+                attr_list = result.get_child_by_name('attributes-list')
+                lun_maps = attr_list.get_children()
+                for lun_map in lun_maps:
+                    lun_m = dict()
+                    lun_m['initiator-group'] = lun_map.get_child_content(
+                        'initiator-group')
+                    lun_m['lun-id'] = lun_map.get_child_content('lun-id')
+                    lun_m['vserver'] = lun_map.get_child_content('vserver')
+                    map_list.append(lun_m)
+            if tag is None:
+                break
+        return map_list
+
+    def _get_igroup_by_initiator(self, initiator):
+        """Get igroups by initiator."""
+        tag = None
+        igroup_list = []
+        while True:
+            igroup_iter = NaElement('igroup-get-iter')
+            igroup_iter.add_new_child('max-records', '100')
+            if tag:
+                igroup_iter.add_new_child('tag', tag, True)
+            query = NaElement('query')
+            igroup_iter.add_child_elem(query)
+            igroup_info = NaElement('initiator-group-info')
+            query.add_child_elem(igroup_info)
+            igroup_info.add_new_child('vserver', self.vserver)
+            initiators = NaElement('initiators')
+            igroup_info.add_child_elem(initiators)
+            initiators.add_node_with_children('initiator-info',
+                                              **{'initiator-name': initiator})
+            des_attrs = NaElement('desired-attributes')
+            des_ig_info = NaElement('initiator-group-info')
+            des_attrs.add_child_elem(des_ig_info)
+            des_ig_info.add_node_with_children('initiators',
+                                               **{'initiator-info': None})
+            des_ig_info.add_new_child('vserver', None)
+            des_ig_info.add_new_child('initiator-group-name', None)
+            des_ig_info.add_new_child('initiator-group-type', None)
+            des_ig_info.add_new_child('initiator-group-os-type', None)
+            igroup_iter.add_child_elem(des_attrs)
+            result = self.client.invoke_successfully(igroup_iter, False)
+            tag = result.get_child_content('next-tag')
+            if result.get_child_content('num-records') and\
+                    int(result.get_child_content('num-records')) > 0:
+                attr_list = result.get_child_by_name('attributes-list')
+                igroups = attr_list.get_children()
+                for igroup in igroups:
+                    ig = dict()
+                    ig['initiator-group-os-type'] = igroup.get_child_content(
+                        'initiator-group-os-type')
+                    ig['initiator-group-type'] = igroup.get_child_content(
+                        'initiator-group-type')
+                    ig['initiator-group-name'] = igroup.get_child_content(
+                        'initiator-group-name')
+                    igroup_list.append(ig)
+            if tag is None:
+                break
+        return igroup_list
+
+    def _clone_lun(self, name, new_name, space_reserved):
+        """Clone LUN with the given handle to the new name."""
+        metadata = self._get_lun_attr(name, 'metadata')
+        volume = metadata['Volume']
+        clone_create = NaElement.create_node_with_children(
+            'clone-create',
+            **{'volume': volume, 'source-path': name,
+               'destination-path': new_name,
+               'space-reserve': space_reserved})
+        self.client.invoke_successfully(clone_create, True)
+        LOG.debug(_("Cloned LUN with new name %s") % new_name)
+        lun = self._get_lun_by_args(vserver=self.vserver, path='/vol/%s/%s'
+                                    % (volume, new_name))
+        if len(lun) == 0:
+            msg = _("No cloned lun named %s found on the filer")
+            raise exception.VolumeBackendAPIException(data=msg % (new_name))
+        clone_meta = self._create_lun_meta(lun[0])
+        self._add_lun_to_table(NetAppLun('%s:%s' % (clone_meta['Vserver'],
+                                                    clone_meta['Path']),
+                                         new_name,
+                                         lun[0].get_child_content('size'),
+                                         clone_meta))
+
+    def _get_lun_by_args(self, **args):
+        """Retrieves lun with specified args."""
+        lun_iter = NaElement('lun-get-iter')
+        lun_iter.add_new_child('max-records', '100')
+        query = NaElement('query')
+        lun_iter.add_child_elem(query)
+        query.add_node_with_children('lun-info', **args)
+        luns = self.client.invoke_successfully(lun_iter)
+        attr_list = luns.get_child_by_name('attributes-list')
+        return attr_list.get_children()
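+    # _create_lun_meta() below flattens a lun-info NaElement into a plain
+    # dict; an illustrative result (values hypothetical):
+    #
+    #   {'Vserver': 'openstack-vs1', 'Volume': 'vol1', 'Qtree': None,
+    #    'Path': '/vol/vol1/volume-xyz', 'OsType': 'linux',
+    #    'SpaceReserved': 'true'}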
+    def _create_lun_meta(self, lun):
+        """Creates lun metadata dictionary."""
+        self._is_naelement(lun)
+        meta_dict = {}
+        meta_dict['Vserver'] = lun.get_child_content('vserver')
+        meta_dict['Volume'] = lun.get_child_content('volume')
+        meta_dict['Qtree'] = lun.get_child_content('qtree')
+        meta_dict['Path'] = lun.get_child_content('path')
+        meta_dict['OsType'] = lun.get_child_content('multiprotocol-type')
+        meta_dict['SpaceReserved'] = \
+            lun.get_child_content('is-space-reservation-enabled')
+        return meta_dict
+
+    def _configure_tunneling(self, do_tunneling=False):
+        """Configures tunneling for ontap cluster."""
+        if do_tunneling:
+            self.client.set_vserver(self.vserver)
+        else:
+            self.client.set_vserver(None)
+
+    def _update_volume_status(self):
+        """Retrieve status info from volume group."""
+
+        LOG.debug(_("Updating volume status"))
+        data = {}
+        backend_name = self.configuration.safe_get('volume_backend_name')
+        data["volume_backend_name"] = (backend_name
+                                       or 'NetApp_iSCSI_Cluster_direct')
+        data["vendor_name"] = 'NetApp'
+        data["driver_version"] = '1.0'
+        data["storage_protocol"] = 'iSCSI'
+
+        data['total_capacity_gb'] = 'infinite'
+        data['free_capacity_gb'] = 'infinite'
+        data['reserved_percentage'] = 100
+        data['QoS_support'] = False
+        self._stats = data
+
+
+class NetAppDirect7modeISCSIDriver(NetAppDirectISCSIDriver):
+    """NetApp 7-mode iSCSI volume driver."""
+
+    def __init__(self, *args, **kwargs):
+        super(NetAppDirect7modeISCSIDriver, self).__init__(*args, **kwargs)
+
+    def _do_custom_setup(self):
+        """Does custom setup depending on the type of filer."""
+        self.vfiler = self.configuration.netapp_vfiler
+        self.volume_list = self.configuration.netapp_volume_list
+        if self.volume_list:
+            self.volume_list = self.volume_list.split(',')
+            self.volume_list = [el.strip() for el in self.volume_list]
+        if self.vfiler:
+            (major, minor) = self._get_ontapi_version()
+            self.client.set_api_version(major, minor)
+            self.client.set_vfiler(self.vfiler)
+
+    def _get_avl_volume_by_size(self, size):
+        """Get the available volume by size."""
+        vol_request = NaElement('volume-list-info')
+        res = self.client.invoke_successfully(vol_request, True)
+        volumes = res.get_child_by_name('volumes')
+        vols = volumes.get_children()
+        for vol in vols:
+            avl_size = vol.get_child_content('size-available')
+            state = vol.get_child_content('state')
+            if float(avl_size) >= float(size) and state == 'online':
+                avl_vol = dict()
+                avl_vol['name'] = vol.get_child_content('name')
+                avl_vol['block-type'] = vol.get_child_content('block-type')
+                avl_vol['type'] = vol.get_child_content('type')
+                avl_vol['size-available'] = avl_size
+                if self.volume_list:
+                    if avl_vol['name'] in self.volume_list:
+                        return avl_vol
+                else:
+                    if self._check_vol_not_root(avl_vol):
+                        return avl_vol
+        return None
+
+    def _check_vol_not_root(self, vol):
+        """Checks if a volume is not root."""
+        vol_options = NaElement.create_node_with_children(
+            'volume-options-list-info', **{'volume': vol['name']})
+        result = self.client.invoke_successfully(vol_options, True)
+        options = result.get_child_by_name('options')
+        ops = options.get_children()
+        for op in ops:
+            if op.get_child_content('name') == 'root' and\
+                    op.get_child_content('value') == 'true':
+                return False
+        return True
+    def _get_igroup_by_initiator(self, initiator):
+        """Get igroups by initiator."""
+        igroup_list = NaElement('igroup-list-info')
+        result = self.client.invoke_successfully(igroup_list, True)
+        igroups = []
+        igs = result.get_child_by_name('initiator-groups')
+        if igs:
+            ig_infos = igs.get_children()
+            if ig_infos:
+                for info in ig_infos:
+                    initiators = info.get_child_by_name('initiators')
+                    init_infos = initiators.get_children()
+                    if init_infos:
+                        for init in init_infos:
+                            if init.get_child_content('initiator-name')\
+                                    == initiator:
+                                d = dict()
+                                d['initiator-group-os-type'] = \
+                                    info.get_child_content(
+                                        'initiator-group-os-type')
+                                d['initiator-group-type'] = \
+                                    info.get_child_content(
+                                        'initiator-group-type')
+                                d['initiator-group-name'] = \
+                                    info.get_child_content(
+                                        'initiator-group-name')
+                                igroups.append(d)
+        return igroups
+
+    def _get_target_details(self):
+        """Gets the target portal details."""
+        iscsi_if_iter = NaElement('iscsi-portal-list-info')
+        result = self.client.invoke_successfully(iscsi_if_iter, True)
+        tgt_list = []
+        portal_list_entries = result.get_child_by_name(
+            'iscsi-portal-list-entries')
+        if portal_list_entries:
+            portal_list = portal_list_entries.get_children()
+            for iscsi_if in portal_list:
+                d = dict()
+                d['address'] = iscsi_if.get_child_content('ip-address')
+                d['port'] = iscsi_if.get_child_content('ip-port')
+                d['tpgroup-tag'] = iscsi_if.get_child_content('tpgroup-tag')
+                tgt_list.append(d)
+        return tgt_list
+
+    def _get_iscsi_service_details(self):
+        """Returns iscsi iqn."""
+        iscsi_service_iter = NaElement('iscsi-node-get-name')
+        result = self.client.invoke_successfully(iscsi_service_iter, True)
+        return result.get_child_content('node-name')
+
+    def _create_lun_handle(self, metadata):
+        """Returns lun handle based on filer type."""
+        if self.vfiler:
+            owner = '%s:%s' % (self.configuration.netapp_server_hostname,
+                               self.vfiler)
+        else:
+            owner = self.configuration.netapp_server_hostname
+        return '%s:%s' % (owner, metadata['Path'])
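+    # Handles built by _create_lun_handle() above are strings of the form
+    # "<filer>:<path>" or, when a vfiler is configured,
+    # "<filer>:<vfiler>:<path>", e.g. (illustrative):
+    #
+    #   filer1.example.com:vfiler2:/vol/vol1/volume-xyz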
+ " Verify volume exists.") % locals()) + else: + luns = self._get_vol_luns(None) + lun_list.extend(luns) + self._extract_and_populate_luns(lun_list) + + def _get_vol_luns(self, vol_name): + """Gets the luns for a volume.""" + api = NaElement('lun-list-info') + if vol_name: + api.add_new_child('volume-name', vol_name) + result = self.client.invoke_successfully(api, True) + luns = result.get_child_by_name('luns') + return luns.get_children() + + def _find_mapped_lun_igroup(self, path, initiator, os=None): + """Find the igroup for mapped lun with initiator.""" + lun_map_list = NaElement.create_node_with_children( + 'lun-map-list-info', + **{'path': path}) + result = self.client.invoke_successfully(lun_map_list, True) + igroups = result.get_child_by_name('initiator-groups') + if igroups: + igroup = None + lun_id = None + found = False + igroup_infs = igroups.get_children() + for ig in igroup_infs: + initiators = ig.get_child_by_name('initiators') + init_infs = initiators.get_children() + for info in init_infs: + if info.get_child_content('initiator-name') == initiator: + found = True + igroup = ig.get_child_content('initiator-group-name') + lun_id = ig.get_child_content('lun-id') + break + if found: + break + return (igroup, lun_id) + + def _clone_lun(self, name, new_name, space_reserved): + """Clone LUN with the given handle to the new name.""" + metadata = self._get_lun_attr(name, 'metadata') + path = metadata['Path'] + (parent, splitter, name) = path.rpartition('/') + clone_path = '%s/%s' % (parent, new_name) + clone_start = NaElement.create_node_with_children( + 'clone-start', + **{'source-path': path, 'destination-path': clone_path, + 'no-snap': 'true'}) + result = self.client.invoke_successfully(clone_start, True) + clone_id_el = result.get_child_by_name('clone-id') + cl_id_info = clone_id_el.get_child_by_name('clone-id-info') + vol_uuid = cl_id_info.get_child_content('volume-uuid') + clone_id = cl_id_info.get_child_content('clone-op-id') + if vol_uuid: + self._check_clone_status(clone_id, vol_uuid, name, new_name) + cloned_lun = self._get_lun_by_args(path=clone_path) + if cloned_lun: + self._set_space_reserve(clone_path, space_reserved) + clone_meta = self._create_lun_meta(cloned_lun) + handle = self._create_lun_handle(clone_meta) + self._add_lun_to_table( + NetAppLun(handle, new_name, + cloned_lun.get_child_content('size'), + clone_meta)) + else: + raise NaApiError('ENOLUNENTRY', 'No Lun entry found on the filer') + + def _set_space_reserve(self, path, enable): + """Sets the space reserve info.""" + space_res = NaElement.create_node_with_children( + 'lun-set-space-reservation-info', + **{'path': path, 'enable': enable}) + self.client.invoke_successfully(space_res, True) + + def _check_clone_status(self, clone_id, vol_uuid, name, new_name): + """Checks for the job till completed.""" + clone_status = NaElement('clone-list-status') + cl_id = NaElement('clone-id') + clone_status.add_child_elem(cl_id) + cl_id.add_node_with_children( + 'clone-id-info', + **{'clone-op-id': clone_id, 'volume-uuid': vol_uuid}) + running = True + clone_ops_info = None + while running: + result = self.client.invoke_successfully(clone_status, True) + status = result.get_child_by_name('status') + ops_info = status.get_children() + if ops_info: + for info in ops_info: + if info.get_child_content('clone-state') == 'running': + time.sleep(1) + break + else: + running = False + clone_ops_info = info + break + else: + if clone_ops_info: + if clone_ops_info.get_child_content('clone-state')\ + == 'completed': + 
LOG.debug(_("Clone operation with src %(name)s" + " and dest %(new_name)s completed") % locals()) + else: + LOG.debug(_("Clone operation with src %(name)s" + " and dest %(new_name)s failed") % locals()) + raise NaApiError( + clone_ops_info.get_child_content('error'), + clone_ops_info.get_child_content('reason')) + + def _get_lun_by_args(self, **args): + """Retrives lun with specified args.""" + lun_info = NaElement.create_node_with_children('lun-list-info', **args) + result = self.client.invoke_successfully(lun_info, True) + luns = result.get_child_by_name('luns') + if luns: + infos = luns.get_children() + if infos: + return infos[0] + return None + + def _create_lun_meta(self, lun): + """Creates lun metadata dictionary.""" + self._is_naelement(lun) + meta_dict = {} + self._is_naelement(lun) + meta_dict['Path'] = lun.get_child_content('path') + meta_dict['OsType'] = lun.get_child_content('multiprotocol-type') + meta_dict['SpaceReserved'] = lun.get_child_content( + 'is-space-reservation-enabled') + return meta_dict + + def _update_volume_status(self): + """Retrieve status info from volume group.""" + + LOG.debug(_("Updating volume status")) + data = {} + backend_name = self.configuration.safe_get('volume_backend_name') + data["volume_backend_name"] = (backend_name + or 'NetApp_iSCSI_7mode_direct') + data["vendor_name"] = 'NetApp' + data["driver_version"] = '1.0' + data["storage_protocol"] = 'iSCSI' + + data['total_capacity_gb'] = 'infinite' + data['free_capacity_gb'] = 'infinite' + data['reserved_percentage'] = 100 + data['QoS_support'] = False + self._stats = data diff --git a/cinder/volume/drivers/netapp/nfs.py b/cinder/volume/drivers/netapp/nfs.py new file mode 100644 index 0000000000..bb3890aae2 --- /dev/null +++ b/cinder/volume/drivers/netapp/nfs.py @@ -0,0 +1,624 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2012 NetApp, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Volume driver for NetApp NFS storage. 
+""" + +import copy +import os +import time + +from oslo.config import cfg +import suds +from suds.sax import text + +from cinder import exception +from cinder.openstack.common import log as logging +from cinder.volume.drivers.netapp.api import NaApiError +from cinder.volume.drivers.netapp.api import NaElement +from cinder.volume.drivers.netapp.api import NaServer +from cinder.volume.drivers.netapp.iscsi import netapp_opts +from cinder.volume.drivers import nfs + +LOG = logging.getLogger(__name__) + +netapp_nfs_opts = [ + cfg.IntOpt('synchronous_snapshot_create', + default=0, + help='Does snapshot creation call returns immediately')] + + +class NetAppNFSDriver(nfs.NfsDriver): + """Executes commands relating to Volumes.""" + def __init__(self, *args, **kwargs): + # NOTE(vish): db is set by Manager + self._execute = None + self._context = None + super(NetAppNFSDriver, self).__init__(*args, **kwargs) + self.configuration.append_config_values(netapp_opts) + self.configuration.append_config_values(netapp_nfs_opts) + + def set_execute(self, execute): + self._execute = execute + + def do_setup(self, context): + self._context = context + self.check_for_setup_error() + self._client = self._get_client() + + def check_for_setup_error(self): + """Returns an error if prerequisites aren't met.""" + self._check_dfm_flags() + super(NetAppNFSDriver, self).check_for_setup_error() + + def create_volume_from_snapshot(self, volume, snapshot): + """Creates a volume from a snapshot.""" + vol_size = volume.size + snap_size = snapshot.volume_size + + if vol_size != snap_size: + msg = _('Cannot create volume of size %(vol_size)s from ' + 'snapshot of size %(snap_size)s') + raise exception.CinderException(msg % locals()) + + self._clone_volume(snapshot.name, volume.name, snapshot.volume_id) + share = self._get_volume_location(snapshot.volume_id) + + return {'provider_location': share} + + def create_snapshot(self, snapshot): + """Creates a snapshot.""" + self._clone_volume(snapshot['volume_name'], + snapshot['name'], + snapshot['volume_id']) + + def delete_snapshot(self, snapshot): + """Deletes a snapshot.""" + nfs_mount = self._get_provider_location(snapshot.volume_id) + + if self._volume_not_present(nfs_mount, snapshot.name): + return True + + self._execute('rm', self._get_volume_path(nfs_mount, snapshot.name), + run_as_root=True) + + def _check_dfm_flags(self): + """Raises error if any required configuration flag for OnCommand proxy + is missing.""" + required_flags = ['netapp_wsdl_url', + 'netapp_login', + 'netapp_password', + 'netapp_server_hostname', + 'netapp_server_port'] + for flag in required_flags: + if not getattr(self.configuration, flag, None): + raise exception.CinderException(_('%s is not set') % flag) + + def _get_client(self): + """Creates SOAP _client for ONTAP-7 DataFabric Service.""" + client = suds.client.Client( + self.configuration.netapp_wsdl_url, + username=self.configuration.netapp_login, + password=self.configuration.netapp_password) + soap_url = 'http://%s:%s/apis/soap/v1' % ( + self.configuration.netapp_server_hostname, + self.configuration.netapp_server_port) + client.set_options(location=soap_url) + + return client + + def _get_volume_location(self, volume_id): + """Returns NFS mount address as :""" + nfs_server_ip = self._get_host_ip(volume_id) + export_path = self._get_export_path(volume_id) + return (nfs_server_ip + ':' + export_path) + + def _clone_volume(self, volume_name, clone_name, volume_id): + """Clones mounted volume with OnCommand proxy API.""" + host_id = 
self._get_host_id(volume_id) + export_path = self._get_full_export_path(volume_id, host_id) + + request = self._client.factory.create('Request') + request.Name = 'clone-start' + + clone_start_args = ('<source-path>%s/%s</source-path>' + '<destination-path>%s/%s</destination-path>') + + request.Args = text.Raw(clone_start_args % (export_path, + volume_name, + export_path, + clone_name)) + + resp = self._client.service.ApiProxy(Target=host_id, + Request=request) + + if (resp.Status == 'passed' and + self.configuration.synchronous_snapshot_create): + clone_id = resp.Results['clone-id'][0] + clone_id_info = clone_id['clone-id-info'][0] + clone_operation_id = int(clone_id_info['clone-op-id'][0]) + + self._wait_for_clone_finished(clone_operation_id, host_id) + elif resp.Status == 'failed': + raise exception.CinderException(resp.Reason) + + def _wait_for_clone_finished(self, clone_operation_id, host_id): + """ + Polls ONTAP7 for clone status. Returns once clone is finished. + :param clone_operation_id: Identifier of ONTAP clone operation + """ + clone_list_options = ('<clone-id>' + '<clone-id-info>' + '<clone-op-id>%d</clone-op-id>' + '<volume-uuid></volume-uuid>' + '</clone-id-info>' + '</clone-id>') + + request = self._client.factory.create('Request') + request.Name = 'clone-list-status' + request.Args = text.Raw(clone_list_options % clone_operation_id) + + resp = self._client.service.ApiProxy(Target=host_id, Request=request) + + while resp.Status != 'passed': + time.sleep(1) + resp = self._client.service.ApiProxy(Target=host_id, + Request=request) + + def _get_provider_location(self, volume_id): + """ + Returns provider location for given volume + :param volume_id: + """ + volume = self.db.volume_get(self._context, volume_id) + return volume.provider_location + + def _get_host_ip(self, volume_id): + """Returns IP address for the given volume.""" + return self._get_provider_location(volume_id).split(':')[0] + + def _get_export_path(self, volume_id): + """Returns NFS export path for the given volume.""" + return self._get_provider_location(volume_id).split(':')[1] + + def _get_host_id(self, volume_id): + """Returns ID of the ONTAP-7 host.""" + host_ip = self._get_host_ip(volume_id) + server = self._client.service + + resp = server.HostListInfoIterStart(ObjectNameOrId=host_ip) + tag = resp.Tag + + try: + res = server.HostListInfoIterNext(Tag=tag, Maximum=1) + if hasattr(res, 'Hosts') and res.Hosts.HostInfo: + return res.Hosts.HostInfo[0].HostId + finally: + server.HostListInfoIterEnd(Tag=tag) + + def _get_full_export_path(self, volume_id, host_id): + """Returns full path to the NFS share, e.g. /vol/vol0/home.""" + export_path = self._get_export_path(volume_id) + command_args = '<pathname>%s</pathname>' + + request = self._client.factory.create('Request') + request.Name = 'nfs-exportfs-storage-path' + request.Args = text.Raw(command_args % export_path) + + resp = self._client.service.ApiProxy(Target=host_id, + Request=request) + + if resp.Status == 'passed': + return resp.Results['actual-pathname'][0] + elif resp.Status == 'failed': + raise exception.CinderException(resp.Reason) + + def _volume_not_present(self, nfs_mount, volume_name): + """Check if volume exists.""" + try: + self._try_execute('ls', self._get_volume_path(nfs_mount, + volume_name)) + except exception.ProcessExecutionError: + # If the volume isn't present + return True + return False + + def _try_execute(self, *command, **kwargs): + # NOTE(vish): Volume commands can partially fail due to timing, but + # running them a second time on failure will usually + # recover nicely.
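+ # The loop below retries up to num_shell_tries times, sleeping + # tries ** 2 seconds (1, 4, 9, ...) between attempts.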
+ tries = 0 + while True: + try: + self._execute(*command, **kwargs) + return True + except exception.ProcessExecutionError: + tries = tries + 1 + if tries >= self.configuration.num_shell_tries: + raise + LOG.exception(_("Recovering from a failed execute. " + "Try number %s"), tries) + time.sleep(tries ** 2) + + def _get_volume_path(self, nfs_share, volume_name): + """Get volume path (local fs path) for given volume name on given nfs + share + @param nfs_share string, example 172.18.194.100:/var/nfs + @param volume_name string, + example volume-91ee65ec-c473-4391-8c09-162b00c68a8c + """ + return os.path.join(self._get_mount_point_for_share(nfs_share), + volume_name) + + def create_cloned_volume(self, volume, src_vref): + """Creates a clone of the specified volume.""" + vol_size = volume.size + src_vol_size = src_vref.size + + if vol_size != src_vol_size: + msg = _('Cannot create clone of size %(vol_size)s from ' + 'volume of size %(src_vol_size)s') + raise exception.CinderException(msg % locals()) + + self._clone_volume(src_vref.name, volume.name, src_vref.id) + share = self._get_volume_location(src_vref.id) + + return {'provider_location': share} + + def _update_volume_status(self): + """Retrieve status info from volume group.""" + super(NetAppNFSDriver, self)._update_volume_status() + + backend_name = self.configuration.safe_get('volume_backend_name') + self._stats["volume_backend_name"] = (backend_name or + 'NetApp_NFS_7mode') + self._stats["vendor_name"] = 'NetApp' + self._stats["driver_version"] = '1.0' + + +class NetAppCmodeNfsDriver (NetAppNFSDriver): + """Executes commands related to volumes on c mode.""" + + def __init__(self, *args, **kwargs): + super(NetAppCmodeNfsDriver, self).__init__(*args, **kwargs) + + def do_setup(self, context): + self._context = context + self.check_for_setup_error() + self._client = self._get_client() + + def check_for_setup_error(self): + """Returns an error if prerequisites aren't met.""" + self._check_flags() + + def _clone_volume(self, volume_name, clone_name, volume_id): + """Clones mounted volume with NetApp Cloud Services.""" + host_ip = self._get_host_ip(volume_id) + export_path = self._get_export_path(volume_id) + LOG.debug(_("""Cloning with params ip %(host_ip)s, exp_path + %(export_path)s, vol %(volume_name)s, + clone_name %(clone_name)s""") % locals()) + self._client.service.CloneNasFile(host_ip, export_path, + volume_name, clone_name) + + def _check_flags(self): + """Raises error if any required configuration flag for NetApp Cloud + Webservices is missing.""" + required_flags = ['netapp_wsdl_url', + 'netapp_login', + 'netapp_password', + 'netapp_server_hostname', + 'netapp_server_port'] + for flag in required_flags: + if not getattr(self.configuration, flag, None): + raise exception.CinderException(_('%s is not set') % flag) + + def _get_client(self): + """Creates SOAP _client for NetApp Cloud service.""" + client = suds.client.Client( + self.configuration.netapp_wsdl_url, + username=self.configuration.netapp_login, + password=self.configuration.netapp_password) + return client + + def _update_volume_status(self): + """Retrieve status info from volume group.""" + super(NetAppCmodeNfsDriver, self)._update_volume_status() + + backend_name = self.configuration.safe_get('volume_backend_name') + self._stats["volume_backend_name"] = (backend_name or + 'NetApp_NFS_Cluster') + self._stats["vendor_name"] = 'NetApp' + self._stats["driver_version"] = '1.0' + + +class NetAppDirectNfsDriver (NetAppNFSDriver): + """Executes commands related to volumes on 
NetApp filer.""" + + def __init__(self, *args, **kwargs): + super(NetAppDirectNfsDriver, self).__init__(*args, **kwargs) + + def do_setup(self, context): + self._context = context + self.check_for_setup_error() + self._client = self._get_client() + self._do_custom_setup(self._client) + + def check_for_setup_error(self): + """Returns an error if prerequisites aren't met.""" + self._check_flags() + + def _clone_volume(self, volume_name, clone_name, volume_id): + """Clones mounted volume on NetApp filer.""" + raise NotImplementedError() + + def _check_flags(self): + """Raises error if any required configuration flag for NetApp + filer is missing.""" + required_flags = ['netapp_login', + 'netapp_password', + 'netapp_server_hostname', + 'netapp_server_port', + 'netapp_transport_type'] + for flag in required_flags: + if not getattr(self.configuration, flag, None): + raise exception.CinderException(_('%s is not set') % flag) + + def _get_client(self): + """Creates NetApp api client.""" + client = NaServer( + host=self.configuration.netapp_server_hostname, + server_type=NaServer.SERVER_TYPE_FILER, + transport_type=self.configuration.netapp_transport_type, + style=NaServer.STYLE_LOGIN_PASSWORD, + username=self.configuration.netapp_login, + password=self.configuration.netapp_password) + return client + + def _do_custom_setup(self, client): + """Do the customized set up on client if any for different types.""" + raise NotImplementedError() + + def _is_naelement(self, elem): + """Checks if element is NetApp element.""" + if not isinstance(elem, NaElement): + raise ValueError('Expects NaElement') + + def _invoke_successfully(self, na_element, vserver=None): + """Invoke the api for successful result. + + If vserver is present then invokes vserver/vfiler api + else filer/Cluster api. + :param vserver: vserver/vfiler name. 
+ """ + self._is_naelement(na_element) + server = copy.copy(self._client) + if vserver: + server.set_vserver(vserver) + else: + server.set_vserver(None) + result = server.invoke_successfully(na_element, True) + return result + + def _get_ontapi_version(self): + """Gets the supported ontapi version.""" + ontapi_version = NaElement('system-get-ontapi-version') + res = self._invoke_successfully(ontapi_version, False) + major = res.get_child_content('major-version') + minor = res.get_child_content('minor-version') + return (major, minor) + + +class NetAppDirectCmodeNfsDriver (NetAppDirectNfsDriver): + """Executes commands related to volumes on c mode.""" + + def __init__(self, *args, **kwargs): + super(NetAppDirectCmodeNfsDriver, self).__init__(*args, **kwargs) + + def _do_custom_setup(self, client): + """Do the customized set up on client for cluster mode.""" + # Default values to run first api + client.set_api_version(1, 15) + (major, minor) = self._get_ontapi_version() + client.set_api_version(major, minor) + + def _clone_volume(self, volume_name, clone_name, volume_id): + """Clones mounted volume on NetApp Cluster.""" + host_ip = self._get_host_ip(volume_id) + export_path = self._get_export_path(volume_id) + ifs = self._get_if_info_by_ip(host_ip) + vserver = ifs[0].get_child_content('vserver') + exp_volume = self._get_vol_by_junc_vserver(vserver, export_path) + self._clone_file(exp_volume, volume_name, clone_name, vserver) + + def _get_if_info_by_ip(self, ip): + """Gets the network interface info by ip.""" + net_if_iter = NaElement('net-interface-get-iter') + net_if_iter.add_new_child('max-records', '10') + query = NaElement('query') + net_if_iter.add_child_elem(query) + query.add_node_with_children('net-interface-info', **{'address': ip}) + result = self._invoke_successfully(net_if_iter) + if result.get_child_content('num-records') and\ + int(result.get_child_content('num-records')) >= 1: + attr_list = result.get_child_by_name('attributes-list') + return attr_list.get_children() + raise exception.NotFound( + _('No interface found on cluster for ip %s') + % (ip)) + + def _get_vol_by_junc_vserver(self, vserver, junction): + """Gets the volume by junction path and vserver.""" + vol_iter = NaElement('volume-get-iter') + vol_iter.add_new_child('max-records', '10') + query = NaElement('query') + vol_iter.add_child_elem(query) + vol_attrs = NaElement('volume-attributes') + query.add_child_elem(vol_attrs) + vol_attrs.add_node_with_children( + 'volume-id-attributes', + **{'junction-path': junction, + 'owning-vserver-name': vserver}) + des_attrs = NaElement('desired-attributes') + des_attrs.add_node_with_children('volume-attributes', + **{'volume-id-attributes': None}) + vol_iter.add_child_elem(des_attrs) + result = self._invoke_successfully(vol_iter, vserver) + if result.get_child_content('num-records') and\ + int(result.get_child_content('num-records')) >= 1: + attr_list = result.get_child_by_name('attributes-list') + vols = attr_list.get_children() + vol_id = vols[0].get_child_by_name('volume-id-attributes') + return vol_id.get_child_content('name') + raise exception.NotFound(_("""No volume on cluster with vserver + %(vserver)s and junction path %(junction)s + """) % locals()) + + def _clone_file(self, volume, src_path, dest_path, vserver=None): + """Clones file on vserver.""" + LOG.debug(_("""Cloning with params volume %(volume)s,src %(src_path)s, + dest %(dest_path)s, vserver %(vserver)s""") + % locals()) + clone_create = NaElement.create_node_with_children( + 'clone-create', + **{'volume': 
volume, 'source-path': src_path, + 'destination-path': dest_path}) + self._invoke_successfully(clone_create, vserver) + + def _update_volume_status(self): + """Retrieve status info from volume group.""" + super(NetAppDirectCmodeNfsDriver, self)._update_volume_status() + + backend_name = self.configuration.safe_get('volume_backend_name') + self._stats["volume_backend_name"] = (backend_name or + 'NetApp_NFS_cluster_direct') + self._stats["vendor_name"] = 'NetApp' + self._stats["driver_version"] = '1.0' + + +class NetAppDirect7modeNfsDriver (NetAppDirectNfsDriver): + """Executes commands related to volumes on 7 mode.""" + + def __init__(self, *args, **kwargs): + super(NetAppDirect7modeNfsDriver, self).__init__(*args, **kwargs) + + def _do_custom_setup(self, client): + """Do the customized set up on client if any for 7 mode.""" + (major, minor) = self._get_ontapi_version() + client.set_api_version(major, minor) + + def _clone_volume(self, volume_name, clone_name, volume_id): + """Clones mounted volume with NetApp filer.""" + export_path = self._get_export_path(volume_id) + storage_path = self._get_actual_path_for_export(export_path) + target_path = '%s/%s' % (storage_path, clone_name) + (clone_id, vol_uuid) = self._start_clone('%s/%s' % (storage_path, + volume_name), + target_path) + if vol_uuid: + try: + self._wait_for_clone_finish(clone_id, vol_uuid) + except NaApiError as e: + if e.code != 'UnknownCloneId': + self._clear_clone(clone_id) + raise e + + def _get_actual_path_for_export(self, export_path): + """Gets the actual path on the filer for export path.""" + storage_path = NaElement.create_node_with_children( + 'nfs-exportfs-storage-path', **{'pathname': export_path}) + result = self._invoke_successfully(storage_path, None) + if result.get_child_content('actual-pathname'): + return result.get_child_content('actual-pathname') + raise exception.NotFound(_('No storage path found for export path %s') + % (export_path)) + + def _start_clone(self, src_path, dest_path): + """Starts the clone operation. 
+ + :returns: tuple of (clone-op-id, volume-uuid) + """ + LOG.debug(_("""Cloning with src %(src_path)s, dest %(dest_path)s""") + % locals()) + clone_start = NaElement.create_node_with_children( + 'clone-start', + **{'source-path': src_path, + 'destination-path': dest_path, + 'no-snap': 'true'}) + result = self._invoke_successfully(clone_start, None) + clone_id_el = result.get_child_by_name('clone-id') + cl_id_info = clone_id_el.get_child_by_name('clone-id-info') + vol_uuid = cl_id_info.get_child_content('volume-uuid') + clone_id = cl_id_info.get_child_content('clone-op-id') + return (clone_id, vol_uuid) + + def _wait_for_clone_finish(self, clone_op_id, vol_uuid): + """Waits till a clone operation is complete or errored out.""" + clone_ls_st = NaElement('clone-list-status') + clone_id = NaElement('clone-id') + clone_ls_st.add_child_elem(clone_id) + clone_id.add_node_with_children('clone-id-info', + **{'clone-op-id': clone_op_id, + 'volume-uuid': vol_uuid}) + task_running = True + while task_running: + result = self._invoke_successfully(clone_ls_st, None) + status = result.get_child_by_name('status') + ops_info = status.get_children() + if ops_info: + state = ops_info[0].get_child_content('clone-state') + if state == 'completed': + task_running = False + elif state == 'failed': + code = ops_info[0].get_child_content('error') + reason = ops_info[0].get_child_content('reason') + raise NaApiError(code, reason) + else: + time.sleep(1) + else: + raise NaApiError( + 'UnknownCloneId', + 'No clone operation for clone id %s found on the filer' + % (clone_op_id)) + + def _clear_clone(self, clone_id): + """Clear the clone information. + + Invoke this in case of failed clone. + """ + clone_clear = NaElement.create_node_with_children( + 'clone-clear', + **{'clone-id': clone_id}) + retry = 3 + while retry: + try: + self._invoke_successfully(clone_clear, None) + break + except Exception: + # Filer might be rebooting + time.sleep(5) + retry = retry - 1 + + def _update_volume_status(self): + """Retrieve status info from volume group.""" + super(NetAppDirect7modeNfsDriver, self)._update_volume_status() + + backend_name = self.configuration.safe_get('volume_backend_name') + self._stats["volume_backend_name"] = (backend_name or + 'NetApp_NFS_7mode_direct') + self._stats["vendor_name"] = 'NetApp' + self._stats["driver_version"] = '1.0' diff --git a/cinder/volume/drivers/nexenta/__init__.py b/cinder/volume/drivers/nexenta/__init__.py new file mode 100644 index 0000000000..3050df8f66 --- /dev/null +++ b/cinder/volume/drivers/nexenta/__init__.py @@ -0,0 +1,33 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2011 Nexenta Systems, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +:mod:`nexenta` -- Package contains Nexenta-specific modules +===================================================================== + +.. automodule:: nexenta +.. 
moduleauthor:: Yuriy Taraday +""" + + +class NexentaException(Exception): + MESSAGE = _('Nexenta SA returned the error') + + def __init__(self, error=None): + super(NexentaException, self).__init__(self.message, error) + + def __str__(self): + return '%s: %s' % self.args diff --git a/cinder/volume/drivers/nexenta/jsonrpc.py b/cinder/volume/drivers/nexenta/jsonrpc.py new file mode 100644 index 0000000000..ddeb5bdd87 --- /dev/null +++ b/cinder/volume/drivers/nexenta/jsonrpc.py @@ -0,0 +1,84 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2011 Nexenta Systems, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +:mod:`nexenta.jsonrpc` -- Nexenta-specific JSON RPC client +===================================================================== + +.. automodule:: nexenta.jsonrpc +.. moduleauthor:: Yuriy Taraday +""" + +import urllib2 + +from cinder.openstack.common import jsonutils +from cinder.openstack.common import log as logging +from cinder.volume.drivers import nexenta + +LOG = logging.getLogger(__name__) + + +class NexentaJSONException(nexenta.NexentaException): + pass + + +class NexentaJSONProxy(object): + def __init__(self, url, user, password, auto=False, obj=None, method=None): + self.url = url + self.user = user + self.password = password + self.auto = auto + self.obj = obj + self.method = method + + def __getattr__(self, name): + if not self.obj: + obj, method = name, None + elif not self.method: + obj, method = self.obj, name + else: + obj, method = '%s.%s' % (self.obj, self.method), name + return NexentaJSONProxy(self.url, self.user, self.password, self.auto, + obj, method) + + def __call__(self, *args): + data = jsonutils.dumps({'object': self.obj, + 'method': self.method, + 'params': args}) + auth = ('%s:%s' % (self.user, self.password)).encode('base64')[:-1] + headers = {'Content-Type': 'application/json', + 'Authorization': 'Basic %s' % (auth,)} + LOG.debug(_('Sending JSON data: %s'), data) + request = urllib2.Request(self.url, data, headers) + response_obj = urllib2.urlopen(request) + if response_obj.info().status == 'EOF in headers': + if self.auto and self.url.startswith('http://'): + LOG.info(_('Auto switching to HTTPS connection to %s'), + self.url) + self.url = 'https' + self.url[4:] + request = urllib2.Request(self.url, data, headers) + response_obj = urllib2.urlopen(request) + else: + LOG.error(_('No headers in server response')) + raise NexentaJSONException(_('Bad response from server')) + + response_data = response_obj.read() + LOG.debug(_('Got response: %s'), response_data) + response = jsonutils.loads(response_data) + if response.get('error') is not None: + raise NexentaJSONException(response['error'].get('message', '')) + else: + return response.get('result') diff --git a/cinder/volume/drivers/nexenta/volume.py b/cinder/volume/drivers/nexenta/volume.py new file mode 100644 index 0000000000..94b73fd3aa --- /dev/null +++ b/cinder/volume/drivers/nexenta/volume.py @@ -0,0 +1,353 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# 
+# Copyright 2011 Nexenta Systems, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +:mod:`nexenta.volume` -- Driver to store volumes on Nexenta Appliance +===================================================================== + +.. automodule:: nexenta.volume +.. moduleauthor:: Yuriy Taraday +""" + +from oslo.config import cfg + +from cinder import exception +from cinder import flags +from cinder.openstack.common import log as logging +from cinder.volume import driver +from cinder.volume.drivers import nexenta +from cinder.volume.drivers.nexenta import jsonrpc + +VERSION = '1.0' +LOG = logging.getLogger(__name__) +FLAGS = flags.FLAGS + +nexenta_opts = [ + cfg.StrOpt('nexenta_host', + default='', + help='IP address of Nexenta SA'), + cfg.IntOpt('nexenta_rest_port', + default=2000, + help='HTTP port to connect to Nexenta REST API server'), + cfg.StrOpt('nexenta_rest_protocol', + default='auto', + help='Use http or https for REST connection (default auto)'), + cfg.StrOpt('nexenta_user', + default='admin', + help='User name to connect to Nexenta SA'), + cfg.StrOpt('nexenta_password', + default='nexenta', + help='Password to connect to Nexenta SA', + secret=True), + cfg.IntOpt('nexenta_iscsi_target_portal_port', + default=3260, + help='Nexenta target portal port'), + cfg.StrOpt('nexenta_volume', + default='cinder', + help='pool on SA that will hold all volumes'), + cfg.StrOpt('nexenta_target_prefix', + default='iqn.1986-03.com.sun:02:cinder-', + help='IQN prefix for iSCSI targets'), + cfg.StrOpt('nexenta_target_group_prefix', + default='cinder/', + help='prefix for iSCSI target groups on SA'), + cfg.StrOpt('nexenta_blocksize', + default='', + help='block size for volumes (blank=default,8KB)'), + cfg.BoolOpt('nexenta_sparse', + default=False, + help='flag to create sparse volumes'), +] +FLAGS.register_opts(nexenta_opts) + + +class NexentaDriver(driver.ISCSIDriver): # pylint: disable=R0921 + """Executes volume driver commands on Nexenta Appliance.""" + + def __init__(self, *args, **kwargs): + super(NexentaDriver, self).__init__(*args, **kwargs) + + def do_setup(self, context): + protocol = FLAGS.nexenta_rest_protocol + auto = protocol == 'auto' + if auto: + protocol = 'http' + self.nms = jsonrpc.NexentaJSONProxy( + '%s://%s:%s/rest/nms/' % (protocol, FLAGS.nexenta_host, + FLAGS.nexenta_rest_port), + FLAGS.nexenta_user, FLAGS.nexenta_password, auto=auto) + + def check_for_setup_error(self): + """Verify that the volume for our zvols exists. 
+ + :raise: :py:exc:`LookupError` + """ + if not self.nms.volume.object_exists(FLAGS.nexenta_volume): + raise LookupError(_("Volume %s does not exist in Nexenta SA") % + FLAGS.nexenta_volume) + + @staticmethod + def _get_zvol_name(volume_name): + """Return zvol name that corresponds to the given volume name.""" + return '%s/%s' % (FLAGS.nexenta_volume, volume_name) + + @staticmethod + def _get_target_name(volume_name): + """Return iSCSI target name to access volume.""" + return '%s%s' % (FLAGS.nexenta_target_prefix, volume_name) + + @staticmethod + def _get_target_group_name(volume_name): + """Return Nexenta iSCSI target group name for volume.""" + return '%s%s' % (FLAGS.nexenta_target_group_prefix, volume_name) + + def create_volume(self, volume): + """Create a zvol on appliance. + + :param volume: volume reference + """ + self.nms.zvol.create( + self._get_zvol_name(volume['name']), + '%sG' % (volume['size'],), + FLAGS.nexenta_blocksize, FLAGS.nexenta_sparse) + + def delete_volume(self, volume): + """Destroy a zvol on appliance. + + :param volume: volume reference + """ + try: + self.nms.zvol.destroy(self._get_zvol_name(volume['name']), '') + except nexenta.NexentaException as exc: + if "zvol has children" in exc.args[1]: + raise exception.VolumeIsBusy(volume_name=volume['name']) + else: + raise + + def create_snapshot(self, snapshot): + """Create snapshot of existing zvol on appliance. + + :param snapshot: snapshot reference + """ + self.nms.zvol.create_snapshot( + self._get_zvol_name(snapshot['volume_name']), + snapshot['name'], '') + + def create_volume_from_snapshot(self, volume, snapshot): + """Create new volume from another volume's snapshot on appliance. + + :param volume: reference of volume to be created + :param snapshot: reference of source snapshot + """ + self.nms.zvol.clone( + '%s@%s' % (self._get_zvol_name(snapshot['volume_name']), + snapshot['name']), + self._get_zvol_name(volume['name'])) + + def delete_snapshot(self, snapshot): + """Delete volume's snapshot on appliance. + + :param snapshot: snapshot reference + """ + try: + self.nms.snapshot.destroy( + '%s@%s' % (self._get_zvol_name(snapshot['volume_name']), + snapshot['name']), + '') + except nexenta.NexentaException as exc: + if "snapshot has dependent clones" in exc.args[1]: + raise exception.SnapshotIsBusy(snapshot_name=snapshot['name']) + else: + raise + + def local_path(self, volume): + """Return local path to existing local volume. + + We never have local volumes, so it raises NotImplementedError. + + :raise: :py:exc:`NotImplementedError` + """ + raise NotImplementedError + + def _do_export(self, _ctx, volume, ensure=False): + """Do all steps to get zvol exported as LUN 0 at separate target. 
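+ + The sequence below is: create an iSCSI target, create a target + group, add the target to the group, create a LU backed by the + zvol, and map it as LUN 0 of the target group.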
+ + :param volume: reference of volume to be exported + :param ensure: if True, ignore errors caused by already existing + resources + :return: iscsiadm-formatted provider location string + """ + zvol_name = self._get_zvol_name(volume['name']) + target_name = self._get_target_name(volume['name']) + target_group_name = self._get_target_group_name(volume['name']) + + try: + self.nms.iscsitarget.create_target({'target_name': target_name}) + except nexenta.NexentaException as exc: + if not ensure or 'already configured' not in exc.args[1]: + raise + else: + LOG.info(_('Ignored target creation error "%s"' + ' while ensuring export'), exc) + try: + self.nms.stmf.create_targetgroup(target_group_name) + except nexenta.NexentaException as exc: + if not ensure or 'already exists' not in exc.args[1]: + raise + else: + LOG.info(_('Ignored target group creation error "%s"' + ' while ensuring export'), exc) + try: + self.nms.stmf.add_targetgroup_member(target_group_name, + target_name) + except nexenta.NexentaException as exc: + if not ensure or 'already exists' not in exc.args[1]: + raise + else: + LOG.info(_('Ignored target group member addition error "%s"' + ' while ensuring export'), exc) + try: + self.nms.scsidisk.create_lu(zvol_name, {}) + except nexenta.NexentaException as exc: + if not ensure or 'in use' not in exc.args[1]: + raise + else: + LOG.info(_('Ignored LU creation error "%s"' + ' while ensuring export'), exc) + try: + self.nms.scsidisk.add_lun_mapping_entry(zvol_name, { + 'target_group': target_group_name, + 'lun': '0'}) + except nexenta.NexentaException as exc: + if not ensure or 'view entry exists' not in exc.args[1]: + raise + else: + LOG.info(_('Ignored LUN mapping entry addition error "%s"' + ' while ensuring export'), exc) + return '%s:%s,1 %s 0' % (FLAGS.nexenta_host, + FLAGS.nexenta_iscsi_target_portal_port, + target_name) + + def create_export(self, _ctx, volume): + """Create new export for zvol. + + :param volume: reference of volume to be exported + :return: iscsiadm-formatted provider location string + """ + loc = self._do_export(_ctx, volume, ensure=False) + return {'provider_location': loc} + + def ensure_export(self, _ctx, volume): + """Recreate parts of export if necessary. + + :param volume: reference of volume to be exported + """ + self._do_export(_ctx, volume, ensure=True) + + def remove_export(self, _ctx, volume): + """Destroy all resources created to export zvol. 
+ + :param volume: reference of volume to be unexported + """ + zvol_name = self._get_zvol_name(volume['name']) + target_name = self._get_target_name(volume['name']) + target_group_name = self._get_target_group_name(volume['name']) + self.nms.scsidisk.delete_lu(zvol_name) + + try: + self.nms.stmf.destroy_targetgroup(target_group_name) + except nexenta.NexentaException as exc: + # We assume that target group is already gone + LOG.warn(_('Got error trying to destroy target group' + ' %(target_group)s, assuming it is ' + 'already gone: %(exc)s'), + {'target_group': target_group_name, 'exc': exc}) + try: + self.nms.iscsitarget.delete_target(target_name) + except nexenta.NexentaException as exc: + # We assume that target is gone as well + LOG.warn(_('Got error trying to delete target %(target)s,' + ' assuming it is already gone: %(exc)s'), + {'target': target_name, 'exc': exc}) + + def copy_image_to_volume(self, context, volume, image_service, image_id): + """Fetch the image from image_service and write it to the volume.""" + raise NotImplementedError() + + def copy_volume_to_image(self, context, volume, image_service, image_meta): + """Copy the volume to the specified image.""" + raise NotImplementedError() + + def create_cloned_volume(self, volume, src_vref): + """Creates a clone of the specified volume.""" + raise NotImplementedError() + + def get_volume_stats(self, refresh=False): + """Get volume status. + + If 'refresh' is True, run update the stats first.""" + if refresh: + self._update_volume_status() + + return self._stats + + def _update_volume_status(self): + """Retrieve status info for Nexenta device.""" + + # NOTE(jdg): Aimon Bustardo was kind enough to point out the + # info he had regarding Nexenta Capabilities, ideally it would + # be great if somebody from Nexenta looked this over at some point + + KB = 1024 + MB = KB ** 2 + + LOG.debug(_("Updating volume status")) + data = {} + backend_name = self.__class__.__name__ + if self.configuration: + backend_name = self.configuration.safe_get('volume_backend_name') + data["volume_backend_name"] = backend_name or self.__class__.__name__ + data["vendor_name"] = 'Nexenta' + data["driver_version"] = VERSION + data["storage_protocol"] = 'iSCSI' + + stats = self.nms.volume.get_child_props(FLAGS.nexenta_volume, + 'health|size|used|available') + total_unit = stats['size'][-1] + total_amount = float(stats['size'][:-1]) + free_unit = stats['available'][-1] + free_amount = float(stats['available'][:-1]) + + if total_unit == "T": + total_amount = total_amount * KB + elif total_unit == "M": + total_amount = total_amount / KB + elif total_unit == "B": + total_amount = total_amount / MB + + if free_unit == "T": + free_amount = free_amount * KB + elif free_unit == "M": + free_amount = free_amount / KB + elif free_unit == "B": + free_amount = free_amount / MB + + data['total_capacity_gb'] = total_amount + data['free_capacity_gb'] = free_amount + + data['reserved_percentage'] = 0 + data['QoS_support'] = False + self._stats = data diff --git a/cinder/volume/drivers/nfs.py b/cinder/volume/drivers/nfs.py new file mode 100755 index 0000000000..5914ed72ab --- /dev/null +++ b/cinder/volume/drivers/nfs.py @@ -0,0 +1,357 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2012 NetApp, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import errno +import hashlib +import os + +from oslo.config import cfg + +from cinder import exception +from cinder.image import image_utils +from cinder.openstack.common import log as logging +from cinder.volume import driver + +LOG = logging.getLogger(__name__) + +volume_opts = [ + cfg.StrOpt('nfs_shares_config', + default='/etc/cinder/nfs_shares', + help='File with the list of available nfs shares'), + cfg.StrOpt('nfs_mount_point_base', + default='$state_path/mnt', + help='Base dir containing mount points for nfs shares'), + cfg.StrOpt('nfs_disk_util', + default='df', + help='Use du or df for free space calculation'), + cfg.BoolOpt('nfs_sparsed_volumes', + default=True, + help=('Create volumes as sparsed files which take no space. ' + 'If set to False, volume is created as a regular file. ' + 'In such case volume creation takes a lot of time.')), + cfg.StrOpt('nfs_mount_options', + default=None, + help='Mount options passed to the nfs client. See the ' + 'nfs man page for details'), +] + +VERSION = '1.0' + + +class RemoteFsDriver(driver.VolumeDriver): + """Common base for drivers that work like NFS.""" + + def check_for_setup_error(self): + """Just to override parent behavior.""" + pass + + def create_volume(self, volume): + raise NotImplementedError() + + def delete_volume(self, volume): + raise NotImplementedError() + + def delete_snapshot(self, snapshot): + """Do nothing for this driver, but allow manager to handle deletion + of snapshot in error state.""" + pass + + def ensure_export(self, ctx, volume): + raise NotImplementedError() + + def _create_sparsed_file(self, path, size): + """Creates file with 0 disk usage.""" + self._execute('truncate', '-s', '%sG' % size, + path, run_as_root=True) + + def _create_regular_file(self, path, size): + """Creates regular file of given size. 
Takes a lot of time for large + files.""" + KB = 1024 + MB = KB * 1024 + GB = MB * 1024 + + block_size_mb = 1 + block_count = size * GB / (block_size_mb * MB) + + self._execute('dd', 'if=/dev/zero', 'of=%s' % path, + 'bs=%dM' % block_size_mb, + 'count=%d' % block_count, + run_as_root=True) + + def _set_rw_permissions_for_all(self, path): + """Sets 666 permissions for the path.""" + self._execute('chmod', 'ugo+rw', path, run_as_root=True) + + def local_path(self, volume): + """Get volume path (mounted locally fs path) for given volume + :param volume: volume reference + """ + nfs_share = volume['provider_location'] + return os.path.join(self._get_mount_point_for_share(nfs_share), + volume['name']) + + def _get_hash_str(self, base_str): + """returns string that represents hash of base_str + (in a hex format).""" + return hashlib.md5(base_str).hexdigest() + + def copy_image_to_volume(self, context, volume, image_service, image_id): + """Fetch the image from image_service and write it to the volume.""" + image_utils.fetch_to_raw(context, + image_service, + image_id, + self.local_path(volume)) + + def copy_volume_to_image(self, context, volume, image_service, image_meta): + """Copy the volume to the specified image.""" + image_utils.upload_volume(context, + image_service, + image_meta, + self.local_path(volume)) + + +class NfsDriver(RemoteFsDriver): + """NFS based cinder driver. Creates file on NFS share for using it + as block device on hypervisor.""" + def __init__(self, *args, **kwargs): + super(NfsDriver, self).__init__(*args, **kwargs) + self.configuration.append_config_values(volume_opts) + + def do_setup(self, context): + """Any initialization the volume driver does while starting""" + super(NfsDriver, self).do_setup(context) + + config = self.configuration.nfs_shares_config + if not config: + msg = (_("There's no NFS config file configured (%s)") % + 'nfs_shares_config') + LOG.warn(msg) + raise exception.NfsException(msg) + if not os.path.exists(config): + msg = _("NFS config file at %(config)s doesn't exist") % locals() + LOG.warn(msg) + raise exception.NfsException(msg) + + try: + self._execute('mount.nfs', check_exit_code=False) + except OSError as exc: + if exc.errno == errno.ENOENT: + raise exception.NfsException('mount.nfs is not installed') + else: + raise + + def create_cloned_volume(self, volume, src_vref): + raise NotImplementedError() + + def create_volume(self, volume): + """Creates a volume""" + + self._ensure_shares_mounted() + + volume['provider_location'] = self._find_share(volume['size']) + + LOG.info(_('casted to %s') % volume['provider_location']) + + self._do_create_volume(volume) + + return {'provider_location': volume['provider_location']} + + def delete_volume(self, volume): + """Deletes a logical volume.""" + + if not volume['provider_location']: + LOG.warn(_('Volume %s does not have provider_location specified, ' + 'skipping'), volume['name']) + return + + self._ensure_share_mounted(volume['provider_location']) + + mounted_path = self.local_path(volume) + + self._execute('rm', '-f', mounted_path, run_as_root=True) + + def ensure_export(self, ctx, volume): + """Synchronously recreates an export for a logical volume.""" + self._ensure_share_mounted(volume['provider_location']) + + def create_export(self, ctx, volume): + """Exports the volume. 
Can optionally return a Dictionary of changes + to the volume object to be persisted.""" + pass + + def remove_export(self, ctx, volume): + """Removes an export for a logical volume.""" + pass + + def initialize_connection(self, volume, connector): + """Allow connection to connector and return connection info.""" + data = {'export': volume['provider_location'], + 'name': volume['name']} + return { + 'driver_volume_type': 'nfs', + 'data': data + } + + def terminate_connection(self, volume, connector, **kwargs): + """Disallow connection from connector""" + pass + + def _do_create_volume(self, volume): + """Create a volume on given nfs_share + :param volume: volume reference + """ + volume_path = self.local_path(volume) + volume_size = volume['size'] + + if self.configuration.nfs_sparsed_volumes: + self._create_sparsed_file(volume_path, volume_size) + else: + self._create_regular_file(volume_path, volume_size) + + self._set_rw_permissions_for_all(volume_path) + + def _ensure_shares_mounted(self): + """Looks for NFS shares in the configuration file and tries to + mount them locally.""" + self._mounted_shares = [] + + for share in self._load_shares_config(): + try: + self._ensure_share_mounted(share) + self._mounted_shares.append(share) + except Exception as exc: + LOG.warning(_('Exception during mounting %s') % (exc,)) + + LOG.debug('Available shares %s' % str(self._mounted_shares)) + + def _load_shares_config(self): + return [share.strip() for share in + open(self.configuration.nfs_shares_config) + if share and not share.startswith('#')] + + def _ensure_share_mounted(self, nfs_share): + """Mount NFS share + :param nfs_share: + """ + mount_path = self._get_mount_point_for_share(nfs_share) + self._mount_nfs(nfs_share, mount_path, ensure=True) + + def _find_share(self, volume_size_for): + """Choose NFS share among available ones for the given volume size. + The current implementation picks the share with the greatest free + capacity. + :param volume_size_for: int size in GB + """ + + if not self._mounted_shares: + raise exception.NfsNoSharesMounted() + + greatest_size = 0 + greatest_share = None + + for nfs_share in self._mounted_shares: + capacity = self._get_available_capacity(nfs_share)[0] + if capacity > greatest_size: + greatest_share = nfs_share + greatest_size = capacity + + if volume_size_for * 1024 * 1024 * 1024 > greatest_size: + raise exception.NfsNoSuitableShareFound( + volume_size=volume_size_for) + return greatest_share + + def _get_mount_point_for_share(self, nfs_share): + """ + :param nfs_share: example 172.18.194.100:/var/nfs + """ + return os.path.join(self.configuration.nfs_mount_point_base, + self._get_hash_str(nfs_share)) + + def _get_available_capacity(self, nfs_share): + """Calculate available space on the NFS share + :param nfs_share: example 172.18.194.100:/var/nfs + """ + mount_point = self._get_mount_point_for_share(nfs_share) + + out, _ = self._execute('df', '-P', '-B', '1', mount_point, + run_as_root=True) + out = out.splitlines()[1] + + available = 0 + + size = int(out.split()[1]) + if self.configuration.nfs_disk_util == 'df': + available = int(out.split()[3]) + else: + out, _ = self._execute('du', '-sb', '--apparent-size', + '--exclude', '*snapshot*', mount_point, + run_as_root=True) + used = int(out.split()[0]) + available = size - used + + return available, size + + def _mount_nfs(self, nfs_share, mount_path, ensure=False): + """Mount NFS share to mount path""" + self._execute('mkdir', '-p', mount_path) + + # Construct the NFS mount command. 
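+ # e.g. 'mount -t nfs -o <options> 172.18.194.100:/var/nfs <mount_path>'; + # the -o flag is only added when nfs_mount_options is configured.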
+ nfs_cmd = ['mount', '-t', 'nfs'] + if self.configuration.nfs_mount_options is not None: + nfs_cmd.extend(['-o', self.configuration.nfs_mount_options]) + nfs_cmd.extend([nfs_share, mount_path]) + + try: + self._execute(*nfs_cmd, run_as_root=True) + except exception.ProcessExecutionError as exc: + if ensure and 'already mounted' in exc.stderr: + LOG.warn(_("%s is already mounted"), nfs_share) + else: + raise + + def get_volume_stats(self, refresh=False): + """Get volume status. + + If 'refresh' is True, run update the stats first.""" + if refresh or not self._stats: + self._update_volume_status() + + return self._stats + + def _update_volume_status(self): + """Retrieve status info from volume group.""" + + data = {} + backend_name = self.configuration.safe_get('volume_backend_name') + data["volume_backend_name"] = backend_name or 'Generic_NFS' + data["vendor_name"] = 'Open Source' + data["driver_version"] = VERSION + data["storage_protocol"] = 'nfs' + + self._ensure_shares_mounted() + + global_capacity = 0 + global_free = 0 + for nfs_share in self._mounted_shares: + free, capacity = self._get_available_capacity(nfs_share) + global_capacity += capacity + global_free += free + + data['total_capacity_gb'] = global_capacity / 1024.0 ** 3 + data['free_capacity_gb'] = global_free / 1024.0 ** 3 + data['reserved_percentage'] = 0 + data['QoS_support'] = False + self._stats = data diff --git a/cinder/volume/drivers/rbd.py b/cinder/volume/drivers/rbd.py new file mode 100644 index 0000000000..f31c4cdaeb --- /dev/null +++ b/cinder/volume/drivers/rbd.py @@ -0,0 +1,306 @@ +# Copyright 2012 OpenStack LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+""" +RADOS Block Device Driver +""" + +import json +import os +import tempfile +import urllib + +from oslo.config import cfg + +from cinder import exception +from cinder.image import image_utils +from cinder.openstack.common import log as logging +from cinder import utils +from cinder.volume import driver + +LOG = logging.getLogger(__name__) + +rbd_opts = [ + cfg.StrOpt('rbd_pool', + default='rbd', + help='the RADOS pool in which rbd volumes are stored'), + cfg.StrOpt('rbd_user', + default=None, + help='the RADOS client name for accessing rbd volumes'), + cfg.StrOpt('rbd_secret_uuid', + default=None, + help='the libvirt uuid of the secret for the rbd_user' + 'volumes'), + cfg.StrOpt('volume_tmp_dir', + default=None, + help='where to store temporary image files if the volume ' + 'driver does not write them directly to the volume'), ] + +VERSION = '1.0' + + +class RBDDriver(driver.VolumeDriver): + """Implements RADOS block device (RBD) volume commands""" + def __init__(self, *args, **kwargs): + super(RBDDriver, self).__init__(*args, **kwargs) + self.configuration.append_config_values(rbd_opts) + self._stats = {} + + def check_for_setup_error(self): + """Returns an error if prerequisites aren't met""" + (stdout, stderr) = self._execute('rados', 'lspools') + pools = stdout.split("\n") + if self.configuration.rbd_pool not in pools: + exception_message = (_("rbd has no pool %s") % + self.configuration.rbd_pool) + raise exception.VolumeBackendAPIException(data=exception_message) + + def _update_volume_stats(self): + stats = {'vendor_name': 'Open Source', + 'driver_version': VERSION, + 'storage_protocol': 'ceph', + 'total_capacity_gb': 'unknown', + 'free_capacity_gb': 'unknown', + 'reserved_percentage': 0} + backend_name = self.configuration.safe_get('volume_backend_name') + stats['volume_backend_name'] = backend_name or 'RBD' + + try: + stdout, _err = self._execute('rados', 'df', '--format', 'json') + new_stats = json.loads(stdout) + total = int(new_stats['total_space']) / 1024 ** 2 + free = int(new_stats['total_avail']) / 1024 ** 2 + stats['total_capacity_gb'] = total + stats['free_capacity_gb'] = free + except exception.ProcessExecutionError: + # just log and return unknown capacities + LOG.exception(_('error refreshing volume stats')) + self._stats = stats + + def get_volume_stats(self, refresh=False): + """Return the current state of the volume service. 
If 'refresh' is + True, run the update first.""" + if refresh: + self._update_volume_stats() + return self._stats + + def _supports_layering(self): + stdout, _ = self._execute('rbd', '--help') + return 'clone' in stdout + + def create_cloned_volume(self, volume, src_vref): + """Clone a logical volume""" + self._try_execute('rbd', 'cp', + '--pool', self.configuration.rbd_pool, + '--image', src_vref['name'], + '--dest-pool', self.configuration.rbd_pool, + '--dest', volume['name']) + + def create_volume(self, volume): + """Creates a logical volume.""" + if int(volume['size']) == 0: + size = 100 + else: + size = int(volume['size']) * 1024 + args = ['rbd', 'create', + '--pool', self.configuration.rbd_pool, + '--size', size, + volume['name']] + if self._supports_layering(): + args += ['--new-format'] + self._try_execute(*args) + + def _clone(self, volume, src_pool, src_image, src_snap): + self._try_execute('rbd', 'clone', + '--pool', src_pool, + '--image', src_image, + '--snap', src_snap, + '--dest-pool', self.configuration.rbd_pool, + '--dest', volume['name']) + + def _resize(self, volume): + size = int(volume['size']) * 1024 + self._try_execute('rbd', 'resize', + '--pool', self.configuration.rbd_pool, + '--image', volume['name'], + '--size', size) + + def create_volume_from_snapshot(self, volume, snapshot): + """Creates a volume from a snapshot.""" + self._clone(volume, self.configuration.rbd_pool, + snapshot['volume_name'], snapshot['name']) + if int(volume['size']): + self._resize(volume) + + def delete_volume(self, volume): + """Deletes a logical volume.""" + stdout, _ = self._execute('rbd', 'snap', 'ls', + '--pool', self.configuration.rbd_pool, + volume['name']) + if stdout.count('\n') > 1: + raise exception.VolumeIsBusy(volume_name=volume['name']) + self._try_execute('rbd', 'rm', + '--pool', self.configuration.rbd_pool, + volume['name']) + + def create_snapshot(self, snapshot): + """Creates an rbd snapshot""" + self._try_execute('rbd', 'snap', 'create', + '--pool', self.configuration.rbd_pool, + '--snap', snapshot['name'], + snapshot['volume_name']) + if self._supports_layering(): + self._try_execute('rbd', 'snap', 'protect', + '--pool', self.configuration.rbd_pool, + '--snap', snapshot['name'], + snapshot['volume_name']) + + def delete_snapshot(self, snapshot): + """Deletes an rbd snapshot""" + if self._supports_layering(): + try: + self._try_execute('rbd', 'snap', 'unprotect', + '--pool', self.configuration.rbd_pool, + '--snap', snapshot['name'], + snapshot['volume_name']) + except exception.ProcessExecutionError: + raise exception.SnapshotIsBusy(snapshot_name=snapshot['name']) + self._try_execute('rbd', 'snap', 'rm', + '--pool', self.configuration.rbd_pool, + '--snap', snapshot['name'], + snapshot['volume_name']) + + def local_path(self, volume): + """Returns the path of the rbd volume.""" + # This is the same as the remote path + # since qemu accesses it directly. 
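+ # e.g. rbd:<rbd_pool>/<volume name>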
+ return "rbd:%s/%s" % (self.configuration.rbd_pool, volume['name']) + + def ensure_export(self, context, volume): + """Synchronously recreates an export for a logical volume.""" + pass + + def create_export(self, context, volume): + """Exports the volume""" + pass + + def remove_export(self, context, volume): + """Removes an export for a logical volume""" + pass + + def initialize_connection(self, volume, connector): + return { + 'driver_volume_type': 'rbd', + 'data': { + 'name': '%s/%s' % (self.configuration.rbd_pool, + volume['name']), + 'auth_enabled': (self.configuration.rbd_secret_uuid + is not None), + 'auth_username': self.configuration.rbd_user, + 'secret_type': 'ceph', + 'secret_uuid': self.configuration.rbd_secret_uuid, } + } + + def terminate_connection(self, volume, connector, **kwargs): + pass + + def _parse_location(self, location): + prefix = 'rbd://' + if not location.startswith(prefix): + reason = _('Not stored in rbd') + raise exception.ImageUnacceptable(image_id=location, reason=reason) + pieces = map(urllib.unquote, location[len(prefix):].split('/')) + if any(map(lambda p: p == '', pieces)): + reason = _('Blank components') + raise exception.ImageUnacceptable(image_id=location, reason=reason) + if len(pieces) != 4: + reason = _('Not an rbd snapshot') + raise exception.ImageUnacceptable(image_id=location, reason=reason) + return pieces + + def _get_fsid(self): + stdout, _ = self._execute('ceph', 'fsid') + return stdout.rstrip('\n') + + def _is_cloneable(self, image_location): + try: + fsid, pool, image, snapshot = self._parse_location(image_location) + except exception.ImageUnacceptable: + return False + + if self._get_fsid() != fsid: + reason = _('%s is in a different ceph cluster') % image_location + LOG.debug(reason) + return False + + # check that we can read the image + try: + self._execute('rbd', 'info', + '--pool', pool, + '--image', image, + '--snap', snapshot) + except exception.ProcessExecutionError: + LOG.debug(_('Unable to read image %s') % image_location) + return False + + return True + + def clone_image(self, volume, image_location): + if image_location is None or not self._is_cloneable(image_location): + return False + _, pool, image, snapshot = self._parse_location(image_location) + self._clone(volume, pool, image, snapshot) + self._resize(volume) + return True + + def _ensure_tmp_exists(self): + tmp_dir = self.configuration.volume_tmp_dir + if tmp_dir and not os.path.exists(tmp_dir): + os.makedirs(tmp_dir) + + def copy_image_to_volume(self, context, volume, image_service, image_id): + # TODO(jdurgin): replace with librbd + # this is a temporary hack, since rewriting this driver + # to use librbd would take too long + self._ensure_tmp_exists() + tmp_dir = self.configuration.volume_tmp_dir + + with tempfile.NamedTemporaryFile(dir=tmp_dir) as tmp: + image_utils.fetch_to_raw(context, image_service, image_id, + tmp.name) + # import creates the image, so we must remove it first + self._try_execute('rbd', 'rm', + '--pool', self.configuration.rbd_pool, + volume['name']) + + args = ['rbd', 'import', + '--pool', self.configuration.rbd_pool, + tmp.name, volume['name']] + if self._supports_layering(): + args += ['--new-format'] + self._try_execute(*args) + self._resize(volume) + + def copy_volume_to_image(self, context, volume, image_service, image_meta): + self._ensure_tmp_exists() + + tmp_dir = self.configuration.volume_tmp_dir or '/tmp' + tmp_file = os.path.join(tmp_dir, + volume['name'] + '-' + image_meta['id']) + with utils.remove_path_on_error(tmp_file): + 
self._try_execute('rbd', 'export', + '--pool', self.configuration.rbd_pool, + volume['name'], tmp_file) + image_utils.upload_volume(context, image_service, + image_meta, tmp_file) + os.unlink(tmp_file) diff --git a/cinder/volume/drivers/san/__init__.py b/cinder/volume/drivers/san/__init__.py new file mode 100644 index 0000000000..f890b1e792 --- /dev/null +++ b/cinder/volume/drivers/san/__init__.py @@ -0,0 +1,27 @@ +# Copyright (c) 2012 OpenStack, LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +:mod:`cinder.volume.san` -- Cinder San Drivers +===================================================== + +.. automodule:: cinder.volume.san + :platform: Unix + :synopsis: Module containing all the Cinder San drivers. +""" + +# Adding imports for backwards compatibility in loading volume_driver. +from hp_lefthand import HpSanISCSIDriver +from san import SanISCSIDriver +from solaris import SolarisISCSIDriver diff --git a/cinder/volume/drivers/san/hp/__init__.py b/cinder/volume/drivers/san/hp/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/cinder/volume/drivers/san/hp/hp_3par_common.py b/cinder/volume/drivers/san/hp/hp_3par_common.py new file mode 100644 index 0000000000..9468904994 --- /dev/null +++ b/cinder/volume/drivers/san/hp/hp_3par_common.py @@ -0,0 +1,742 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# (c) Copyright 2012-2013 Hewlett-Packard Development Company, L.P. +# All Rights Reserved. +# +# Copyright 2012 OpenStack LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +""" +Volume driver common utilities for HP 3PAR Storage array +The 3PAR drivers requires 3.1.2 firmware on the 3PAR array. + +You will need to install the python hp3parclient. +sudo pip install hp3parclient + +The drivers uses both the REST service and the SSH +command line to correctly operate. Since the +ssh credentials and the REST credentials can be different +we need to have settings for both. + +The drivers requires the use of the san_ip, san_login, +san_password settings for ssh connections into the 3PAR +array. It also requires the setting of +hp3par_api_url, hp3par_username, hp3par_password +for credentials to talk to the REST service on the 3PAR +array. 
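+
+A minimal cinder.conf backend section might therefore look like this
+(all values illustrative):
+
+    san_ip=10.10.22.241
+    san_login=3paradm
+    san_password=3parpass
+    hp3par_api_url=https://10.10.22.241:8080/api/v1
+    hp3par_username=3paruser
+    hp3par_password=3parpass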
+""" +import base64 +import json +import paramiko +import pprint +from random import randint +import time +import uuid + +from eventlet import greenthread +from hp3parclient import exceptions as hpexceptions +from oslo.config import cfg + +from cinder import context +from cinder import exception +from cinder.openstack.common import log as logging +from cinder import utils +from cinder.volume import volume_types + +LOG = logging.getLogger(__name__) + +hp3par_opts = [ + cfg.StrOpt('hp3par_api_url', + default='', + help="3PAR WSAPI Server Url like " + "https://<3par ip>:8080/api/v1"), + cfg.StrOpt('hp3par_username', + default='', + help="3PAR Super user username"), + cfg.StrOpt('hp3par_password', + default='', + help="3PAR Super user password", + secret=True), + cfg.StrOpt('hp3par_domain', + default="OpenStack", + help="The 3par domain name to use"), + cfg.StrOpt('hp3par_cpg', + default="OpenStack", + help="The CPG to use for volume creation"), + cfg.StrOpt('hp3par_cpg_snap', + default="", + help="The CPG to use for Snapshots for volumes. " + "If empty hp3par_cpg will be used"), + cfg.StrOpt('hp3par_snapshot_retention', + default="", + help="The time in hours to retain a snapshot. " + "You can't delete it before this expires."), + cfg.StrOpt('hp3par_snapshot_expiration', + default="", + help="The time in hours when a snapshot expires " + " and is deleted. This must be larger than expiration"), + cfg.BoolOpt('hp3par_debug', + default=False, + help="Enable HTTP debugging to 3PAR") +] + + +class HP3PARCommon(): + + stats = {} + + # Valid values for volume type extra specs + # The first value in the list is the default value + valid_prov_values = ['thin', 'full'] + valid_persona_values = ['1 - Generic', + '2 - Generic-ALUA', + '6 - Generic-legacy', + '7 - HPUX-legacy', + '8 - AIX-legacy', + '9 - EGENERA', + '10 - ONTAP-legacy', + '11 - VMware'] + + def __init__(self, config): + self.sshpool = None + self.config = config + + def check_flags(self, options, required_flags): + for flag in required_flags: + if not getattr(options, flag, None): + raise exception.InvalidInput(reason=_('%s is not set') % flag) + + def _get_3par_vol_name(self, volume_id): + """ + Converts the openstack volume id from + ecffc30f-98cb-4cf5-85ee-d7309cc17cd2 + to + osv-7P.DD5jLTPWF7tcwnMF80g + + We convert the 128 bits of the uuid into a 24character long + base64 encoded string to ensure we don't exceed the maximum + allowed 31 character name limit on 3Par + + We strip the padding '=' and replace + with . + and / with - + """ + volume_name = self._encode_name(volume_id) + return "osv-%s" % volume_name + + def _get_3par_snap_name(self, snapshot_id): + snapshot_name = self._encode_name(snapshot_id) + return "oss-%s" % snapshot_name + + def _encode_name(self, name): + uuid_str = name.replace("-", "") + vol_uuid = uuid.UUID('urn:uuid:%s' % uuid_str) + vol_encoded = base64.b64encode(vol_uuid.bytes) + + # 3par doesn't allow +, nor / + vol_encoded = vol_encoded.replace('+', '.') + vol_encoded = vol_encoded.replace('/', '-') + # strip off the == as 3par doesn't like those. + vol_encoded = vol_encoded.replace('=', '') + return vol_encoded + + def _capacity_from_size(self, vol_size): + + # because 3PAR volume sizes are in + # Mebibytes, Gigibytes, not Megabytes. 
+ MB = 1000L + MiB = 1.048576 + + if int(vol_size) == 0: + capacity = MB # default: 1GB + else: + capacity = vol_size * MB + + capacity = int(round(capacity / MiB)) + return capacity + + def _cli_run(self, verb, cli_args): + """ Runs a CLI command over SSH, without doing any result parsing. """ + cli_arg_strings = [] + if cli_args: + for k, v in cli_args.items(): + if k == '': + cli_arg_strings.append(" %s" % k) + else: + cli_arg_strings.append(" %s=%s" % (k, v)) + + cmd = verb + ''.join(cli_arg_strings) + LOG.debug("SSH CMD = %s " % cmd) + + (stdout, stderr) = self._run_ssh(cmd, False) + + # we have to strip out the input and exit lines + tmp = stdout.split("\r\n") + out = tmp[5:len(tmp) - 2] + return out + + def _ssh_execute(self, ssh, cmd, + check_exit_code=True): + """ + We have to do this in order to get CSV output + from the CLI command. We first have to issue + a command to tell the CLI that we want the output + to be formatted in CSV, then we issue the real + command. + """ + LOG.debug(_('Running cmd (SSH): %s'), cmd) + + channel = ssh.invoke_shell() + stdin_stream = channel.makefile('wb') + stdout_stream = channel.makefile('rb') + stderr_stream = channel.makefile('rb') + + stdin_stream.write('''setclienv csvtable 1 +%s +exit +''' % cmd) + + # stdin.write('process_input would go here') + # stdin.flush() + + # NOTE(justinsb): This seems suspicious... + # ...other SSH clients have buffering issues with this approach + stdout = stdout_stream.read() + stderr = stderr_stream.read() + stdin_stream.close() + stdout_stream.close() + stderr_stream.close() + + exit_status = channel.recv_exit_status() + + # exit_status == -1 if no exit code was returned + if exit_status != -1: + LOG.debug(_('Result was %s') % exit_status) + if check_exit_code and exit_status != 0: + raise exception.ProcessExecutionError(exit_code=exit_status, + stdout=stdout, + stderr=stderr, + cmd=cmd) + channel.close() + return (stdout, stderr) + + def _run_ssh(self, command, check_exit=True, attempts=1): + if not self.sshpool: + self.sshpool = utils.SSHPool(self.config.san_ip, + self.config.san_ssh_port, + self.config.ssh_conn_timeout, + self.config.san_login, + password=self.config.san_password, + privatekey= + self.config.san_private_key, + min_size= + self.config.ssh_min_pool_conn, + max_size= + self.config.ssh_max_pool_conn) + try: + total_attempts = attempts + with self.sshpool.item() as ssh: + while attempts > 0: + attempts -= 1 + try: + return self._ssh_execute(ssh, command, + check_exit_code=check_exit) + except Exception as e: + LOG.error(e) + greenthread.sleep(randint(20, 500) / 100.0) + raise paramiko.SSHException(_("SSH Command failed after " + "'%(total_attempts)r' attempts" + ": '%(command)s'"), locals()) + except Exception as e: + LOG.error(_("Error running ssh command: %s") % command) + raise e + + def _delete_3par_host(self, hostname): + self._cli_run('removehost %s' % hostname, None) + + def _create_3par_vlun(self, volume, hostname): + out = self._cli_run('createvlun %s auto %s' % (volume, hostname), None) + if out and len(out) > 1: + if "must be in the same domain" in out[0]: + err = out[0].strip() + err = err + " " + out[1].strip() + raise exception.Invalid3PARDomain(err=err) + + def _safe_hostname(self, hostname): + """ + We have to use a safe hostname length + for 3PAR host names. + """ + try: + index = hostname.index('.') + except ValueError: + # couldn't find it + index = len(hostname) + + # we'll just chop this off for now. 
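+        # For example (illustrative): 'compute-1.example.org' is cut at
+        # the first '.' to 'compute-1', and a dotless hostname longer
+        # than 23 characters is truncated to its first 23 characters.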
+ if index > 23: + index = 23 + + return hostname[:index] + + def _get_3par_host(self, hostname): + out = self._cli_run('showhost -verbose %s' % (hostname), None) + LOG.debug("OUTPUT = \n%s" % (pprint.pformat(out))) + host = {'id': None, 'name': None, + 'domain': None, + 'descriptors': {}, + 'iSCSIPaths': [], + 'FCPaths': []} + + if out: + err = out[0] + if err == 'no hosts listed': + msg = {'code': 'NON_EXISTENT_HOST', + 'desc': "HOST '%s' was not found" % hostname} + raise hpexceptions.HTTPNotFound(msg) + + # start parsing the lines after the header line + for line in out[1:]: + if line == '': + break + tmp = line.split(',') + paths = {} + + LOG.debug("line = %s" % (pprint.pformat(tmp))) + host['id'] = tmp[0] + host['name'] = tmp[1] + + portPos = tmp[4] + LOG.debug("portPos = %s" % (pprint.pformat(portPos))) + if portPos == '---': + portPos = None + else: + port = portPos.split(':') + portPos = {'node': int(port[0]), 'slot': int(port[1]), + 'cardPort': int(port[2])} + + paths['portPos'] = portPos + + # If FC entry + if tmp[5] == 'n/a': + paths['wwn'] = tmp[3] + host['FCPaths'].append(paths) + # else iSCSI entry + else: + paths['name'] = tmp[3] + paths['ipAddr'] = tmp[5] + host['iSCSIPaths'].append(paths) + + # find the offset to the description stuff + offset = 0 + for line in out: + if line[:15] == '---------- Host': + break + else: + offset += 1 + + info = out[offset + 2] + tmp = info.split(':') + host['domain'] = tmp[1] + + info = out[offset + 4] + tmp = info.split(':') + host['descriptors']['location'] = tmp[1] + + info = out[offset + 5] + tmp = info.split(':') + host['descriptors']['ipAddr'] = tmp[1] + + info = out[offset + 6] + tmp = info.split(':') + host['descriptors']['os'] = tmp[1] + + info = out[offset + 7] + tmp = info.split(':') + host['descriptors']['model'] = tmp[1] + + info = out[offset + 8] + tmp = info.split(':') + host['descriptors']['contact'] = tmp[1] + + info = out[offset + 9] + tmp = info.split(':') + host['descriptors']['comment'] = tmp[1] + + return host + + def get_ports(self): + # First get the active FC ports + out = self._cli_run('showport', None) + + # strip out header + # N:S:P,Mode,State,----Node_WWN----,-Port_WWN/HW_Addr-,Type, + # Protocol,Label,Partner,FailoverState + out = out[1:len(out) - 2] + + ports = {'FC': [], 'iSCSI': []} + for line in out: + tmp = line.split(',') + + if tmp: + if tmp[1] == 'target' and tmp[2] == 'ready': + if tmp[6] == 'FC': + ports['FC'].append(tmp[4]) + + # now get the active iSCSI ports + out = self._cli_run('showport -iscsi', None) + + # strip out header + # N:S:P,State,IPAddr,Netmask,Gateway, + # TPGT,MTU,Rate,DHCP,iSNS_Addr,iSNS_Port + out = out[1:len(out) - 2] + for line in out: + tmp = line.split(',') + + if tmp: + if tmp[1] == 'ready': + ports['iSCSI'].append(tmp[2]) + + LOG.debug("PORTS = %s" % pprint.pformat(ports)) + return ports + + def get_volume_stats(self, refresh, client): + if refresh: + self._update_volume_stats(client) + + return self.stats + + def _update_volume_stats(self, client): + # const to convert MiB to GB + const = 0.0009765625 + + # storage_protocol and volume_backend_name are + # set in the child classes + stats = {'driver_version': '1.0', + 'free_capacity_gb': 'unknown', + 'reserved_percentage': 0, + 'storage_protocol': None, + 'total_capacity_gb': 'unknown', + 'vendor_name': 'Hewlett-Packard', + 'volume_backend_name': None} + + try: + cpg = client.getCPG(self.config.hp3par_cpg) + if 'limitMiB' not in cpg['SDGrowth']: + total_capacity = 'infinite' + free_capacity = 'infinite' + else: + 
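+                # const is 1/1024, i.e. MiB -> GiB; e.g. a limitMiB of
+                # 1048576 works out to 1024 total_capacity_gb.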
total_capacity = int(cpg['SDGrowth']['limitMiB'] * const) + free_capacity = int((cpg['SDGrowth']['limitMiB'] - + cpg['UsrUsage']['usedMiB']) * const) + + stats['total_capacity_gb'] = total_capacity + stats['free_capacity_gb'] = free_capacity + except hpexceptions.HTTPNotFound: + err = (_("CPG (%s) doesn't exist on array") + % self.config.hp3par_cpg) + LOG.error(err) + raise exception.InvalidInput(reason=err) + + self.stats = stats + + def create_vlun(self, volume, host, client): + """ + In order to export a volume on a 3PAR box, we have to + create a VLUN. + """ + volume_name = self._get_3par_vol_name(volume['id']) + self._create_3par_vlun(volume_name, host['name']) + return client.getVLUN(volume_name) + + def delete_vlun(self, volume, connector, client): + hostname = self._safe_hostname(connector['host']) + + volume_name = self._get_3par_vol_name(volume['id']) + vlun = client.getVLUN(volume_name) + client.deleteVLUN(volume_name, vlun['lun'], hostname) + self._delete_3par_host(hostname) + + def _get_volume_type(self, type_id): + ctxt = context.get_admin_context() + return volume_types.get_volume_type(ctxt, type_id) + + def _get_volume_type_value(self, volume_type, key, default=None): + if volume_type is not None: + specs = volume_type.get('extra_specs') + if key in specs: + return specs[key] + else: + return default + else: + return default + + def get_persona_type(self, volume): + default_persona = self.valid_persona_values[0] + type_id = volume.get('volume_type_id', None) + volume_type = None + if type_id is not None: + volume_type = self._get_volume_type(type_id) + persona_value = self._get_volume_type_value(volume_type, 'persona', + default_persona) + if persona_value not in self.valid_persona_values: + err = _("Must specify a valid persona %(valid)s, " + "value '%(persona)s' is invalid.") % \ + ({'valid': self.valid_persona_values, + 'persona': persona_value}) + raise exception.InvalidInput(reason=err) + # persona is set by the id so remove the text and return the id + # i.e for persona '1 - Generic' returns 1 + persona_id = persona_value.split(' ') + return persona_id[0] + + def create_volume(self, volume, client): + LOG.debug("CREATE VOLUME (%s : %s %s)" % + (volume['display_name'], volume['name'], + self._get_3par_vol_name(volume['id']))) + try: + comments = {'volume_id': volume['id'], + 'name': volume['name'], + 'type': 'OpenStack'} + + name = volume.get('display_name', None) + if name: + comments['display_name'] = name + + # get the options supported by volume types + volume_type = None + type_id = volume.get('volume_type_id', None) + if type_id is not None: + volume_type = self._get_volume_type(type_id) + + cpg = self._get_volume_type_value(volume_type, 'cpg', + self.config.hp3par_cpg) + + # if provisioning is not set use thin + default_prov = self.valid_prov_values[0] + prov_value = self._get_volume_type_value(volume_type, + 'provisioning', + default_prov) + # check for valid provisioning type + if prov_value not in self.valid_prov_values: + err = _("Must specify a valid provisioning type %(valid)s, " + "value '%(prov)s' is invalid.") % \ + ({'valid': self.valid_prov_values, + 'prov': prov_value}) + raise exception.InvalidInput(reason=err) + + ttpv = True + if prov_value == "full": + ttpv = False + + # default to hp3par_cpg if hp3par_cpg_snap is not set. 
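+
+            # The extra specs consumed here ('cpg', 'provisioning',
+            # 'persona', 'snap_cpg') can be attached to a volume type,
+            # e.g. roughly (cinder CLI, values illustrative):
+            #   cinder type-create 3par-thin
+            #   cinder type-key 3par-thin set provisioning=thin \
+            #       persona='1 - Generic'
+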
+            if self.config.hp3par_cpg_snap == "":
+                snap_default = self.config.hp3par_cpg
+            else:
+                snap_default = self.config.hp3par_cpg_snap
+            snap_cpg = self._get_volume_type_value(volume_type,
+                                                   'snap_cpg',
+                                                   snap_default)
+
+            # check for valid persona even if we don't use it until
+            # attach time; this gives the end user notice that the
+            # persona type is invalid at volume creation time
+            self.get_persona_type(volume)
+
+            if type_id is not None:
+                comments['volume_type_name'] = volume_type.get('name')
+                comments['volume_type_id'] = type_id
+
+            extras = {'comment': json.dumps(comments),
+                      'snapCPG': snap_cpg,
+                      'tpvv': ttpv}
+
+            capacity = self._capacity_from_size(volume['size'])
+            volume_name = self._get_3par_vol_name(volume['id'])
+            client.createVolume(volume_name, cpg, capacity, extras)
+
+        except hpexceptions.HTTPConflict:
+            raise exception.Duplicate(_("Volume (%s) already exists on array")
+                                      % volume_name)
+        except hpexceptions.HTTPBadRequest as ex:
+            LOG.error(str(ex))
+            raise exception.Invalid(ex.get_description())
+        except exception.InvalidInput as ex:
+            LOG.error(str(ex))
+            raise ex
+        except Exception as ex:
+            LOG.error(str(ex))
+            raise exception.CinderException(str(ex))
+
+        metadata = {'3ParName': volume_name, 'CPG': self.config.hp3par_cpg,
+                    'snapCPG': extras['snapCPG']}
+        return metadata
+
+    def _copy_volume(self, src_name, dest_name):
+        self._cli_run('createvvcopy -p %s %s' % (src_name, dest_name), None)
+
+    def _get_volume_state(self, vol_name):
+        out = self._cli_run('showvv -state %s' % vol_name, None)
+        status = None
+        if out:
+            # out[0] is the header
+            info = out[1].split(',')
+            status = info[5]
+
+        return status
+
+    @utils.synchronized('3parclone', external=True)
+    def create_cloned_volume(self, volume, src_vref, client):
+
+        try:
+            orig_name = self._get_3par_vol_name(volume['source_volid'])
+            vol_name = self._get_3par_vol_name(volume['id'])
+            # We need to create a new volume first.  Otherwise you
+            # can't delete the original
+            new_vol = self.create_volume(volume, client)
+
+            # make the 3PAR copy the contents.
+            # can't delete the original until the copy is done.
+            self._copy_volume(orig_name, vol_name)
+
+            # this can take a long time to complete
+            done = False
+            while not done:
+                status = self._get_volume_state(vol_name)
+                if status == 'normal':
+                    done = True
+                elif status == 'copy_target':
+                    LOG.debug("3Par still copying %s => %s"
+                              % (orig_name, vol_name))
+                else:
+                    msg = _("Unexpected state while cloning %s") % status
+                    LOG.warn(msg)
+                    raise exception.CinderException(msg)
+
+                if not done:
+                    # wait 5 seconds between tests
+                    time.sleep(5)
+
+            return new_vol
+        except hpexceptions.HTTPForbidden:
+            raise exception.NotAuthorized()
+        except hpexceptions.HTTPNotFound:
+            raise exception.NotFound()
+        except Exception as ex:
+            LOG.error(str(ex))
+            raise exception.CinderException(str(ex))
+
+    def delete_volume(self, volume, client):
+        try:
+            volume_name = self._get_3par_vol_name(volume['id'])
+            client.deleteVolume(volume_name)
+        except hpexceptions.HTTPNotFound as ex:
+            # We'll let this act as if it worked
+            # it helps clean up the cinder entries.
+            LOG.error(str(ex))
+        except hpexceptions.HTTPForbidden as ex:
+            LOG.error(str(ex))
+            raise exception.NotAuthorized(ex.get_description())
+        except Exception as ex:
+            LOG.error(str(ex))
+            raise exception.CinderException(str(ex))
+
+    def create_volume_from_snapshot(self, volume, snapshot, client):
+        """Creates a volume from a snapshot.
+
+        TODO: support using the size from the user.
+        """
+        LOG.debug("Create Volume from Snapshot\n%s\n%s" %
+                  (pprint.pformat(volume['display_name']),
+                   pprint.pformat(snapshot['display_name'])))
+
+        if snapshot['volume_size'] != volume['size']:
+            err = _("You cannot change size of the volume. It must "
+                    "be the same as the snapshot.")
+            LOG.error(err)
+            raise exception.InvalidInput(reason=err)
+
+        try:
+            snap_name = self._get_3par_snap_name(snapshot['id'])
+            vol_name = self._get_3par_vol_name(volume['id'])
+
+            extra = {'volume_id': volume['id'],
+                     'snapshot_id': snapshot['id']}
+            name = snapshot.get('display_name', None)
+            if name:
+                extra['name'] = name
+
+            description = snapshot.get('display_description', None)
+            if description:
+                extra['description'] = description
+
+            optional = {'comment': json.dumps(extra),
+                        'readOnly': False}
+
+            client.createSnapshot(vol_name, snap_name, optional)
+        except hpexceptions.HTTPForbidden:
+            raise exception.NotAuthorized()
+        except hpexceptions.HTTPNotFound:
+            raise exception.NotFound()
+
+    def create_snapshot(self, snapshot, client):
+        LOG.debug("Create Snapshot\n%s" % pprint.pformat(snapshot))
+
+        try:
+            snap_name = self._get_3par_snap_name(snapshot['id'])
+            vol_name = self._get_3par_vol_name(snapshot['volume_id'])
+
+            extra = {'volume_name': snapshot['volume_name']}
+            vol_id = snapshot.get('volume_id', None)
+            if vol_id:
+                extra['volume_id'] = vol_id
+
+            try:
+                extra['name'] = snapshot['display_name']
+            except AttributeError:
+                pass
+
+            try:
+                extra['description'] = snapshot['display_description']
+            except AttributeError:
+                pass
+
+            optional = {'comment': json.dumps(extra),
+                        'readOnly': True}
+            if self.config.hp3par_snapshot_expiration:
+                optional['expirationHours'] = (
+                    self.config.hp3par_snapshot_expiration)
+
+            if self.config.hp3par_snapshot_retention:
+                optional['retentionHours'] = (
+                    self.config.hp3par_snapshot_retention)
+
+            client.createSnapshot(snap_name, vol_name, optional)
+        except hpexceptions.HTTPForbidden:
+            raise exception.NotAuthorized()
+        except hpexceptions.HTTPNotFound:
+            raise exception.NotFound()
+
+    def delete_snapshot(self, snapshot, client):
+        LOG.debug("Delete Snapshot\n%s" % pprint.pformat(snapshot))
+
+        try:
+            snap_name = self._get_3par_snap_name(snapshot['id'])
+            client.deleteVolume(snap_name)
+        except hpexceptions.HTTPForbidden:
+            raise exception.NotAuthorized()
+        except hpexceptions.HTTPNotFound as ex:
+            LOG.error(str(ex))
diff --git a/cinder/volume/drivers/san/hp/hp_3par_fc.py b/cinder/volume/drivers/san/hp/hp_3par_fc.py
new file mode 100644
index 0000000000..7af686268f
--- /dev/null
+++ b/cinder/volume/drivers/san/hp/hp_3par_fc.py
@@ -0,0 +1,259 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+#
+# (c) Copyright 2013 Hewlett-Packard Development Company, L.P.
+# All Rights Reserved.
+#
+# Copyright 2012 OpenStack LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+"""
+Volume driver for HP 3PAR Storage array. This driver requires 3.1.2 firmware
+on the 3PAR array.
+
+You will need to install the python hp3parclient.
+sudo pip install hp3parclient
+
+Set the following in the cinder.conf file to enable the
+3PAR Fibre Channel Driver along with the required flags:
+
+volume_driver=cinder.volume.drivers.san.hp.hp_3par_fc.HP3PARFCDriver
+"""
+
+from hp3parclient import client
+from hp3parclient import exceptions as hpexceptions
+from oslo.config import cfg
+
+from cinder import exception
+from cinder.openstack.common import log as logging
+from cinder import utils
+import cinder.volume.driver
+from cinder.volume.drivers.san.hp import hp_3par_common as hpcommon
+from cinder.volume.drivers.san import san
+
+VERSION = 1.0
+LOG = logging.getLogger(__name__)
+
+
+class HP3PARFCDriver(cinder.volume.driver.FibreChannelDriver):
+    """OpenStack Fibre Channel driver to enable 3PAR storage array.
+
+    Version history:
+        1.0 - Initial driver
+
+    """
+
+    def __init__(self, *args, **kwargs):
+        super(HP3PARFCDriver, self).__init__(*args, **kwargs)
+        self.client = None
+        self.common = None
+        self.configuration.append_config_values(hpcommon.hp3par_opts)
+        self.configuration.append_config_values(san.san_opts)
+
+    def _init_common(self):
+        return hpcommon.HP3PARCommon(self.configuration)
+
+    def _check_flags(self):
+        """Sanity check to ensure we have required options set."""
+        required_flags = ['hp3par_api_url', 'hp3par_username',
+                          'hp3par_password',
+                          'san_ip', 'san_login', 'san_password']
+        self.common.check_flags(self.configuration, required_flags)
+
+    def _create_client(self):
+        return client.HP3ParClient(self.configuration.hp3par_api_url)
+
+    def get_volume_stats(self, refresh):
+        stats = self.common.get_volume_stats(refresh, self.client)
+        stats['storage_protocol'] = 'FC'
+        backend_name = self.configuration.safe_get('volume_backend_name')
+        stats['volume_backend_name'] = backend_name or self.__class__.__name__
+        return stats
+
+    def do_setup(self, context):
+        self.common = self._init_common()
+        self._check_flags()
+        self.client = self._create_client()
+        if self.configuration.hp3par_debug:
+            self.client.debug_rest(True)
+
+        try:
+            LOG.debug("Connecting to 3PAR")
+            self.client.login(self.configuration.hp3par_username,
+                              self.configuration.hp3par_password)
+        except hpexceptions.HTTPUnauthorized as ex:
+            LOG.warning("Failed to connect to 3PAR (%s) because %s" %
+                        (self.configuration.hp3par_api_url, str(ex)))
+            msg = _("Login to 3PAR array invalid")
+            raise exception.InvalidInput(reason=msg)
+
+        # make sure the CPG exists
+        try:
+            cpg = self.client.getCPG(self.configuration.hp3par_cpg)
+        except hpexceptions.HTTPNotFound as ex:
+            err = (_("CPG (%s) doesn't exist on array")
+                   % self.configuration.hp3par_cpg)
+            LOG.error(err)
+            raise exception.InvalidInput(reason=err)
+
+        if ('domain' not in cpg or
+                cpg['domain'] != self.configuration.hp3par_domain):
+            err = (_("CPG's domain '%s' and config option hp3par_domain "
+                     "'%s' must be the same") %
+                   (cpg.get('domain'), self.configuration.hp3par_domain))
+            LOG.error(err)
+            raise exception.InvalidInput(reason=err)
+
+    def check_for_setup_error(self):
+        """Returns an error if prerequisites aren't met."""
+        self._check_flags()
+
+    @utils.synchronized('3par-vol', external=True)
+    def create_volume(self, volume):
+        metadata = self.common.create_volume(volume, self.client)
+        return {'metadata': metadata}
+
+    def create_cloned_volume(self, volume, src_vref):
+        new_vol = self.common.create_cloned_volume(volume, src_vref,
+                                                   self.client)
+        return {'metadata': new_vol}
+
+    @utils.synchronized('3par-vol', external=True)
+    def delete_volume(self, volume):
+        self.common.delete_volume(volume, self.client)
+
+
@utils.synchronized('3par-vol', external=True) + def create_volume_from_snapshot(self, volume, snapshot): + """ + Creates a volume from a snapshot. + + TODO: support using the size from the user. + """ + self.common.create_volume_from_snapshot(volume, snapshot, self.client) + + @utils.synchronized('3par-snap', external=True) + def create_snapshot(self, snapshot): + self.common.create_snapshot(snapshot, self.client) + + @utils.synchronized('3par-snap', external=True) + def delete_snapshot(self, snapshot): + self.common.delete_snapshot(snapshot, self.client) + + @utils.synchronized('3par-attach', external=True) + def initialize_connection(self, volume, connector): + """Assigns the volume to a server. + + Assign any created volume to a compute node/host so that it can be + used from that host. + + The driver returns a driver_volume_type of 'fibre_channel'. + The target_wwn can be a single entry or a list of wwns that + correspond to the list of remote wwn(s) that will export the volume. + Example return values: + + { + 'driver_volume_type': 'fibre_channel' + 'data': { + 'target_discovered': True, + 'target_lun': 1, + 'target_wwn': '1234567890123', + } + } + + or + + { + 'driver_volume_type': 'fibre_channel' + 'data': { + 'target_discovered': True, + 'target_lun': 1, + 'target_wwn': ['1234567890123', '0987654321321'], + } + } + + + Steps to export a volume on 3PAR + * Create a host on the 3par with the target wwn + * Create a VLUN for that HOST with the volume we want to export. + + """ + # we have to make sure we have a host + host = self._create_host(volume, connector) + + # now that we have a host, create the VLUN + vlun = self.common.create_vlun(volume, host, self.client) + + ports = self.common.get_ports() + + info = {'driver_volume_type': 'fibre_channel', + 'data': {'target_lun': vlun['lun'], + 'target_discovered': True, + 'target_wwn': ports['FC']}} + return info + + @utils.synchronized('3par-attach', external=True) + def terminate_connection(self, volume, connector, force): + """ + Driver entry point to unattach a volume from an instance. + """ + self.common.delete_vlun(volume, connector, self.client) + pass + + def _create_3par_fibrechan_host(self, hostname, wwn, domain, persona_id): + out = self.common._cli_run('createhost -persona %s -domain %s %s %s' + % (persona_id, domain, + hostname, " ".join(wwn)), None) + if out and len(out) > 1: + if "already used by host" in out[1]: + err = out[1].strip() + info = _("The hostname must be called '%s'") % hostname + raise exception.Duplicate3PARHost(err=err, info=info) + + def _modify_3par_fibrechan_host(self, hostname, wwn): + # when using -add, you can not send the persona or domain options + out = self.common._cli_run('createhost -add %s %s' + % (hostname, " ".join(wwn)), None) + + def _create_host(self, volume, connector): + """ + This is a 3PAR host entry for exporting volumes + via active VLUNs. 
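+
+        For example, the createhost call issued for a new host looks
+        roughly like this (WWNs illustrative):
+
+            createhost -persona 1 -domain OpenStack compute-1 \
+                10000000C9548C12 10000000C9548C13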
+ """ + host = None + hostname = self.common._safe_hostname(connector['host']) + try: + host = self.common._get_3par_host(hostname) + if not host['FCPaths']: + self._modify_3par_fibrechan_host(hostname, connector['wwpns']) + host = self.common._get_3par_host(hostname) + except hpexceptions.HTTPNotFound as ex: + # get persona from the volume type extra specs + persona_id = self.common.get_persona_type(volume) + # host doesn't exist, we have to create it + self._create_3par_fibrechan_host(hostname, connector['wwpns'], + self.configuration.hp3par_domain, + persona_id) + host = self.common._get_3par_host(hostname) + + return host + + @utils.synchronized('3par-exp', external=True) + def create_export(self, context, volume): + pass + + @utils.synchronized('3par-exp', external=True) + def ensure_export(self, context, volume): + pass + + @utils.synchronized('3par-exp', external=True) + def remove_export(self, context, volume): + pass diff --git a/cinder/volume/drivers/san/hp/hp_3par_iscsi.py b/cinder/volume/drivers/san/hp/hp_3par_iscsi.py new file mode 100644 index 0000000000..f1b1b83552 --- /dev/null +++ b/cinder/volume/drivers/san/hp/hp_3par_iscsi.py @@ -0,0 +1,279 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# (c) Copyright 2012-2013 Hewlett-Packard Development Company, L.P. +# All Rights Reserved. +# +# Copyright 2012 OpenStack LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +""" +Volume driver for HP 3PAR Storage array. This driver requires 3.1.2 firmware +on the 3PAR array. + +You will need to install the python hp3parclient. +sudo pip install hp3parclient + +Set the following in the cinder.conf file to enable the +3PAR iSCSI Driver along with the required flags: + +volume_driver=cinder.volume.drivers.san.hp.hp_3par_iscsi.HP3PARISCSIDriver +""" + +from hp3parclient import client +from hp3parclient import exceptions as hpexceptions + +from cinder import exception +from cinder.openstack.common import log as logging +from cinder import utils +import cinder.volume.driver +from cinder.volume.drivers.san.hp import hp_3par_common as hpcommon +from cinder.volume.drivers.san import san + +VERSION = 1.0 +LOG = logging.getLogger(__name__) + + +class HP3PARISCSIDriver(cinder.volume.driver.ISCSIDriver): + """OpenStack iSCSI driver to enable 3PAR storage array. 
+
+    Version history:
+        1.0 - Initial driver
+
+    """
+    def __init__(self, *args, **kwargs):
+        super(HP3PARISCSIDriver, self).__init__(*args, **kwargs)
+        self.client = None
+        self.common = None
+        self.configuration.append_config_values(hpcommon.hp3par_opts)
+        self.configuration.append_config_values(san.san_opts)
+
+    def _init_common(self):
+        return hpcommon.HP3PARCommon(self.configuration)
+
+    def _check_flags(self):
+        """Sanity check to ensure we have required options set."""
+        required_flags = ['hp3par_api_url', 'hp3par_username',
+                          'hp3par_password', 'iscsi_ip_address',
+                          'iscsi_port', 'san_ip', 'san_login',
+                          'san_password']
+        self.common.check_flags(self.configuration, required_flags)
+
+    def _create_client(self):
+        return client.HP3ParClient(self.configuration.hp3par_api_url)
+
+    def get_volume_stats(self, refresh):
+        stats = self.common.get_volume_stats(refresh, self.client)
+        stats['storage_protocol'] = 'iSCSI'
+        backend_name = self.configuration.safe_get('volume_backend_name')
+        stats['volume_backend_name'] = backend_name or self.__class__.__name__
+        return stats
+
+    def do_setup(self, context):
+        self.common = self._init_common()
+        self._check_flags()
+        self.client = self._create_client()
+        if self.configuration.hp3par_debug:
+            self.client.debug_rest(True)
+
+        try:
+            LOG.debug("Connecting to 3PAR")
+            self.client.login(self.configuration.hp3par_username,
+                              self.configuration.hp3par_password)
+        except hpexceptions.HTTPUnauthorized as ex:
+            LOG.warning("Failed to connect to 3PAR (%s) because %s" %
+                        (self.configuration.hp3par_api_url, str(ex)))
+            msg = _("Login to 3PAR array invalid")
+            raise exception.InvalidInput(reason=msg)
+
+        # make sure the CPG exists
+        try:
+            cpg = self.client.getCPG(self.configuration.hp3par_cpg)
+        except hpexceptions.HTTPNotFound as ex:
+            err = (_("CPG (%s) doesn't exist on array")
+                   % self.configuration.hp3par_cpg)
+            LOG.error(err)
+            raise exception.InvalidInput(reason=err)
+
+        if ('domain' not in cpg or
+                cpg['domain'] != self.configuration.hp3par_domain):
+            err = (_("CPG's domain '%s' and config option hp3par_domain "
+                     "'%s' must be the same") %
+                   (cpg.get('domain'), self.configuration.hp3par_domain))
+            LOG.error(err)
+            raise exception.InvalidInput(reason=err)
+
+        # make sure ssh works.
+        self._iscsi_discover_target_iqn(self.configuration.iscsi_ip_address)
+
+    def check_for_setup_error(self):
+        """Returns an error if prerequisites aren't met."""
+        self._check_flags()
+
+    @utils.synchronized('3par-vol', external=True)
+    def create_volume(self, volume):
+        metadata = self.common.create_volume(volume, self.client)
+
+        return {'provider_location': "%s:%s" %
+                (self.configuration.iscsi_ip_address,
+                 self.configuration.iscsi_port),
+                'metadata': metadata}
+
+    def create_cloned_volume(self, volume, src_vref):
+        """Clone an existing volume."""
+        new_vol = self.common.create_cloned_volume(volume, src_vref,
+                                                   self.client)
+        return {'provider_location': "%s:%s" %
+                (self.configuration.iscsi_ip_address,
+                 self.configuration.iscsi_port),
+                'metadata': new_vol}
+
+    @utils.synchronized('3par-vol', external=True)
+    def delete_volume(self, volume):
+        self.common.delete_volume(volume, self.client)
+
+    @utils.synchronized('3par-vol', external=True)
+    def create_volume_from_snapshot(self, volume, snapshot):
+        """Creates a volume from a snapshot.
+
+        TODO: support using the size from the user.
+ """ + self.common.create_volume_from_snapshot(volume, snapshot, self.client) + + @utils.synchronized('3par-snap', external=True) + def create_snapshot(self, snapshot): + self.common.create_snapshot(snapshot, self.client) + + @utils.synchronized('3par-snap', external=True) + def delete_snapshot(self, snapshot): + self.common.delete_snapshot(snapshot, self.client) + + @utils.synchronized('3par-attach', external=True) + def initialize_connection(self, volume, connector): + """Assigns the volume to a server. + + Assign any created volume to a compute node/host so that it can be + used from that host. + + This driver returns a driver_volume_type of 'iscsi'. + The format of the driver data is defined in _get_iscsi_properties. + Example return value: + + { + 'driver_volume_type': 'iscsi' + 'data': { + 'target_discovered': True, + 'target_iqn': 'iqn.2010-10.org.openstack:volume-00000001', + 'target_protal': '127.0.0.1:3260', + 'volume_id': 1, + } + } + + Steps to export a volume on 3PAR + * Get the 3PAR iSCSI iqn + * Create a host on the 3par + * create vlun on the 3par + """ + # get the target_iqn on the 3par interface. + target_iqn = self._iscsi_discover_target_iqn( + self.configuration.iscsi_ip_address) + + # we have to make sure we have a host + host = self._create_host(volume, connector) + + # now that we have a host, create the VLUN + vlun = self.common.create_vlun(volume, host, self.client) + + info = {'driver_volume_type': 'iscsi', + 'data': {'target_portal': "%s:%s" % + (self.configuration.iscsi_ip_address, + self.configuration.iscsi_port), + 'target_iqn': target_iqn, + 'target_lun': vlun['lun'], + 'target_discovered': True + } + } + return info + + @utils.synchronized('3par-attach', external=True) + def terminate_connection(self, volume, connector, force): + """ + Driver entry point to unattach a volume from an instance. + """ + self.common.delete_vlun(volume, connector, self.client) + + def _iscsi_discover_target_iqn(self, remote_ip): + result = self.common._cli_run('showport -ids', None) + + iqn = None + if result: + # first line is header + result = result[1:] + for line in result: + info = line.split(",") + if info and len(info) > 2: + if info[1] == remote_ip: + iqn = info[2] + + return iqn + + def _create_3par_iscsi_host(self, hostname, iscsi_iqn, domain, persona_id): + cmd = 'createhost -iscsi -persona %s -domain %s %s %s' % \ + (persona_id, domain, hostname, iscsi_iqn) + out = self.common._cli_run(cmd, None) + if out and len(out) > 1: + if "already used by host" in out[1]: + err = out[1].strip() + info = _("The hostname must be called '%s'") % hostname + raise exception.Duplicate3PARHost(err=err, info=info) + + def _modify_3par_iscsi_host(self, hostname, iscsi_iqn): + # when using -add, you can not send the persona or domain options + self.common._cli_run('createhost -iscsi -add %s %s' + % (hostname, iscsi_iqn), None) + + def _create_host(self, volume, connector): + """ + This is a 3PAR host entry for exporting volumes + via active VLUNs. 
+ """ + # make sure we don't have the host already + host = None + hostname = self.common._safe_hostname(connector['host']) + try: + host = self.common._get_3par_host(hostname) + if not host['iSCSIPaths']: + self._modify_3par_iscsi_host(hostname, connector['initiator']) + host = self.common._get_3par_host(hostname) + except hpexceptions.HTTPNotFound: + # get persona from the volume type extra specs + persona_id = self.common.get_persona_type(volume) + # host doesn't exist, we have to create it + self._create_3par_iscsi_host(hostname, connector['initiator'], + self.configuration.hp3par_domain, + persona_id) + host = self.common._get_3par_host(hostname) + + return host + + @utils.synchronized('3par-exp', external=True) + def create_export(self, context, volume): + pass + + @utils.synchronized('3par-exp', external=True) + def ensure_export(self, context, volume): + pass + + @utils.synchronized('3par-exp', external=True) + def remove_export(self, context, volume): + pass diff --git a/cinder/volume/drivers/san/hp_lefthand.py b/cinder/volume/drivers/san/hp_lefthand.py new file mode 100644 index 0000000000..6dff285a0a --- /dev/null +++ b/cinder/volume/drivers/san/hp_lefthand.py @@ -0,0 +1,314 @@ +# Copyright 2012 OpenStack LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +HP Lefthand SAN ISCSI Driver. + +The driver communicates to the backend aka Cliq via SSH to perform all the +operations on the SAN. +""" +from lxml import etree + +from cinder import exception +from cinder.openstack.common import log as logging +from cinder.volume.drivers.san.san import SanISCSIDriver + + +LOG = logging.getLogger(__name__) + + +class HpSanISCSIDriver(SanISCSIDriver): + """Executes commands relating to HP/Lefthand SAN ISCSI volumes. + + We use the CLIQ interface, over SSH. + + Rough overview of CLIQ commands used: + + :createVolume: (creates the volume) + + :getVolumeInfo: (to discover the IQN etc) + + :getClusterInfo: (to discover the iSCSI target IP address) + + :assignVolumeChap: (exports it with CHAP security) + + The 'trick' here is that the HP SAN enforces security by default, so + normally a volume mount would need both to configure the SAN in the volume + layer and do the mount on the compute layer. Multi-layer operations are + not catered for at the moment in the cinder architecture, so instead we + share the volume using CHAP at volume creation time. Then the mount need + only use those CHAP credentials, so can take place exclusively in the + compute layer. 
+ """ + + device_stats = {} + + def __init__(self, *args, **kwargs): + super(HpSanISCSIDriver, self).__init__(*args, **kwargs) + self.cluster_vip = None + + def _cliq_run(self, verb, cliq_args, check_exit_code=True): + """Runs a CLIQ command over SSH, without doing any result parsing""" + cliq_arg_strings = [] + for k, v in cliq_args.items(): + cliq_arg_strings.append(" %s=%s" % (k, v)) + cmd = verb + ''.join(cliq_arg_strings) + + return self._run_ssh(cmd, check_exit_code) + + def _cliq_run_xml(self, verb, cliq_args, check_cliq_result=True): + """Runs a CLIQ command over SSH, parsing and checking the output""" + cliq_args['output'] = 'XML' + (out, _err) = self._cliq_run(verb, cliq_args, check_cliq_result) + + LOG.debug(_("CLIQ command returned %s"), out) + + result_xml = etree.fromstring(out) + if check_cliq_result: + response_node = result_xml.find("response") + if response_node is None: + msg = (_("Malformed response to CLIQ command " + "%(verb)s %(cliq_args)s. Result=%(out)s") % + locals()) + raise exception.VolumeBackendAPIException(data=msg) + + result_code = response_node.attrib.get("result") + + if result_code != "0": + msg = (_("Error running CLIQ command %(verb)s %(cliq_args)s. " + " Result=%(out)s") % + locals()) + raise exception.VolumeBackendAPIException(data=msg) + + return result_xml + + def _cliq_get_cluster_info(self, cluster_name): + """Queries for info about the cluster (including IP)""" + cliq_args = {} + cliq_args['clusterName'] = cluster_name + cliq_args['searchDepth'] = '1' + cliq_args['verbose'] = '0' + + result_xml = self._cliq_run_xml("getClusterInfo", cliq_args) + + return result_xml + + def _cliq_get_cluster_vip(self, cluster_name): + """Gets the IP on which a cluster shares iSCSI volumes""" + cluster_xml = self._cliq_get_cluster_info(cluster_name) + + vips = [] + for vip in cluster_xml.findall("response/cluster/vip"): + vips.append(vip.attrib.get('ipAddress')) + + if len(vips) == 1: + return vips[0] + + _xml = etree.tostring(cluster_xml) + msg = (_("Unexpected number of virtual ips for cluster " + " %(cluster_name)s. Result=%(_xml)s") % + locals()) + raise exception.VolumeBackendAPIException(data=msg) + + def _cliq_get_volume_info(self, volume_name): + """Gets the volume info, including IQN""" + cliq_args = {} + cliq_args['volumeName'] = volume_name + result_xml = self._cliq_run_xml("getVolumeInfo", cliq_args) + + # Result looks like this: + # + # + # + # + # + # + # + # + + # Flatten the nodes into a dictionary; use prefixes to avoid collisions + volume_attributes = {} + + volume_node = result_xml.find("response/volume") + for k, v in volume_node.attrib.items(): + volume_attributes["volume." + k] = v + + status_node = volume_node.find("status") + if status_node is not None: + for k, v in status_node.attrib.items(): + volume_attributes["status." + k] = v + + # We only consider the first permission node + permission_node = volume_node.find("permission") + if permission_node is not None: + for k, v in status_node.attrib.items(): + volume_attributes["permission." 
+ k] = v + + LOG.debug(_("Volume info: %(volume_name)s => %(volume_attributes)s") % + locals()) + return volume_attributes + + def create_volume(self, volume): + """Creates a volume.""" + cliq_args = {} + cliq_args['clusterName'] = self.configuration.san_clustername + + if self.configuration.san_thin_provision: + cliq_args['thinProvision'] = '1' + else: + cliq_args['thinProvision'] = '0' + + cliq_args['volumeName'] = volume['name'] + if int(volume['size']) == 0: + cliq_args['size'] = '100MB' + else: + cliq_args['size'] = '%sGB' % volume['size'] + + self._cliq_run_xml("createVolume", cliq_args) + + volume_info = self._cliq_get_volume_info(volume['name']) + cluster_name = volume_info['volume.clusterName'] + iscsi_iqn = volume_info['volume.iscsiIqn'] + + #TODO(justinsb): Is this always 1? Does it matter? + cluster_interface = '1' + + if not self.cluster_vip: + self.cluster_vip = self._cliq_get_cluster_vip(cluster_name) + iscsi_portal = self.cluster_vip + ":3260," + cluster_interface + + model_update = {} + + # NOTE(jdg): LH volumes always at lun 0 ? + model_update['provider_location'] = ("%s %s %s" % + (iscsi_portal, + iscsi_iqn, + 0)) + + return model_update + + def create_volume_from_snapshot(self, volume, snapshot): + """Creates a volume from a snapshot.""" + raise NotImplementedError() + + def create_snapshot(self, snapshot): + """Creates a snapshot.""" + raise NotImplementedError() + + def delete_volume(self, volume): + """Deletes a volume.""" + cliq_args = {} + cliq_args['volumeName'] = volume['name'] + cliq_args['prompt'] = 'false' # Don't confirm + try: + volume_info = self._cliq_get_volume_info(volume['name']) + except exception.ProcessExecutionError: + LOG.error("Volume did not exist. It will not be deleted") + return + self._cliq_run_xml("deleteVolume", cliq_args) + + def local_path(self, volume): + msg = _("local_path not supported") + raise exception.VolumeBackendAPIException(data=msg) + + def initialize_connection(self, volume, connector): + """Assigns the volume to a server. + + Assign any created volume to a compute node/host so that it can be + used from that host. HP VSA requires a volume to be assigned + to a server. + + This driver returns a driver_volume_type of 'iscsi'. + The format of the driver data is defined in _get_iscsi_properties. 
+ Example return value: + + { + 'driver_volume_type': 'iscsi' + 'data': { + 'target_discovered': True, + 'target_iqn': 'iqn.2010-10.org.openstack:volume-00000001', + 'target_protal': '127.0.0.1:3260', + 'volume_id': 1, + } + } + + """ + self._create_server(connector) + cliq_args = {} + cliq_args['volumeName'] = volume['name'] + cliq_args['serverName'] = connector['host'] + self._cliq_run_xml("assignVolumeToServer", cliq_args) + + iscsi_properties = self._get_iscsi_properties(volume) + return { + 'driver_volume_type': 'iscsi', + 'data': iscsi_properties + } + + def _create_server(self, connector): + cliq_args = {} + cliq_args['serverName'] = connector['host'] + out = self._cliq_run_xml("getServerInfo", cliq_args, False) + response = out.find("response") + result = response.attrib.get("result") + if result != '0': + cliq_args = {} + cliq_args['serverName'] = connector['host'] + cliq_args['initiator'] = connector['initiator'] + self._cliq_run_xml("createServer", cliq_args) + + def terminate_connection(self, volume, connector, **kwargs): + """Unassign the volume from the host.""" + cliq_args = {} + cliq_args['volumeName'] = volume['name'] + cliq_args['serverName'] = connector['host'] + self._cliq_run_xml("unassignVolumeToServer", cliq_args) + + def get_volume_stats(self, refresh): + if refresh: + self._update_backend_status() + + return self.device_stats + + def _update_backend_status(self): + data = {} + backend_name = self.configuration.safe_get('volume_backend_name') + data['volume_backend_name'] = backend_name or self.__class__.__name__ + data['driver_version'] = '1.0' + data['reserved_percentage'] = 0 + data['storage_protocol'] = 'iSCSI' + data['vendor_name'] = 'Hewlett-Packard' + + result_xml = self._cliq_run_xml("getClusterInfo", {}) + cluster_node = result_xml.find("response/cluster") + total_capacity = cluster_node.attrib.get("spaceTotal") + free_capacity = cluster_node.attrib.get("unprovisionedSpace") + GB = 1073741824 + + data['total_capacity_gb'] = int(total_capacity) / GB + data['free_capacity_gb'] = int(free_capacity) / GB + self.device_stats = data diff --git a/cinder/volume/drivers/san/san.py b/cinder/volume/drivers/san/san.py new file mode 100644 index 0000000000..fbd9c2ead8 --- /dev/null +++ b/cinder/volume/drivers/san/san.py @@ -0,0 +1,177 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 Justin Santa Barbara +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Default Driver for san-stored volumes. + +The unique thing about a SAN is that we don't expect that we can run the volume +controller on the SAN hardware. We expect to access it over SSH or some API. 
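+
+A typical configuration therefore carries the SSH coordinates of the
+device, e.g. (values illustrative):
+
+    san_ip=192.168.0.10
+    san_login=admin
+    san_private_key=/etc/cinder/san_rsa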
+""" + +import random + +from eventlet import greenthread +from oslo.config import cfg + +from cinder import exception +from cinder import flags +from cinder.openstack.common import log as logging +from cinder import utils +from cinder.volume.driver import ISCSIDriver + +LOG = logging.getLogger(__name__) + +san_opts = [ + cfg.BoolOpt('san_thin_provision', + default=True, + help='Use thin provisioning for SAN volumes?'), + cfg.StrOpt('san_ip', + default='', + help='IP address of SAN controller'), + cfg.StrOpt('san_login', + default='admin', + help='Username for SAN controller'), + cfg.StrOpt('san_password', + default='', + help='Password for SAN controller', + secret=True), + cfg.StrOpt('san_private_key', + default='', + help='Filename of private key to use for SSH authentication'), + cfg.StrOpt('san_clustername', + default='', + help='Cluster name to use for creating volumes'), + cfg.IntOpt('san_ssh_port', + default=22, + help='SSH port to use with SAN'), + cfg.BoolOpt('san_is_local', + default=False, + help='Execute commands locally instead of over SSH; ' + 'use if the volume service is running on the SAN device'), + cfg.IntOpt('ssh_conn_timeout', + default=30, + help="SSH connection timeout in seconds"), + cfg.IntOpt('ssh_min_pool_conn', + default=1, + help='Minimum ssh connections in the pool'), + cfg.IntOpt('ssh_max_pool_conn', + default=5, + help='Maximum ssh connections in the pool'), +] + +FLAGS = flags.FLAGS +FLAGS.register_opts(san_opts) + + +class SanISCSIDriver(ISCSIDriver): + """Base class for SAN-style storage volumes + + A SAN-style storage value is 'different' because the volume controller + probably won't run on it, so we need to access is over SSH or another + remote protocol. + """ + + def __init__(self, *args, **kwargs): + super(SanISCSIDriver, self).__init__(*args, **kwargs) + self.configuration.append_config_values(san_opts) + self.run_local = self.configuration.san_is_local + self.sshpool = None + + def _build_iscsi_target_name(self, volume): + return "%s%s" % (self.configuration.iscsi_target_prefix, + volume['name']) + + def _execute(self, *cmd, **kwargs): + if self.run_local: + return utils.execute(*cmd, **kwargs) + else: + check_exit_code = kwargs.pop('check_exit_code', None) + command = ' '.join(cmd) + return self._run_ssh(command, check_exit_code) + + def _run_ssh(self, command, check_exit_code=True, attempts=1): + if not self.sshpool: + password = self.configuration.san_password + privatekey = self.configuration.san_private_key + min_size = self.configuration.ssh_min_pool_conn + max_size = self.configuration.ssh_max_pool_conn + self.sshpool = utils.SSHPool(self.configuration.san_ip, + self.configuration.san_ssh_port, + self.configuration.ssh_conn_timeout, + self.configuration.san_login, + password=password, + privatekey=privatekey, + min_size=min_size, + max_size=max_size) + last_exception = None + try: + total_attempts = attempts + with self.sshpool.item() as ssh: + while attempts > 0: + attempts -= 1 + try: + return utils.ssh_execute( + ssh, + command, + check_exit_code=check_exit_code) + except Exception as e: + LOG.error(e) + last_exception = e + greenthread.sleep(random.randint(20, 500) / 100.0) + try: + raise exception.ProcessExecutionError( + exit_code=last_exception.exit_code, + stdout=last_exception.stdout, + stderr=last_exception.stderr, + cmd=last_exception.cmd) + except AttributeError: + raise exception.ProcessExecutionError( + exit_code=-1, + stdout="", + stderr="Error running SSH command", + cmd=command) + + except Exception as e: + 
LOG.error(_("Error running SSH command: %s") % command) + raise e + + def ensure_export(self, context, volume): + """Synchronously recreates an export for a logical volume.""" + pass + + def create_export(self, context, volume): + """Exports the volume.""" + pass + + def remove_export(self, context, volume): + """Removes an export for a logical volume.""" + pass + + def check_for_setup_error(self): + """Returns an error if prerequisites aren't met.""" + if not self.run_local: + if not (self.configuration.san_password or + self.configuration.san_private_key): + raise exception.InvalidInput( + reason=_('Specify san_password or san_private_key')) + + # The san_ip must always be set, because we use it for the target + if not self.configuration.san_ip: + raise exception.InvalidInput(reason=_("san_ip must be set")) + + def create_cloned_volume(self, volume, src_vref): + """Create a cloen of the specified volume.""" + raise NotImplementedError() diff --git a/cinder/volume/drivers/san/solaris.py b/cinder/volume/drivers/san/solaris.py new file mode 100644 index 0000000000..ca478cd186 --- /dev/null +++ b/cinder/volume/drivers/san/solaris.py @@ -0,0 +1,285 @@ +# Copyright 2012 OpenStack LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo.config import cfg + +from cinder import exception +from cinder import flags +from cinder.openstack.common import log as logging +from cinder.volume.drivers.san.san import SanISCSIDriver + +LOG = logging.getLogger(__name__) + +solaris_opts = [ + cfg.StrOpt('san_zfs_volume_base', + default='rpool/', + help='The ZFS path under which to create zvols for volumes.'), ] + +FLAGS = flags.FLAGS +FLAGS.register_opts(solaris_opts) + + +class SolarisISCSIDriver(SanISCSIDriver): + """Executes commands relating to Solaris-hosted ISCSI volumes. + + Basic setup for a Solaris iSCSI server: + + pkg install storage-server SUNWiscsit + + svcadm enable stmf + + svcadm enable -r svc:/network/iscsi/target:default + + pfexec itadm create-tpg e1000g0 ${MYIP} + + pfexec itadm create-target -t e1000g0 + + + Then grant the user that will be logging on lots of permissions. 
diff --git a/cinder/volume/drivers/san/solaris.py b/cinder/volume/drivers/san/solaris.py
new file mode 100644
index 0000000000..ca478cd186
--- /dev/null
+++ b/cinder/volume/drivers/san/solaris.py
@@ -0,0 +1,285 @@
+# Copyright 2012 OpenStack LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo.config import cfg
+
+from cinder import exception
+from cinder import flags
+from cinder.openstack.common import log as logging
+from cinder.volume.drivers.san.san import SanISCSIDriver
+
+LOG = logging.getLogger(__name__)
+
+solaris_opts = [
+    cfg.StrOpt('san_zfs_volume_base',
+               default='rpool/',
+               help='The ZFS path under which to create zvols for volumes.'), ]
+
+FLAGS = flags.FLAGS
+FLAGS.register_opts(solaris_opts)
+
+
+class SolarisISCSIDriver(SanISCSIDriver):
+    """Executes commands relating to Solaris-hosted ISCSI volumes.
+
+    Basic setup for a Solaris iSCSI server:
+
+    pkg install storage-server SUNWiscsit
+
+    svcadm enable stmf
+
+    svcadm enable -r svc:/network/iscsi/target:default
+
+    pfexec itadm create-tpg e1000g0 ${MYIP}
+
+    pfexec itadm create-target -t e1000g0
+
+
+    Then grant the user that will be logging on lots of permissions.
+    I'm not sure exactly which though:
+
+    zfs allow justinsb create,mount,destroy rpool
+
+    usermod -P'File System Management' justinsb
+
+    usermod -P'Primary Administrator' justinsb
+
+    Also make sure you can log in using san_login & san_password/san_private_key
+    """
+    def __init__(self, *cmd, **kwargs):
+        super(SolarisISCSIDriver, self).__init__(*cmd,
+                                                 execute=self._execute,
+                                                 **kwargs)
+
+    def _execute(self, *cmd, **kwargs):
+        new_cmd = ['pfexec']
+        new_cmd.extend(cmd)
+        return super(SolarisISCSIDriver, self)._execute(*new_cmd,
+                                                        **kwargs)
+
+    def _view_exists(self, luid):
+        (out, _err) = self._execute('/usr/sbin/stmfadm',
+                                    'list-view', '-l', luid,
+                                    check_exit_code=False)
+        if "no views found" in out:
+            return False
+
+        if "View Entry:" in out:
+            return True
+        msg = _("Cannot parse list-view output: %s") % out
+        raise exception.VolumeBackendAPIException(data=msg)
+
+    def _get_target_groups(self):
+        """Gets list of target groups from host."""
+        (out, _err) = self._execute('/usr/sbin/stmfadm', 'list-tg')
+        matches = self._get_prefixed_values(out, 'Target group: ')
+        LOG.debug("target_groups=%s" % matches)
+        return matches
+
+    def _target_group_exists(self, target_group_name):
+        return target_group_name in self._get_target_groups()
+
+    def _get_target_group_members(self, target_group_name):
+        (out, _err) = self._execute('/usr/sbin/stmfadm',
+                                    'list-tg', '-v', target_group_name)
+        matches = self._get_prefixed_values(out, 'Member: ')
+        LOG.debug("members of %s=%s" % (target_group_name, matches))
+        return matches
+
+    def _is_target_group_member(self, target_group_name, iscsi_target_name):
+        return iscsi_target_name in (
+            self._get_target_group_members(target_group_name))
+
+    def _get_iscsi_targets(self):
+        (out, _err) = self._execute('/usr/sbin/itadm', 'list-target')
+        matches = self._collect_lines(out)
+
+        # Skip header
+        if len(matches) != 0:
+            assert 'TARGET NAME' in matches[0]
+            matches = matches[1:]
+
+        targets = []
+        for line in matches:
+            items = line.split()
+            assert len(items) == 3
+            targets.append(items[0])
+
+        LOG.debug("_get_iscsi_targets=%s" % (targets))
+        return targets
+
+    def _iscsi_target_exists(self, iscsi_target_name):
+        return iscsi_target_name in self._get_iscsi_targets()
+
+    def _build_zfs_poolname(self, volume):
+        zfs_poolname = '%s%s' % (FLAGS.san_zfs_volume_base, volume['name'])
+        return zfs_poolname
+
+    def create_volume(self, volume):
+        """Creates a volume."""
+        if int(volume['size']) == 0:
+            sizestr = '100M'
+        else:
+            sizestr = '%sG' % volume['size']
+
+        zfs_poolname = self._build_zfs_poolname(volume)
+
+        # Create a zfs volume
+        cmd = ['/usr/sbin/zfs', 'create']
+        if FLAGS.san_thin_provision:
+            cmd.append('-s')
+        cmd.extend(['-V', sizestr])
+        cmd.append(zfs_poolname)
+        self._execute(*cmd)
+
+    def _get_luid(self, volume):
+        zfs_poolname = self._build_zfs_poolname(volume)
+        zvol_name = '/dev/zvol/rdsk/%s' % zfs_poolname
+
+        (out, _err) = self._execute('/usr/sbin/sbdadm', 'list-lu')
+
+        lines = self._collect_lines(out)
+
+        # Strip headers
+        if len(lines) >= 1:
+            if lines[0] == '':
+                lines = lines[1:]
+
+        if len(lines) >= 4:
+            assert 'Found' in lines[0]
+            assert '' == lines[1]
+            assert 'GUID' in lines[2]
+            assert '------------------' in lines[3]
+
+            lines = lines[4:]
+
+        for line in lines:
+            items = line.split()
+            assert len(items) == 3
+            if items[2] == zvol_name:
+                luid = items[0].strip()
+                return luid
+
+        msg = _('LUID not found for %(zfs_poolname)s. '
+                'Output=%(out)s') % locals()
+        raise exception.VolumeBackendAPIException(data=msg)
+
+    def _is_lu_created(self, volume):
+        luid = self._get_luid(volume)
+        return luid
+
+    def delete_volume(self, volume):
+        """Deletes a volume."""
+        zfs_poolname = self._build_zfs_poolname(volume)
+        self._execute('/usr/sbin/zfs', 'destroy', zfs_poolname)
+
+    def local_path(self, volume):
+        # TODO(justinsb): Is this needed here?
+        escaped_group = FLAGS.volume_group.replace('-', '--')
+        escaped_name = volume['name'].replace('-', '--')
+        return "/dev/mapper/%s-%s" % (escaped_group, escaped_name)
+
+    def ensure_export(self, context, volume):
+        """Synchronously recreates an export for a logical volume."""
+        #TODO(justinsb): On bootup, this is called for every volume.
+        # It then runs ~5 SSH commands for each volume,
+        # most of which fetch the same info each time
+        # This makes initial start stupid-slow
+        return self._do_export(volume, force_create=False)
+
+    def create_export(self, context, volume):
+        return self._do_export(volume, force_create=True)
+
+    def _do_export(self, volume, force_create):
+        # Create a Logical Unit (LU) backed by the zfs volume
+        zfs_poolname = self._build_zfs_poolname(volume)
+
+        if force_create or not self._is_lu_created(volume):
+            zvol_name = '/dev/zvol/rdsk/%s' % zfs_poolname
+            self._execute('/usr/sbin/sbdadm', 'create-lu', zvol_name)
+
+        luid = self._get_luid(volume)
+        iscsi_name = self._build_iscsi_target_name(volume)
+        target_group_name = 'tg-%s' % volume['name']
+
+        # Create an iSCSI target, mapped to just this volume
+        if force_create or not self._target_group_exists(target_group_name):
+            self._execute('/usr/sbin/stmfadm', 'create-tg', target_group_name)
+
+        # Yes, we add the target to the target group before we create the
+        # target! Otherwise, it complains that the target is already active
+        if force_create or not self._is_target_group_member(target_group_name,
+                                                            iscsi_name):
+            self._execute('/usr/sbin/stmfadm',
+                          'add-tg-member', '-g', target_group_name, iscsi_name)
+
+        if force_create or not self._iscsi_target_exists(iscsi_name):
+            self._execute('/usr/sbin/itadm', 'create-target', '-n', iscsi_name)
+
+        if force_create or not self._view_exists(luid):
+            self._execute('/usr/sbin/stmfadm',
+                          'add-view', '-t', target_group_name, luid)
+
+        #TODO(justinsb): Is this always 1? Does it matter?
+        iscsi_portal_interface = '1'
+        iscsi_portal = FLAGS.san_ip + ":3260," + iscsi_portal_interface
+
+        db_update = {}
+        db_update['provider_location'] = ("%s %s" %
+                                          (iscsi_portal,
+                                           iscsi_name))
+
+        return db_update
+
+    def remove_export(self, context, volume):
+        """Removes an export for a logical volume."""
+
+        # This is the reverse of _do_export
+        luid = self._get_luid(volume)
+        iscsi_name = self._build_iscsi_target_name(volume)
+        target_group_name = 'tg-%s' % volume['name']
+
+        if self._view_exists(luid):
+            self._execute('/usr/sbin/stmfadm', 'remove-view', '-l', luid, '-a')
+
+        if self._iscsi_target_exists(iscsi_name):
+            self._execute('/usr/sbin/stmfadm', 'offline-target', iscsi_name)
+            self._execute('/usr/sbin/itadm', 'delete-target', iscsi_name)
+
+        # We don't delete the tg-member; we delete the whole tg!
+
+        if self._target_group_exists(target_group_name):
+            self._execute('/usr/sbin/stmfadm', 'delete-tg', target_group_name)
+
+        if self._is_lu_created(volume):
+            self._execute('/usr/sbin/sbdadm', 'delete-lu', luid)
+
+    def _collect_lines(self, data):
+        """Split data into lines, stripping whitespace from each."""
+        matches = []
+        for line in data.splitlines():
+            match = line.strip()
+            matches.append(match)
+        return matches
+
+    def _get_prefixed_values(self, data, prefix):
+        """Collect lines starting with prefix, with the prefix stripped."""
+        matches = []
+        for line in data.splitlines():
+            line = line.strip()
+            if line.startswith(prefix):
+                match = line[len(prefix):]
+                match = match.strip()
+                matches.append(match)
+        return matches
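The Solaris driver works entirely by scraping fixed-format CLI output, so its helpers are small prefix parsers. A standalone sketch of the `_get_prefixed_values` pattern, run against invented `stmfadm list-tg` output:

SAMPLE_OUTPUT = """Target group: tg-volume-00000001
Target group: tg-volume-00000002
Member: iqn.2010-10.org.openstack:volume-00000001
"""


def get_prefixed_values(data, prefix):
    # Keep only lines starting with prefix, returning the remainder.
    matches = []
    for line in data.splitlines():
        line = line.strip()
        if line.startswith(prefix):
            matches.append(line[len(prefix):].strip())
    return matches


print(get_prefixed_values(SAMPLE_OUTPUT, 'Target group: '))
# ['tg-volume-00000001', 'tg-volume-00000002']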
+ """ + + def _check_prerequisites(self): + """Sanity checks before attempting to mount SOFS.""" + + # config is mandatory + config = FLAGS.scality_sofs_config + if not config: + msg = _("Value required for 'scality_sofs_config'") + LOG.warn(msg) + raise exception.VolumeBackendAPIException(data=msg) + + # config can be a file path or a URL, check it + if urlparse.urlparse(config).scheme == '': + # turn local path into URL + config = 'file://%s' % config + try: + urllib2.urlopen(config, timeout=5).close() + except urllib2.URLError as e: + msg = _("Cannot access 'scality_sofs_config': %s") % e + LOG.warn(msg) + raise exception.VolumeBackendAPIException(data=msg) + + # mount.sofs must be installed + if not os.access('/sbin/mount.sofs', os.X_OK): + msg = _("Cannot execute /sbin/mount.sofs") + LOG.warn(msg) + raise exception.VolumeBackendAPIException(data=msg) + + def _makedirs(self, path): + try: + os.makedirs(path) + except OSError as e: + if e.errno != errno.EEXIST: + raise e + + def _mount_sofs(self): + config = FLAGS.scality_sofs_config + mount_path = FLAGS.scality_sofs_mount_point + sysdir = os.path.join(mount_path, 'sys') + + self._makedirs(mount_path) + if not os.path.isdir(sysdir): + self._execute('mount', '-t', 'sofs', config, mount_path, + run_as_root=True) + if not os.path.isdir(sysdir): + msg = _("Cannot mount Scality SOFS, check syslog for errors") + LOG.warn(msg) + raise exception.VolumeBackendAPIException(data=msg) + + def _size_bytes(self, size_in_g): + if int(size_in_g) == 0: + return 100 * 1024 * 1024 + return int(size_in_g) * 1024 * 1024 * 1024 + + def _create_file(self, path, size): + with open(path, "ab") as f: + f.truncate(size) + os.chmod(path, 0666) + + def _copy_file(self, src_path, dest_path): + self._execute('dd', 'if=%s' % src_path, 'of=%s' % dest_path, + 'bs=1M', 'conv=fsync,nocreat,notrunc', + run_as_root=True) + + def do_setup(self, context): + """Any initialization the volume driver does while starting.""" + self._check_prerequisites() + self._mount_sofs() + voldir = os.path.join(FLAGS.scality_sofs_mount_point, + FLAGS.scality_sofs_volume_dir) + if not os.path.isdir(voldir): + self._makedirs(voldir) + + def check_for_setup_error(self): + """Returns an error if prerequisites aren't met.""" + self._check_prerequisites() + voldir = os.path.join(FLAGS.scality_sofs_mount_point, + FLAGS.scality_sofs_volume_dir) + if not os.path.isdir(voldir): + msg = _("Cannot find volume dir for Scality SOFS at '%s'") % voldir + LOG.warn(msg) + raise exception.VolumeBackendAPIException(data=msg) + + def create_volume(self, volume): + """Creates a logical volume. + + Can optionally return a Dictionary of changes to the volume + object to be persisted. 
+ """ + self._create_file(self.local_path(volume), + self._size_bytes(volume['size'])) + volume['provider_location'] = self._sofs_path(volume) + return {'provider_location': volume['provider_location']} + + def create_volume_from_snapshot(self, volume, snapshot): + """Creates a volume from a snapshot.""" + changes = self.create_volume(volume) + self._copy_file(self.local_path(snapshot), + self.local_path(volume)) + return changes + + def delete_volume(self, volume): + """Deletes a logical volume.""" + os.remove(self.local_path(volume)) + + def create_snapshot(self, snapshot): + """Creates a snapshot.""" + volume_path = os.path.join(FLAGS.scality_sofs_mount_point, + FLAGS.scality_sofs_volume_dir, + snapshot['volume_name']) + snapshot_path = self.local_path(snapshot) + self._create_file(snapshot_path, + self._size_bytes(snapshot['volume_size'])) + self._copy_file(volume_path, snapshot_path) + + def delete_snapshot(self, snapshot): + """Deletes a snapshot.""" + os.remove(self.local_path(snapshot)) + + def _sofs_path(self, volume): + return os.path.join(FLAGS.scality_sofs_volume_dir, + volume['name']) + + def local_path(self, volume): + return os.path.join(FLAGS.scality_sofs_mount_point, + self._sofs_path(volume)) + + def ensure_export(self, context, volume): + """Synchronously recreates an export for a logical volume.""" + pass + + def create_export(self, context, volume): + """Exports the volume. + + Can optionally return a Dictionary of changes to the volume + object to be persisted. + """ + pass + + def remove_export(self, context, volume): + """Removes an export for a logical volume.""" + pass + + def initialize_connection(self, volume, connector): + """Allow connection to connector and return connection info.""" + return { + 'driver_volume_type': 'scality', + 'data': { + 'sofs_path': self._sofs_path(volume), + } + } + + def terminate_connection(self, volume, connector, force=False, **kwargs): + """Disallow connection from connector.""" + pass + + def attach_volume(self, context, volume_id, instance_uuid, mountpoint): + """ Callback for volume attached to instance.""" + pass + + def detach_volume(self, context, volume_id): + """ Callback for volume detached.""" + pass + + def get_volume_stats(self, refresh=False): + """Return the current state of the volume service. + + If 'refresh' is True, run the update first. + """ + stats = { + 'vendor_name': 'Scality', + 'driver_version': '1.0', + 'storage_protocol': 'scality', + 'total_capacity_gb': 'infinite', + 'free_capacity_gb': 'infinite', + 'reserved_percentage': 0, + } + backend_name = self.configuration.safe_get('volume_backend_name') + stats['volume_backend_name'] = backend_name or 'Scality_SOFS' + return stats + + def copy_image_to_volume(self, context, volume, image_service, image_id): + """Fetch the image from image_service and write it to the volume.""" + image_utils.fetch_to_raw(context, + image_service, + image_id, + self.local_path(volume)) + self.create_volume(volume) + + def copy_volume_to_image(self, context, volume, image_service, image_meta): + """Copy the volume to the specified image.""" + image_utils.upload_volume(context, + image_service, + image_meta, + self.local_path(volume)) + + def clone_image(self, volume, image_location): + """Create a volume efficiently from an existing image. + + image_location is a string whose format depends on the + image service backend in use. The driver should use it + to determine whether cloning is possible. 
diff --git a/cinder/volume/drivers/sheepdog.py b/cinder/volume/drivers/sheepdog.py
new file mode 100644
index 0000000000..a539ce1815
--- /dev/null
+++ b/cinder/volume/drivers/sheepdog.py
@@ -0,0 +1,141 @@
+# Copyright 2012 OpenStack LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+SheepDog Volume Driver.
+
+"""
+import re
+
+from cinder import exception
+from cinder import flags
+from cinder.openstack.common import log as logging
+from cinder.volume import driver
+
+
+LOG = logging.getLogger(__name__)
+FLAGS = flags.FLAGS
+
+
+class SheepdogDriver(driver.VolumeDriver):
+    """Executes commands relating to Sheepdog Volumes."""
+
+    def __init__(self, *args, **kwargs):
+        super(SheepdogDriver, self).__init__(*args, **kwargs)
+        self.stats_pattern = re.compile(r'[\w\s%]*Total\s(\d+)\s(\d+)*')
+        self._stats = {}
+
+    def check_for_setup_error(self):
+        """Returns an error if prerequisites aren't met."""
+        try:
+            #NOTE(francois-charlier) Since 0.24 'collie cluster info -r'
+            #  gives short output, but for compatibility reason we won't
+            #  use it and just check if 'running' is in the output.
+            (out, err) = self._execute('collie', 'cluster', 'info')
+            if 'running' not in out.split():
+                exception_message = (_("Sheepdog is not working: %s") % out)
+                raise exception.VolumeBackendAPIException(
+                    data=exception_message)
+
+        except exception.ProcessExecutionError:
+            exception_message = _("Sheepdog is not working")
+            raise exception.VolumeBackendAPIException(data=exception_message)
+
+    def create_cloned_volume(self, volume, src_vref):
+        raise NotImplementedError()
+
+    def create_volume(self, volume):
+        """Creates a sheepdog volume."""
+        self._try_execute('qemu-img', 'create',
+                          "sheepdog:%s" % volume['name'],
+                          '%sG' % volume['size'])
+
+    def create_volume_from_snapshot(self, volume, snapshot):
+        """Creates a sheepdog volume from a snapshot."""
+        self._try_execute('qemu-img', 'create', '-b',
+                          "sheepdog:%s:%s" % (snapshot['volume_name'],
+                                              snapshot['name']),
+                          "sheepdog:%s" % volume['name'])
+
+    def delete_volume(self, volume):
+        """Deletes a logical volume."""
+        self._try_execute('collie', 'vdi', 'delete', volume['name'])
+
+    def create_snapshot(self, snapshot):
+        """Creates a sheepdog snapshot."""
+        self._try_execute('qemu-img', 'snapshot', '-c', snapshot['name'],
+                          "sheepdog:%s" % snapshot['volume_name'])
+
+    def delete_snapshot(self, snapshot):
+        """Deletes a sheepdog snapshot."""
+        self._try_execute('collie', 'vdi', 'delete', snapshot['volume_name'],
+                          '-s', snapshot['name'])
+
+    def local_path(self, volume):
+        return "sheepdog:%s" % volume['name']
+
+    def ensure_export(self, context, volume):
+        """Safely and synchronously recreates an export for a logical volume."""
+        pass
+
+    def create_export(self, context, volume):
+        """Exports the volume."""
+        pass
+
+    def remove_export(self, context, volume):
+        """Removes an export for a logical volume."""
+        pass
+    def initialize_connection(self, volume, connector):
+        return {
+            'driver_volume_type': 'sheepdog',
+            'data': {
+                'name': volume['name']
+            }
+        }
+
+    def terminate_connection(self, volume, connector, **kwargs):
+        pass
+
+    def _update_volume_stats(self):
+        stats = {}
+
+        backend_name = "sheepdog"
+        if self.configuration:
+            backend_name = self.configuration.safe_get('volume_backend_name')
+        stats["volume_backend_name"] = backend_name or 'sheepdog'
+        stats['vendor_name'] = 'Open Source'
+        stats['driver_version'] = '1.0'
+        stats['storage_protocol'] = 'sheepdog'
+        stats['total_capacity_gb'] = 'unknown'
+        stats['free_capacity_gb'] = 'unknown'
+        stats['reserved_percentage'] = 0
+        stats['QoS_support'] = False
+
+        try:
+            stdout, _err = self._execute('collie', 'node', 'info', '-r')
+            m = self.stats_pattern.match(stdout)
+            total = float(m.group(1))
+            used = float(m.group(2))
+            stats['total_capacity_gb'] = total / (1024 ** 3)
+            stats['free_capacity_gb'] = (total - used) / (1024 ** 3)
+        except exception.ProcessExecutionError:
+            LOG.exception(_('error refreshing volume stats'))
+
+        self._stats = stats
+
+    def get_volume_stats(self, refresh=False):
+        if refresh:
+            self._update_volume_stats()
+        return self._stats
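The sheepdog stats refresh shells out to `collie node info -r` and scrapes the totals with a regex. A standalone sketch of that parsing step against invented sample output (the pattern is slightly simplified from the driver's, and real `collie` output may differ):

import re

# Invented sample: "Total <total-bytes> <used-bytes> ..." on one line.
SAMPLE = 'Total 107374182400 32212254720 30% 21474836480'

stats_pattern = re.compile(r'[\w\s%]*Total\s(\d+)\s(\d+)')

m = stats_pattern.match(SAMPLE)
total = float(m.group(1))
used = float(m.group(2))
print(total / (1024 ** 3))           # total_capacity_gb: 100.0
print((total - used) / (1024 ** 3))  # free_capacity_gb: 70.0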
diff --git a/cinder/volume/drivers/solidfire.py b/cinder/volume/drivers/solidfire.py
new file mode 100644
index 0000000000..daab58a6af
--- /dev/null
+++ b/cinder/volume/drivers/solidfire.py
@@ -0,0 +1,590 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 Justin Santa Barbara
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import base64
+import httplib
+import json
+import math
+import random
+import socket
+import string
+import time
+import uuid
+
+from oslo.config import cfg
+
+from cinder import context
+from cinder import exception
+from cinder.openstack.common import log as logging
+from cinder.volume.drivers.san.san import SanISCSIDriver
+from cinder.volume import volume_types
+
+VERSION = '1.2'
+LOG = logging.getLogger(__name__)
+
+sf_opts = [
+    cfg.BoolOpt('sf_emulate_512',
+                default=True,
+                help='Set 512 byte emulation on volume creation'),
+
+    cfg.BoolOpt('sf_allow_tenant_qos',
+                default=False,
+                help='Allow tenants to specify QOS on create'),
+
+    cfg.StrOpt('sf_account_prefix',
+               default=socket.gethostname(),
+               help='Create SolidFire accounts with this prefix'), ]
+
+
+class SolidFire(SanISCSIDriver):
+    """OpenStack driver to enable SolidFire cluster.
+
+    Version history:
+        1.0 - Initial driver
+        1.1 - Refactor, clone support, qos by type and minor bug fixes
+
+    """
+
+    sf_qos_dict = {'slow': {'minIOPS': 100,
+                            'maxIOPS': 200,
+                            'burstIOPS': 200},
+                   'medium': {'minIOPS': 200,
+                              'maxIOPS': 400,
+                              'burstIOPS': 400},
+                   'fast': {'minIOPS': 500,
+                            'maxIOPS': 1000,
+                            'burstIOPS': 1000},
+                   'performant': {'minIOPS': 2000,
+                                  'maxIOPS': 4000,
+                                  'burstIOPS': 4000},
+                   'off': None}
+
+    sf_qos_keys = ['minIOPS', 'maxIOPS', 'burstIOPS']
+    cluster_stats = {}
+
+    GB = math.pow(2, 30)
+
+    def __init__(self, *args, **kwargs):
+        super(SolidFire, self).__init__(*args, **kwargs)
+        self.configuration.append_config_values(sf_opts)
+        self._update_cluster_status()
+
+    def _issue_api_request(self, method_name, params):
+        """All API requests to SolidFire device go through this method.
+
+        Simple JSON-RPC web-based API calls. Each call takes a set of
+        parameters (dict) and returns results in a dict as well.
+
+        """
+        max_simultaneous_clones = ['xMaxSnapshotsPerVolumeExceeded',
+                                   'xMaxClonesPerVolumeExceeded',
+                                   'xMaxSnapshotsPerNodeExceeded',
+                                   'xMaxClonesPerNodeExceeded']
+        host = self.configuration.san_ip
+        # For now 443 is the only port our server accepts requests on
+        port = 443
+
+        cluster_admin = self.configuration.san_login
+        cluster_password = self.configuration.san_password
+
+        # NOTE(jdg): We're wrapping a retry loop for a known XDB issue
+        # Shows up in very high request rates (i.e. create 1000 volumes)
+        # we have to wrap the whole sequence because the request_id
+        # can't be re-used
+        retry_count = 5
+        while retry_count > 0:
+            request_id = int(uuid.uuid4())  # just generate a random number
+            command = {'method': method_name,
+                       'id': request_id}
+
+            if params is not None:
+                command['params'] = params
+
+            payload = json.dumps(command, ensure_ascii=False)
+            payload.encode('utf-8')
+            header = {'Content-Type': 'application/json-rpc; charset=utf-8'}
+
+            if cluster_password is not None:
+                # base64.encodestring includes a newline character
+                # in the result, make sure we strip it off
+                auth_key = base64.encodestring('%s:%s' % (cluster_admin,
+                                               cluster_password))[:-1]
+                header['Authorization'] = 'Basic %s' % auth_key
+
+            LOG.debug(_("Payload for SolidFire API call: %s"), payload)
+
+            connection = httplib.HTTPSConnection(host, port)
+            connection.request('POST', '/json-rpc/1.0', payload, header)
+            response = connection.getresponse()
+
+            data = {}
+            if response.status != 200:
+                connection.close()
+                raise exception.SolidFireAPIException(status=response.status)
+
+            else:
+                data = response.read()
+                try:
+                    data = json.loads(data)
+                except (TypeError, ValueError), exc:
+                    connection.close()
+                    msg = _("Call to json.loads() raised "
+                            "an exception: %s") % exc
+                    raise exception.SfJsonEncodeFailure(msg)
+
+                connection.close()
+
+            LOG.debug(_("Results of SolidFire API call: %s"), data)
+
+            if 'error' in data:
+                if data['error']['name'] in max_simultaneous_clones:
+                    LOG.warning(_('Clone operation '
+                                  'encountered: %s') % data['error']['name'])
+                    LOG.warning(_(
+                        'Waiting for outstanding operation '
+                        'before retrying snapshot: %s') % params['name'])
+                    time.sleep(5)
+                    # Don't decrement the retry count for this one
+                elif 'xDBVersionMismatch' in data['error']['name']:
+                    LOG.warning(_('Detected xDBVersionMismatch, '
+                                  'retry %s of 5') % (5 - retry_count))
+                    time.sleep(1)
+                    retry_count -= 1
+                elif 'xUnknownAccount' in data['error']['name']:
+                    retry_count = 0
+                else:
+                    msg = _("API response: %s") % data
+                    raise exception.SolidFireAPIException(msg)
+            else:
+                retry_count = 0
+
+        return data
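`_issue_api_request` above is plain JSON-RPC over HTTPS with HTTP basic auth. A standalone sketch of just the payload and header assembly (credentials are invented; the method name is one the driver itself uses):

import base64
import json

cluster_admin = 'admin'        # invented credentials
cluster_password = 'secret'

command = {'method': 'GetClusterInfo', 'id': 1, 'params': {}}
payload = json.dumps(command, ensure_ascii=False)

# base64.encodestring appends a newline; strip it before use.
auth_key = base64.encodestring('%s:%s' % (cluster_admin,
                                          cluster_password))[:-1]
headers = {'Content-Type': 'application/json-rpc; charset=utf-8',
           'Authorization': 'Basic %s' % auth_key}

print(headers)
print(payload)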
+
+    def _get_volumes_by_sfaccount(self, account_id):
+        """Get all volumes on cluster for specified account."""
+        params = {'accountID': account_id}
+        data = self._issue_api_request('ListVolumesForAccount', params)
+        if 'result' in data:
+            return data['result']['volumes']
+
+    def _get_sfaccount_by_name(self, sf_account_name):
+        """Get SolidFire account object by name."""
+        sfaccount = None
+        params = {'username': sf_account_name}
+        data = self._issue_api_request('GetAccountByName', params)
+        if 'result' in data and 'account' in data['result']:
+            LOG.debug(_('Found solidfire account: %s'), sf_account_name)
+            sfaccount = data['result']['account']
+        return sfaccount
+
+    def _get_sf_account_name(self, project_id):
+        """Build the SolidFire account name to use."""
+        return '%s%s%s' % (self.configuration.sf_account_prefix,
+                           '-' if self.configuration.sf_account_prefix else '',
+                           project_id)
+
+    def _get_sfaccount(self, project_id):
+        sf_account_name = self._get_sf_account_name(project_id)
+        sfaccount = self._get_sfaccount_by_name(sf_account_name)
+        if sfaccount is None:
+            raise exception.SfAccountNotFound(account_name=sf_account_name)
+
+        return sfaccount
+
+    def _create_sfaccount(self, project_id):
+        """Create account on SolidFire device if it doesn't already exist.
+
+        We're first going to check if the account already exists, if it does
+        just return it. If not, then create it.
+
+        """
+
+        sf_account_name = self._get_sf_account_name(project_id)
+        sfaccount = self._get_sfaccount_by_name(sf_account_name)
+        if sfaccount is None:
+            LOG.debug(_('solidfire account: %s does not exist, create it...'),
+                      sf_account_name)
+            chap_secret = self._generate_random_string(12)
+            params = {'username': sf_account_name,
+                      'initiatorSecret': chap_secret,
+                      'targetSecret': chap_secret,
+                      'attributes': {}}
+            data = self._issue_api_request('AddAccount', params)
+            if 'result' in data:
+                sfaccount = self._get_sfaccount_by_name(sf_account_name)
+
+        return sfaccount
+
+    def _get_cluster_info(self):
+        """Query the SolidFire cluster for some property info."""
+        params = {}
+        data = self._issue_api_request('GetClusterInfo', params)
+        if 'result' not in data:
+            raise exception.SolidFireAPIDataException(data=data)
+
+        return data['result']
+
+    def _do_export(self, volume):
+        """Gets the associated account, retrieves CHAP info and updates."""
+        sfaccount = self._get_sfaccount(volume['project_id'])
+
+        model_update = {}
+        model_update['provider_auth'] = ('CHAP %s %s'
+                                         % (sfaccount['username'],
+                                            sfaccount['targetSecret']))
+
+        return model_update
+
+    def _generate_random_string(self, length):
+        """Generates random_string to use for CHAP password."""
+
+        char_set = string.ascii_uppercase + string.digits
+        return ''.join(random.sample(char_set, length))
+
+    def _get_model_info(self, sfaccount, sf_volume_id):
+        """Gets the connection info for specified account and volume."""
+        cluster_info = self._get_cluster_info()
+        iscsi_portal = cluster_info['clusterInfo']['svip'] + ':3260'
+        chap_secret = sfaccount['targetSecret']
+
+        found_volume = False
+        iteration_count = 0
+        while not found_volume and iteration_count < 10:
+            volume_list = self._get_volumes_by_sfaccount(
+                sfaccount['accountID'])
+            iqn = None
+            for v in volume_list:
+                if v['volumeID'] == sf_volume_id:
+                    iqn = v['iqn']
+                    found_volume = True
+                    break
+            if not found_volume:
+                time.sleep(2)
+                iteration_count += 1
+
+        if not found_volume:
+            LOG.error(_('Failed to retrieve volume SolidFire-'
+                        'ID: %s in get_by_account!') % sf_volume_id)
+            raise exception.VolumeNotFound(volume_id=sf_volume_id)
+
+        model_update = {}
+        # NOTE(john-griffith): SF volumes are always at lun 0
+        model_update['provider_location'] = ('%s %s %s'
+                                             % (iscsi_portal, iqn, 0))
+        model_update['provider_auth'] = ('CHAP %s %s'
+                                         % (sfaccount['username'],
+                                            chap_secret))
+        return model_update
+
+    def _do_clone_volume(self, src_uuid, src_project_id, v_ref):
+        """Create a clone of an existing volume.
+
+        Currently snapshots are the same as clones on the SF cluster.
+        Due to the way the SF cluster works there's no loss in efficiency
+        or space usage between the two. The only thing different right
+        now is the restore snapshot functionality which has not been
+        implemented in the pre-release version of the SolidFire Cluster.
+
+        """
+        attributes = {}
+        qos = {}
+
+        sfaccount = self._get_sfaccount(src_project_id)
+        params = {'accountID': sfaccount['accountID']}
+
+        sf_vol = self._get_sf_volume(src_uuid, params)
+        if sf_vol is None:
+            raise exception.VolumeNotFound(volume_id=src_uuid)
+
+        if 'qos' in sf_vol:
+            qos = sf_vol['qos']
+
+        attributes = {'uuid': v_ref['id'],
+                      'is_clone': 'True',
+                      'src_uuid': src_uuid}
+
+        if qos:
+            for k, v in qos.items():
+                attributes[k] = str(v)
+
+        params = {'volumeID': int(sf_vol['volumeID']),
+                  'name': 'UUID-%s' % v_ref['id'],
+                  'attributes': attributes,
+                  'qos': qos}
+
+        data = self._issue_api_request('CloneVolume', params)
+
+        if (('result' not in data) or ('volumeID' not in data['result'])):
+            raise exception.SolidFireAPIDataException(data=data)
+
+        sf_volume_id = data['result']['volumeID']
+        model_update = self._get_model_info(sfaccount, sf_volume_id)
+        if model_update is None:
+            mesg = _('Failed to get model update from clone')
+            raise exception.SolidFireAPIDataException(mesg)
+
+        return (data, sfaccount, model_update)
+
+    def _do_volume_create(self, project_id, params):
+        sfaccount = self._create_sfaccount(project_id)
+
+        params['accountID'] = sfaccount['accountID']
+        data = self._issue_api_request('CreateVolume', params)
+
+        if (('result' not in data) or ('volumeID' not in data['result'])):
+            raise exception.SolidFireAPIDataException(data=data)
+
+        sf_volume_id = data['result']['volumeID']
+        return self._get_model_info(sfaccount, sf_volume_id)
+
+    def _set_qos_presets(self, volume):
+        qos = {}
+        valid_presets = self.sf_qos_dict.keys()
+
+        #First look to see if they included a preset
+        presets = [i.value for i in volume.get('volume_metadata')
+                   if i.key == 'sf-qos' and i.value in valid_presets]
+        if len(presets) > 0:
+            if len(presets) > 1:
+                LOG.warning(_('More than one valid preset was '
+                              'detected, using %s') % presets[0])
+            qos = self.sf_qos_dict[presets[0]]
+        else:
+            #look for explicit settings
+            for i in volume.get('volume_metadata'):
+                if i.key in self.sf_qos_keys:
+                    qos[i.key] = int(i.value)
+        return qos
+
+    def _set_qos_by_volume_type(self, ctxt, type_id):
+        qos = {}
+        volume_type = volume_types.get_volume_type(ctxt, type_id)
+        specs = volume_type.get('extra_specs')
+        for key, value in specs.iteritems():
+            if ':' in key:
+                fields = key.split(':')
+                key = fields[1]
+            if key in self.sf_qos_keys:
+                qos[key] = int(value)
+        return qos
+
+    def _get_sf_volume(self, uuid, params):
+        data = self._issue_api_request('ListVolumesForAccount', params)
+        if 'result' not in data:
+            raise exception.SolidFireAPIDataException(data=data)
+
+        found_count = 0
+        sf_volref = None
+        for v in data['result']['volumes']:
+            if uuid in v['name']:
+                found_count += 1
+                sf_volref = v
+                LOG.debug(_("Mapped SolidFire volumeID %(sfid)s "
+                            "to cinder ID %(uuid)s.") %
+                          {'sfid': v['volumeID'],
+                           'uuid': uuid})
+
+        if found_count == 0:
+            # NOTE(jdg): Previously we would raise here, but there are cases
+            # where this might be a cleanup for a failed delete.
+            # Until we get better states we'll just log an error
+            LOG.error(_("Volume %s, not found on SF Cluster."), uuid)
+
+        if found_count > 1:
+            LOG.error(_("Found %(count)s volumes mapped to id: %(uuid)s.") %
+                      {'count': found_count,
+                       'uuid': uuid})
+            raise exception.DuplicateSfVolumeNames(vol_name=uuid)
+
+        return sf_volref
+
+    def create_volume(self, volume):
+        """Create volume on SolidFire device.
+
+        The account is where CHAP settings are derived from, volume is
+        created and exported. Note that the new volume is immediately ready
+        for use.
+
+        One caveat here is that an existing user account must be specified
+        in the API call to create a new volume. We use a set algorithm to
+        determine account info based on passed in cinder volume object. First
+        we check to see if the account already exists (and use it), or if it
+        does not already exist, we'll go ahead and create it.
+
+        """
+        slice_count = 1
+        attributes = {}
+        qos = {}
+
+        if (self.configuration.sf_allow_tenant_qos and
+                volume.get('volume_metadata') is not None):
+            qos = self._set_qos_presets(volume)
+
+        ctxt = context.get_admin_context()
+        type_id = volume['volume_type_id']
+        if type_id is not None:
+            qos = self._set_qos_by_volume_type(ctxt, type_id)
+
+        attributes = {'uuid': volume['id'],
+                      'is_clone': 'False'}
+        if qos:
+            for k, v in qos.items():
+                attributes[k] = str(v)
+
+        params = {'name': 'UUID-%s' % volume['id'],
+                  'accountID': None,
+                  'sliceCount': slice_count,
+                  'totalSize': int(volume['size'] * self.GB),
+                  'enable512e': self.configuration.sf_emulate_512,
+                  'attributes': attributes,
+                  'qos': qos}
+
+        return self._do_volume_create(volume['project_id'], params)
+
+    def create_cloned_volume(self, volume, src_vref):
+        """Create a clone of an existing volume."""
+        (data, sfaccount, model) = self._do_clone_volume(
+            src_vref['id'],
+            src_vref['project_id'],
+            volume)
+
+        return model
+
+    def delete_volume(self, volume):
+        """Delete SolidFire Volume from device.
+
+        SolidFire allows multiple volumes with the same name;
+        volumeID is what's guaranteed unique.
+ + """ + + LOG.debug(_("Enter SolidFire delete_volume...")) + + sfaccount = self._get_sfaccount(volume['project_id']) + if sfaccount is None: + LOG.error(_("Account for Volume ID %s was not found on " + "the SolidFire Cluster!") % volume['id']) + LOG.error(_("This usually means the volume was never " + "succesfully created.")) + return + + params = {'accountID': sfaccount['accountID']} + + sf_vol = self._get_sf_volume(volume['id'], params) + + if sf_vol is not None: + params = {'volumeID': sf_vol['volumeID']} + data = self._issue_api_request('DeleteVolume', params) + + if 'result' not in data: + raise exception.SolidFireAPIDataException(data=data) + else: + LOG.error(_("Volume ID %s was not found on " + "the SolidFire Cluster!"), volume['id']) + + LOG.debug(_("Leaving SolidFire delete_volume")) + + def ensure_export(self, context, volume): + """Verify the iscsi export info.""" + LOG.debug(_("Executing SolidFire ensure_export...")) + return self._do_export(volume) + + def create_export(self, context, volume): + """Setup the iscsi export info.""" + LOG.debug(_("Executing SolidFire create_export...")) + return self._do_export(volume) + + def delete_snapshot(self, snapshot): + """Delete the specified snapshot from the SolidFire cluster.""" + self.delete_volume(snapshot) + + def create_snapshot(self, snapshot): + """Create a snapshot of a volume on the SolidFire cluster. + + Note that for SolidFire Clusters currently there is no snapshot + implementation. Due to the way SF does cloning there's no performance + hit or extra space used. The only thing that's lacking from this is + the abilit to restore snaps. + + After GA a true snapshot implementation will be available with + restore at which time we'll rework this appropriately. + + """ + (data, sfaccount, model) = self._do_clone_volume( + snapshot['volume_id'], + snapshot['project_id'], + snapshot) + + def create_volume_from_snapshot(self, volume, snapshot): + """Create a volume from the specified snapshot.""" + (data, sfaccount, model) = self._do_clone_volume( + snapshot['id'], + snapshot['project_id'], + volume) + + return model + + def get_volume_stats(self, refresh=False): + """Get volume status. + + If 'refresh' is True, run update first. 
+        The name is a bit misleading, as the majority of the data here is
+        cluster data.
+        """
+        if refresh:
+            self._update_cluster_status()
+
+        return self.cluster_stats
+
+    def _update_cluster_status(self):
+        """Retrieve status info for the Cluster."""
+
+        LOG.debug(_("Updating cluster status info"))
+
+        params = {}
+
+        # NOTE(jdg): The SF api provides an UNBELIEVABLE amount
+        # of stats data, this is just one of the calls
+        results = self._issue_api_request('GetClusterCapacity', params)
+        if 'result' not in results:
+            LOG.error(_('Failed to get updated stats'))
+
+        results = results['result']['clusterCapacity']
+        free_capacity =\
+            results['maxProvisionedSpace'] - results['usedSpace']
+
+        data = {}
+        backend_name = self.configuration.safe_get('volume_backend_name')
+        data["volume_backend_name"] = backend_name or self.__class__.__name__
+        data["vendor_name"] = 'SolidFire Inc'
+        data["driver_version"] = VERSION
+        data["storage_protocol"] = 'iSCSI'
+
+        data['total_capacity_gb'] = results['maxProvisionedSpace']
+
+        data['free_capacity_gb'] = float(free_capacity)
+        data['reserved_percentage'] = 0
+        data['QoS_support'] = True
+        data['compression_percent'] =\
+            results['compressionPercent']
+        data['deduplication_percent'] =\
+            results['deDuplicationPercent']
+        data['thin_provision_percent'] =\
+            results['thinProvisioningPercent']
+        self.cluster_stats = data
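`_update_cluster_status` derives free space by subtracting `usedSpace` from `maxProvisionedSpace` in the `GetClusterCapacity` result. A standalone sketch of that arithmetic with invented numbers, assuming the cluster reports byte counts:

# Invented sample fragment of a GetClusterCapacity result.
cluster_capacity = {'maxProvisionedSpace': 100 * (1024 ** 3),
                    'usedSpace': 25 * (1024 ** 3)}

free_capacity = (cluster_capacity['maxProvisionedSpace'] -
                 cluster_capacity['usedSpace'])
print(free_capacity)                   # 80530636800 bytes
print(free_capacity / (1024.0 ** 3))   # 75.0 if converted to GiB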
+ +""" + +import random +import re +import string +import time + +from oslo.config import cfg + +from cinder import context +from cinder import exception +from cinder.openstack.common import excutils +from cinder.openstack.common import log as logging +from cinder.openstack.common import strutils +from cinder import utils +from cinder.volume.drivers.san import san +from cinder.volume import volume_types + +VERSION = 1.1 +LOG = logging.getLogger(__name__) + +storwize_svc_opts = [ + cfg.StrOpt('storwize_svc_volpool_name', + default='volpool', + help='Storage system storage pool for volumes'), + cfg.IntOpt('storwize_svc_vol_rsize', + default=2, + help='Storage system space-efficiency parameter for volumes ' + '(percentage)'), + cfg.IntOpt('storwize_svc_vol_warning', + default=0, + help='Storage system threshold for volume capacity warnings ' + '(percentage)'), + cfg.BoolOpt('storwize_svc_vol_autoexpand', + default=True, + help='Storage system autoexpand parameter for volumes ' + '(True/False)'), + cfg.IntOpt('storwize_svc_vol_grainsize', + default=256, + help='Storage system grain size parameter for volumes ' + '(32/64/128/256)'), + cfg.BoolOpt('storwize_svc_vol_compression', + default=False, + help='Storage system compression option for volumes'), + cfg.BoolOpt('storwize_svc_vol_easytier', + default=True, + help='Enable Easy Tier for volumes'), + cfg.IntOpt('storwize_svc_flashcopy_timeout', + default=120, + help='Maximum number of seconds to wait for FlashCopy to be ' + 'prepared. Maximum value is 600 seconds (10 minutes).'), + cfg.StrOpt('storwize_svc_connection_protocol', + default='iSCSI', + help='Connection protocol (iSCSI/FC)'), + cfg.BoolOpt('storwize_svc_multipath_enabled', + default=False, + help='Connect with multipath (currently FC-only)'), + cfg.BoolOpt('storwize_svc_multihostmap_enabled', + default=True, + help='Allows vdisk to multi host mapping'), +] + + +class StorwizeSVCDriver(san.SanISCSIDriver): + """IBM Storwize V7000 and SVC iSCSI/FC volume driver. + + Version history: + 1.0 - Initial driver + 1.1 - FC support, create_cloned_volume, volume type support, + get_volume_stats, minor bug fixes + + """ + + """=====================================================================""" + """ SETUP """ + """=====================================================================""" + + def __init__(self, *args, **kwargs): + super(StorwizeSVCDriver, self).__init__(*args, **kwargs) + self.configuration.append_config_values(storwize_svc_opts) + self._storage_nodes = {} + self._enabled_protocols = set() + self._compression_enabled = False + self._context = None + + # Build cleanup translation tables for host names + invalid_ch_in_host = '' + for num in range(0, 128): + ch = str(chr(num)) + if (not ch.isalnum() and ch != ' ' and ch != '.' 
+ and ch != '-' and ch != '_'): + invalid_ch_in_host = invalid_ch_in_host + ch + self._string_host_name_filter = string.maketrans( + invalid_ch_in_host, '-' * len(invalid_ch_in_host)) + + self._unicode_host_name_filter = dict((ord(unicode(char)), u'-') + for char in invalid_ch_in_host) + + def _get_iscsi_ip_addrs(self): + generator = self._port_conf_generator('svcinfo lsportip') + header = next(generator, None) + if not header: + return + + for port_data in generator: + try: + port_node_id = port_data['node_id'] + port_ipv4 = port_data['IP_address'] + port_ipv6 = port_data['IP_address_6'] + state = port_data['state'] + except KeyError: + self._handle_keyerror('lsportip', header) + + if port_node_id in self._storage_nodes and ( + state == 'configured' or state == 'online'): + node = self._storage_nodes[port_node_id] + if len(port_ipv4): + node['ipv4'].append(port_ipv4) + if len(port_ipv6): + node['ipv6'].append(port_ipv6) + + def _get_fc_wwpns(self): + for key in self._storage_nodes: + node = self._storage_nodes[key] + ssh_cmd = 'svcinfo lsnode -delim ! %s' % node['id'] + raw = self._run_ssh(ssh_cmd) + resp = CLIResponse(raw, delim='!', with_header=False) + wwpns = set(node['WWPN']) + for i, s in resp.select('port_id', 'port_status'): + if 'unconfigured' != s: + wwpns.add(i) + node['WWPN'] = list(wwpns) + LOG.info(_('WWPN on node %(node)s: %(wwpn)s') + % {'node': node['id'], 'wwpn': node['WWPN']}) + + def do_setup(self, ctxt): + """Check that we have all configuration details from the storage.""" + + LOG.debug(_('enter: do_setup')) + self._context = ctxt + + # Validate that the pool exists + ssh_cmd = 'svcinfo lsmdiskgrp -delim ! -nohdr' + out, err = self._run_ssh(ssh_cmd) + self._assert_ssh_return(len(out.strip()), 'do_setup', + ssh_cmd, out, err) + search_text = '!%s!' % self.configuration.storwize_svc_volpool_name + if search_text not in out: + raise exception.InvalidInput( + reason=(_('pool %s doesn\'t exist') + % self.configuration.storwize_svc_volpool_name)) + + # Check if compression is supported + self._compression_enabled = False + try: + ssh_cmd = 'svcinfo lslicense -delim !' + out, err = self._run_ssh(ssh_cmd) + license_lines = out.strip().split('\n') + for license_line in license_lines: + name, foo, value = license_line.partition('!') + if name in ('license_compression_enclosures', + 'license_compression_capacity') and value != '0': + self._compression_enabled = True + break + except exception.ProcessExecutionError: + LOG.exception(_('Failed to get license information.')) + + # Get the iSCSI and FC names of the Storwize/SVC nodes + ssh_cmd = 'svcinfo lsnode -delim !' 
+ out, err = self._run_ssh(ssh_cmd) + self._assert_ssh_return(len(out.strip()), 'do_setup', + ssh_cmd, out, err) + + nodes = out.strip().split('\n') + self._assert_ssh_return(len(nodes), + 'do_setup', ssh_cmd, out, err) + header = nodes.pop(0) + for node_line in nodes: + try: + node_data = self._get_hdr_dic(header, node_line, '!') + except exception.VolumeBackendAPIException: + with excutils.save_and_reraise_exception(): + self._log_cli_output_error('do_setup', + ssh_cmd, out, err) + node = {} + try: + node['id'] = node_data['id'] + node['name'] = node_data['name'] + node['IO_group'] = node_data['IO_group_id'] + node['iscsi_name'] = node_data['iscsi_name'] + node['WWNN'] = node_data['WWNN'] + node['status'] = node_data['status'] + node['WWPN'] = [] + node['ipv4'] = [] + node['ipv6'] = [] + node['enabled_protocols'] = [] + if node['status'] == 'online': + self._storage_nodes[node['id']] = node + except KeyError: + self._handle_keyerror('lsnode', header) + + # Get the iSCSI IP addresses and WWPNs of the Storwize/SVC nodes + self._get_iscsi_ip_addrs() + self._get_fc_wwpns() + + # For each node, check what connection modes it supports. Delete any + # nodes that do not support any types (may be partially configured). + to_delete = [] + for k, node in self._storage_nodes.iteritems(): + if ((len(node['ipv4']) or len(node['ipv6'])) + and len(node['iscsi_name'])): + node['enabled_protocols'].append('iSCSI') + self._enabled_protocols.add('iSCSI') + if len(node['WWPN']): + node['enabled_protocols'].append('FC') + self._enabled_protocols.add('FC') + if not len(node['enabled_protocols']): + to_delete.append(k) + + for delkey in to_delete: + del self._storage_nodes[delkey] + + # Make sure we have at least one node configured + self._driver_assert(len(self._storage_nodes), + _('do_setup: No configured nodes')) + + LOG.debug(_('leave: do_setup')) + + def _build_default_opts(self): + # Ignore capitalization + protocol = self.configuration.storwize_svc_connection_protocol + if protocol.lower() == 'fc': + protocol = 'FC' + elif protocol.lower() == 'iscsi': + protocol = 'iSCSI' + + opt = {'rsize': self.configuration.storwize_svc_vol_rsize, + 'warning': self.configuration.storwize_svc_vol_warning, + 'autoexpand': self.configuration.storwize_svc_vol_autoexpand, + 'grainsize': self.configuration.storwize_svc_vol_grainsize, + 'compression': self.configuration.storwize_svc_vol_compression, + 'easytier': self.configuration.storwize_svc_vol_easytier, + 'protocol': protocol, + 'multipath': self.configuration.storwize_svc_multipath_enabled} + return opt + + def check_for_setup_error(self): + """Ensure that the flags are set properly.""" + LOG.debug(_('enter: check_for_setup_error')) + + required_flags = ['san_ip', 'san_ssh_port', 'san_login', + 'storwize_svc_volpool_name'] + for flag in required_flags: + if not self.configuration.safe_get(flag): + raise exception.InvalidInput(reason=_('%s is not set') % flag) + + # Ensure that either password or keyfile were set + if not (self.configuration.san_password or + self.configuration.san_private_key): + raise exception.InvalidInput( + reason=_('Password or SSH private key is required for ' + 'authentication: set either san_password or ' + 'san_private_key option')) + + # Check that flashcopy_timeout is not more than 10 minutes + flashcopy_timeout = self.configuration.storwize_svc_flashcopy_timeout + if not (flashcopy_timeout > 0 and flashcopy_timeout <= 600): + raise exception.InvalidInput( + reason=_('Illegal value %d specified for ' + 'storwize_svc_flashcopy_timeout: ' + 
+                         'valid values are between 0 and 600')
+                % flashcopy_timeout)
+
+        opts = self._build_default_opts()
+        self._check_vdisk_opts(opts)
+
+        LOG.debug(_('leave: check_for_setup_error'))
+
+    """====================================================================="""
+    """                INITIALIZE/TERMINATE CONNECTIONS                     """
+    """====================================================================="""
+
+    def ensure_export(self, ctxt, volume):
+        """Check that the volume exists on the storage.
+
+        The system does not "export" volumes as a Linux iSCSI target does,
+        and therefore we just check that the volume exists on the storage.
+        """
+        volume_defined = self._is_vdisk_defined(volume['name'])
+        if not volume_defined:
+            LOG.error(_('ensure_export: Volume %s not found on storage')
+                      % volume['name'])
+
+    def create_export(self, ctxt, volume):
+        model_update = None
+        return model_update
+
+    def remove_export(self, ctxt, volume):
+        pass
+
+    def _add_chapsecret_to_host(self, host_name):
+        """Generate and store a randomly-generated CHAP secret for the host."""
+
+        chap_secret = utils.generate_password()
+        ssh_cmd = ('svctask chhost -chapsecret "%(chap_secret)s" %(host_name)s'
+                   % {'chap_secret': chap_secret, 'host_name': host_name})
+        out, err = self._run_ssh(ssh_cmd)
+        # No output should be returned from chhost
+        self._assert_ssh_return(len(out.strip()) == 0,
+                                '_add_chapsecret_to_host', ssh_cmd, out, err)
+        return chap_secret
+
+    def _get_chap_secret_for_host(self, host_name):
+        """Return the CHAP secret for the given host."""
+
+        LOG.debug(_('enter: _get_chap_secret_for_host: host name %s')
+                  % host_name)
+
+        ssh_cmd = 'svcinfo lsiscsiauth -delim !'
+        out, err = self._run_ssh(ssh_cmd)
+
+        if not len(out.strip()):
+            return None
+
+        host_lines = out.strip().split('\n')
+        self._assert_ssh_return(len(host_lines), '_get_chap_secret_for_host',
+                                ssh_cmd, out, err)
+
+        header = host_lines.pop(0).split('!')
+        self._assert_ssh_return('name' in header, '_get_chap_secret_for_host',
+                                ssh_cmd, out, err)
+        self._assert_ssh_return('iscsi_auth_method' in header,
+                                '_get_chap_secret_for_host', ssh_cmd, out, err)
+        self._assert_ssh_return('iscsi_chap_secret' in header,
+                                '_get_chap_secret_for_host', ssh_cmd, out, err)
+        name_index = header.index('name')
+        method_index = header.index('iscsi_auth_method')
+        secret_index = header.index('iscsi_chap_secret')
+
+        chap_secret = None
+        host_found = False
+        for line in host_lines:
+            info = line.split('!')
+            if info[name_index] == host_name:
+                host_found = True
+                if info[method_index] == 'chap':
+                    chap_secret = info[secret_index]
+
+        self._assert_ssh_return(host_found, '_get_chap_secret_for_host',
+                                ssh_cmd, out, err)
+
+        LOG.debug(_('leave: _get_chap_secret_for_host: host name '
+                    '%(host_name)s with secret %(chap_secret)s')
+                  % {'host_name': host_name, 'chap_secret': chap_secret})
+
+        return chap_secret
+
+    def _connector_to_hostname_prefix(self, connector):
+        """Translate connector info to storage system host name.
+
+        Translate a host's name and IP to the prefix of its hostname on the
+        storage subsystem. We create a host name from the host and
+        IP address, replacing any invalid characters (at most 55 characters),
+        and adding a random 8-character suffix to avoid collisions. The total
+        length should be at most 63 characters.
+
+        """
+
+        host_name = connector['host']
+        if isinstance(host_name, unicode):
+            host_name = host_name.translate(self._unicode_host_name_filter)
+        elif isinstance(host_name, str):
+            host_name = host_name.translate(self._string_host_name_filter)
+        else:
+            msg = _('_create_host: Cannot clean host name. Host name '
+                    'is not unicode or string')
+            LOG.error(msg)
+            raise exception.NoValidHost(reason=msg)
+
+        host_name = str(host_name)
+        return host_name[:55]
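The prefix builder relies on the two translation tables prepared in `__init__`, which map every character that is not alphanumeric or one of space, `.`, `-`, `_` to `-`. A standalone sketch of the same sanitization for byte strings (Python 2 idiom, matching the driver; the connector host name is invented):

import string

# Build a table mapping every disallowed ASCII character to '-'.
invalid = ''.join(chr(num) for num in range(128)
                  if not (chr(num).isalnum() or chr(num) in ' .-_'))
table = string.maketrans(invalid, '-' * len(invalid))

host_name = 'compute-01.example.com/eth0'  # invented connector host
print(host_name.translate(table)[:55])     # compute-01.example.com-eth0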
+ + """ + + host_name = connector['host'] + if isinstance(host_name, unicode): + host_name = host_name.translate(self._unicode_host_name_filter) + elif isinstance(host_name, str): + host_name = host_name.translate(self._string_host_name_filter) + else: + msg = _('_create_host: Cannot clean host name. Host name ' + 'is not unicode or string') + LOG.error(msg) + raise exception.NoValidHost(reason=msg) + + host_name = str(host_name) + return host_name[:55] + + def _find_host_from_wwpn(self, connector): + for wwpn in connector['wwpns']: + ssh_cmd = 'svcinfo lsfabric -wwpn %s -delim !' % wwpn + out, err = self._run_ssh(ssh_cmd) + + if not len(out.strip()): + # This WWPN is not in use + continue + + host_lines = out.strip().split('\n') + header = host_lines.pop(0).split('!') + self._assert_ssh_return('remote_wwpn' in header and + 'name' in header, + '_find_host_from_wwpn', + ssh_cmd, out, err) + rmt_wwpn_idx = header.index('remote_wwpn') + name_idx = header.index('name') + + wwpns = map(lambda x: x.split('!')[rmt_wwpn_idx], host_lines) + + if wwpn in wwpns: + # All the wwpns will be the mapping for the same + # host from this WWPN-based query. Just pick + # the name from first line. + hostname = host_lines[0].split('!')[name_idx] + return hostname + + # Didn't find a host + return None + + def _find_host_exhaustive(self, connector, hosts): + for host in hosts: + ssh_cmd = 'svcinfo lshost -delim ! %s' % host + out, err = self._run_ssh(ssh_cmd) + self._assert_ssh_return(len(out.strip()), + '_find_host_exhaustive', + ssh_cmd, out, err) + for attr_line in out.split('\n'): + # If '!' not found, return the string and two empty strings + attr_name, foo, attr_val = attr_line.partition('!') + if (attr_name == 'iscsi_name' and + 'initiator' in connector and + attr_val == connector['initiator']): + return host + elif (attr_name == 'WWPN' and + 'wwpns' in connector and + attr_val.lower() in + map(str.lower, map(str, connector['wwpns']))): + return host + return None + + def _get_host_from_connector(self, connector): + """List the hosts defined in the storage. + + Return the host name with the given connection info, or None if there + is no host fitting that information. + + """ + + prefix = self._connector_to_hostname_prefix(connector) + LOG.debug(_('enter: _get_host_from_connector: prefix %s') % prefix) + + # Get list of host in the storage + ssh_cmd = 'svcinfo lshost -delim !' + out, err = self._run_ssh(ssh_cmd) + + if not len(out.strip()): + return None + + # If we have FC information, we have a faster lookup option + hostname = None + if 'wwpns' in connector: + hostname = self._find_host_from_wwpn(connector) + + # If we don't have a hostname yet, try the long way + if not hostname: + host_lines = out.strip().split('\n') + self._assert_ssh_return(len(host_lines), + '_get_host_from_connector', + ssh_cmd, out, err) + header = host_lines.pop(0).split('!') + self._assert_ssh_return('name' in header, + '_get_host_from_connector', + ssh_cmd, out, err) + name_index = header.index('name') + hosts = map(lambda x: x.split('!')[name_index], host_lines) + hostname = self._find_host_exhaustive(connector, hosts) + + LOG.debug(_('leave: _get_host_from_connector: host %s') % hostname) + + return hostname + + def _create_host(self, connector): + """Create a new host on the storage system. + + We create a host name and associate it with the given connection + information. 
+ + """ + + LOG.debug(_('enter: _create_host: host %s') % connector['host']) + + rand_id = str(random.randint(0, 99999999)).zfill(8) + host_name = '%s-%s' % (self._connector_to_hostname_prefix(connector), + rand_id) + + # Get all port information from the connector + ports = [] + if 'initiator' in connector: + ports.append('-iscsiname %s' % connector['initiator']) + if 'wwpns' in connector: + for wwpn in connector['wwpns']: + ports.append('-hbawwpn %s' % wwpn) + + # When creating a host, we need one port + self._driver_assert(len(ports), _('_create_host: No connector ports')) + port1 = ports.pop(0) + ssh_cmd = ('svctask mkhost -force %(port1)s -name "%(host_name)s"' % + {'port1': port1, 'host_name': host_name}) + out, err = self._run_ssh(ssh_cmd) + self._assert_ssh_return('successfully created' in out, + '_create_host', ssh_cmd, out, err) + + # Add any additional ports to the host + for port in ports: + ssh_cmd = ('svctask addhostport -force %s %s' % (port, host_name)) + out, err = self._run_ssh(ssh_cmd) + + LOG.debug(_('leave: _create_host: host %(host)s - %(host_name)s') % + {'host': connector['host'], 'host_name': host_name}) + return host_name + + def _get_hostvdisk_mappings(self, host_name): + """Return the defined storage mappings for a host.""" + + return_data = {} + ssh_cmd = 'svcinfo lshostvdiskmap -delim ! %s' % host_name + out, err = self._run_ssh(ssh_cmd) + + mappings = out.strip().split('\n') + if len(mappings): + header = mappings.pop(0) + for mapping_line in mappings: + mapping_data = self._get_hdr_dic(header, mapping_line, '!') + return_data[mapping_data['vdisk_name']] = mapping_data + + return return_data + + def _map_vol_to_host(self, volume_name, host_name): + """Create a mapping between a volume to a host.""" + + LOG.debug(_('enter: _map_vol_to_host: volume %(volume_name)s to ' + 'host %(host_name)s') + % {'volume_name': volume_name, 'host_name': host_name}) + + # Check if this volume is already mapped to this host + mapping_data = self._get_hostvdisk_mappings(host_name) + + mapped_flag = False + result_lun = '-1' + if volume_name in mapping_data: + mapped_flag = True + result_lun = mapping_data[volume_name]['SCSI_id'] + else: + lun_used = [] + for k, v in mapping_data.iteritems(): + lun_used.append(int(v['SCSI_id'])) + lun_used.sort() + # Assume all luns are taken to this point, and then try to find + # an unused one + result_lun = str(len(lun_used)) + for index, n in enumerate(lun_used): + if n > index: + result_lun = str(index) + break + + # Volume is not mapped to host, create a new LUN + if not mapped_flag: + ssh_cmd = ('svctask mkvdiskhostmap -host %(host_name)s -scsi ' + '%(result_lun)s %(volume_name)s' % + {'host_name': host_name, + 'result_lun': result_lun, + 'volume_name': volume_name}) + out, err = self._run_ssh(ssh_cmd, check_exit_code=False) + if err and err.startswith('CMMVC6071E'): + if not self.configuration.storwize_svc_multihostmap_enabled: + LOG.error(_('storwize_svc_multihostmap_enabled is set ' + 'to Flase, Not allow multi host mapping')) + exception_msg = 'CMMVC6071E The VDisk-to-host mapping '\ + 'was not created because the VDisk is '\ + 'already mapped to a host.\n"' + raise exception.CinderException(data=exception_msg) + ssh_cmd = ssh_cmd.replace('mkvdiskhostmap', + 'mkvdiskhostmap -force') + # try to map one volume to multiple hosts + out, err = self._run_ssh(ssh_cmd) + LOG.warn(_('volume %s mapping to multi host') % volume_name) + self._assert_ssh_return('successfully created' in out, + '_map_vol_to_host', ssh_cmd, out, err) + else: + 
self._assert_ssh_return('successfully created' in out, + '_map_vol_to_host', ssh_cmd, out, err) + LOG.debug(_('leave: _map_vol_to_host: LUN %(result_lun)s, volume ' + '%(volume_name)s, host %(host_name)s') % + {'result_lun': result_lun, + 'volume_name': volume_name, + 'host_name': host_name}) + return result_lun + + def _delete_host(self, host_name): + """Delete a host on the storage system.""" + + LOG.debug(_('enter: _delete_host: host %s ') % host_name) + + ssh_cmd = 'svctask rmhost %s ' % host_name + out, err = self._run_ssh(ssh_cmd) + # No output should be returned from rmhost + self._assert_ssh_return(len(out.strip()) == 0, + '_delete_host', ssh_cmd, out, err) + + LOG.debug(_('leave: _delete_host: host %s ') % host_name) + + def _get_conn_fc_wwpns(self, host_name): + wwpns = [] + cmd = 'svcinfo lsfabric -host %s' % host_name + generator = self._port_conf_generator(cmd) + header = next(generator, None) + if not header: + return wwpns + + for port_data in generator: + try: + wwpns.append(port_data['local_wwpn']) + except KeyError as e: + self._handle_keyerror('lsfabric', header) + + return wwpns + + def initialize_connection(self, volume, connector): + """Perform the necessary work so that an iSCSI/FC connection can + be made. + + To be able to create an iSCSI/FC connection from a given host to a + volume, we must: + 1. Translate the given iSCSI name or WWNN to a host name + 2. Create new host on the storage system if it does not yet exist + 3. Map the volume to the host if it is not already done + 4. Return the connection information for relevant nodes (in the + proper I/O group) + + """ + + LOG.debug(_('enter: initialize_connection: volume %(vol)s with ' + 'connector %(conn)s') % {'vol': str(volume), + 'conn': str(connector)}) + + vol_opts = self._get_vdisk_params(volume['volume_type_id']) + host_name = connector['host'] + volume_name = volume['name'] + + # Check if a host object is defined for this host name + host_name = self._get_host_from_connector(connector) + if host_name is None: + # Host does not exist - add a new host to Storwize/SVC + host_name = self._create_host(connector) + # Verify that create_new_host succeeded + self._driver_assert( + host_name is not None, + _('_create_host failed to return the host name.')) + + if vol_opts['protocol'] == 'iSCSI': + chap_secret = self._get_chap_secret_for_host(host_name) + if chap_secret is None: + chap_secret = self._add_chapsecret_to_host(host_name) + + volume_attributes = self._get_vdisk_attributes(volume_name) + lun_id = self._map_vol_to_host(volume_name, host_name) + + self._driver_assert(volume_attributes is not None, + _('initialize_connection: Failed to get attributes' + ' for volume %s') % volume_name) + + try: + preferred_node = volume_attributes['preferred_node_id'] + IO_group = volume_attributes['IO_group_id'] + except KeyError as e: + LOG.error(_('Did not find expected column name in ' + 'lsvdisk: %s') % str(e)) + exception_msg = (_('initialize_connection: Missing volume ' + 'attribute for volume %s') % volume_name) + raise exception.VolumeBackendAPIException(data=exception_msg) + + try: + # Get preferred node and other nodes in I/O group + preferred_node_entry = None + io_group_nodes = [] + for k, node in self._storage_nodes.iteritems(): + if vol_opts['protocol'] not in node['enabled_protocols']: + continue + if node['id'] == preferred_node: + preferred_node_entry = node + if node['IO_group'] == IO_group: + io_group_nodes.append(node) + + if not len(io_group_nodes): + exception_msg = (_('initialize_connection: No node 
found in ' + 'I/O group %(gid)s for volume %(vol)s') % + {'gid': IO_group, 'vol': volume_name}) + raise exception.VolumeBackendAPIException(data=exception_msg) + + if not preferred_node_entry and not vol_opts['multipath']: + # Get 1st node in I/O group + preferred_node_entry = io_group_nodes[0] + LOG.warn(_('initialize_connection: Did not find a preferred ' + 'node for volume %s') % volume_name) + + properties = {} + properties['target_discovered'] = False + properties['target_lun'] = lun_id + properties['volume_id'] = volume['id'] + if vol_opts['protocol'] == 'iSCSI': + type_str = 'iscsi' + # We take the first IP address for now. Ideally, OpenStack will + # support iSCSI multipath for improved performance. + if len(preferred_node_entry['ipv4']): + ipaddr = preferred_node_entry['ipv4'][0] + else: + ipaddr = preferred_node_entry['ipv6'][0] + properties['target_portal'] = '%s:%s' % (ipaddr, '3260') + properties['target_iqn'] = preferred_node_entry['iscsi_name'] + properties['auth_method'] = 'CHAP' + properties['auth_username'] = connector['initiator'] + properties['auth_password'] = chap_secret + else: + type_str = 'fibre_channel' + conn_wwpns = self._get_conn_fc_wwpns(host_name) + if not vol_opts['multipath']: + if preferred_node_entry['WWPN'] in conn_wwpns: + properties['target_wwn'] = preferred_node_entry['WWPN'] + else: + properties['target_wwn'] = conn_wwpns[0] + else: + properties['target_wwn'] = conn_wwpns + except Exception: + with excutils.save_and_reraise_exception(): + self.terminate_connection(volume, connector) + LOG.error(_('initialize_connection: Failed to collect return ' + 'properties for volume %(vol)s and connector ' + '%(conn)s.\n') % {'vol': str(volume), + 'conn': str(connector)}) + + LOG.debug(_('leave: initialize_connection:\n volume: %(vol)s\n ' + 'connector %(conn)s\n properties: %(prop)s') + % {'vol': str(volume), + 'conn': str(connector), + 'prop': str(properties)}) + + return {'driver_volume_type': type_str, 'data': properties, } + + def terminate_connection(self, volume, connector, **kwargs): + """Cleanup after an iSCSI connection has been terminated. + + When we clean up a terminated connection between a given connector + and volume, we: + 1. Translate the given connector to a host name + 2. Remove the volume-to-host mapping if it exists + 3. Delete the host if it has no more mappings (hosts are created + automatically by this driver when mappings are created) + """ + LOG.debug(_('enter: terminate_connection: volume %(vol)s with ' + 'connector %(conn)s') % {'vol': str(volume), + 'conn': str(connector)}) + + vol_name = volume['name'] + host_name = self._get_host_from_connector(connector) + # Verify that _get_host_from_connector returned the host. + # This should always succeed as we terminate an existing connection. 
+        self._driver_assert(
+            host_name is not None,
+            _('_get_host_from_connector failed to return the host name '
+              'for connector'))
+
+        # Check if a vdisk-host mapping exists; remove it if it does
+        mapping_data = self._get_hostvdisk_mappings(host_name)
+        if vol_name in mapping_data:
+            ssh_cmd = 'svctask rmvdiskhostmap -host %s %s' % \
+                      (host_name, vol_name)
+            out, err = self._run_ssh(ssh_cmd)
+            # Verify CLI behaviour - no output is returned from
+            # rmvdiskhostmap
+            self._assert_ssh_return(len(out.strip()) == 0,
+                                    'terminate_connection', ssh_cmd, out, err)
+            del mapping_data[vol_name]
+        else:
+            LOG.error(_('terminate_connection: No mapping of volume '
+                        '%(vol_name)s to host %(host_name)s found') %
+                      {'vol_name': vol_name, 'host_name': host_name})
+
+        # If this host has no more mappings, delete it
+        if not mapping_data:
+            self._delete_host(host_name)
+
+        LOG.debug(_('leave: terminate_connection: volume %(vol)s with '
+                    'connector %(conn)s') % {'vol': str(volume),
+                                             'conn': str(connector)})
+
+    """====================================================================="""
+    """ VOLUMES/SNAPSHOTS                                                  """
+    """====================================================================="""
+
+    def _get_vdisk_attributes(self, vdisk_name):
+        """Return vdisk attributes, or None if the vdisk does not exist.
+
+        An exception is raised if the information from the system cannot be
+        parsed or matched to a single vdisk.
+        """
+
+        ssh_cmd = 'svcinfo lsvdisk -bytes -delim ! %s ' % vdisk_name
+        return self._execute_command_and_parse_attributes(ssh_cmd)
+
+    def _get_vdisk_fc_mappings(self, vdisk_name):
+        """Return FlashCopy mappings that this vdisk is associated with."""
+
+        ssh_cmd = 'svcinfo lsvdiskfcmappings -nohdr %s' % vdisk_name
+        out, err = self._run_ssh(ssh_cmd)
+
+        mapping_ids = []
+        if (len(out.strip())):
+            lines = out.strip().split('\n')
+            for line in lines:
+                mapping_ids.append(line.split()[0])
+        return mapping_ids
+
+    def _get_vdisk_params(self, type_id):
+        opts = self._build_default_opts()
+        if type_id:
+            ctxt = context.get_admin_context()
+            volume_type = volume_types.get_volume_type(ctxt, type_id)
+            specs = volume_type.get('extra_specs')
+            for k, value in specs.iteritems():
+                # Get the scope, if using scope format
+                key_split = k.split(':')
+                if len(key_split) == 1:
+                    scope = None
+                    key = key_split[0]
+                else:
+                    scope = key_split[0]
+                    key = key_split[1]
+
+                # We generally do not look at capabilities in the driver, but
+                # protocol is a special case where the user asks for a given
+                # protocol and we want both the scheduler and the driver to
+                # act on the value.
+                if scope == 'capabilities' and key == 'storage_protocol':
+                    scope = None
+                    key = 'protocol'
+                    words = value.split()
+                    self._driver_assert(words and
+                                        len(words) == 2 and
+                                        words[0] == '<in>',
+                                        _('protocol must be specified as '
+                                          '\'<in> iSCSI\' or \'<in> FC\''))
+                    del words[0]
+                    value = words[0]
+
+                # Any keys that the driver should look at should have the
+                # 'drivers' scope.
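+                # For example (illustrative): a volume type with extra specs
+                # {'drivers:rsize': '5',
+                #  'capabilities:storage_protocol': '<in> iSCSI'} reaches
+                # this point as ('rsize', '5') in the 'drivers' scope and as
+                # ('protocol', 'iSCSI') with no scope.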
+ if scope and scope != "drivers": + continue + + if key in opts: + this_type = type(opts[key]).__name__ + if this_type == 'int': + value = int(value) + elif this_type == 'bool': + value = strutils.bool_from_string(value) + opts[key] = value + + self._check_vdisk_opts(opts) + return opts + + def _create_vdisk(self, name, size, units, opts): + """Create a new vdisk.""" + + LOG.debug(_('enter: _create_vdisk: vdisk %s ') % name) + + model_update = None + autoex = '-autoexpand' if opts['autoexpand'] else '' + easytier = '-easytier on' if opts['easytier'] else '-easytier off' + + # Set space-efficient options + if opts['rsize'] == -1: + ssh_cmd_se_opt = '' + else: + ssh_cmd_se_opt = ( + '-rsize %(rsize)d%% %(autoex)s -warning %(warn)d%%' % + {'rsize': opts['rsize'], + 'autoex': autoex, + 'warn': opts['warning']}) + if opts['compression']: + ssh_cmd_se_opt = ssh_cmd_se_opt + ' -compressed' + else: + ssh_cmd_se_opt = ssh_cmd_se_opt + ( + ' -grainsize %d' % opts['grainsize']) + + ssh_cmd = ('svctask mkvdisk -name %(name)s -mdiskgrp %(mdiskgrp)s ' + '-iogrp 0 -size %(size)s -unit ' + '%(unit)s %(easytier)s %(ssh_cmd_se_opt)s' + % {'name': name, + 'mdiskgrp': self.configuration.storwize_svc_volpool_name, + 'size': size, 'unit': units, 'easytier': easytier, + 'ssh_cmd_se_opt': ssh_cmd_se_opt}) + out, err = self._run_ssh(ssh_cmd) + self._assert_ssh_return(len(out.strip()), '_create_vdisk', + ssh_cmd, out, err) + + # Ensure that the output is as expected + match_obj = re.search('Virtual Disk, id \[([0-9]+)\], ' + 'successfully created', out) + # Make sure we got a "successfully created" message with vdisk id + self._driver_assert( + match_obj is not None, + _('_create_vdisk %(name)s - did not find ' + 'success message in CLI output.\n ' + 'stdout: %(out)s\n stderr: %(err)s') + % {'name': name, 'out': str(out), 'err': str(err)}) + + LOG.debug(_('leave: _create_vdisk: volume %s ') % name) + + def _make_fc_map(self, source, target, full_copy): + copyflag = '' if full_copy else '-copyrate 0' + fc_map_cli_cmd = ('svctask mkfcmap -source %(src)s -target %(tgt)s ' + '-autodelete %(copyflag)s' % + {'src': source, + 'tgt': target, + 'copyflag': copyflag}) + out, err = self._run_ssh(fc_map_cli_cmd) + self._driver_assert( + len(out.strip()), + _('create FC mapping from %(source)s to %(target)s - ' + 'did not find success message in CLI output.\n' + ' stdout: %(out)s\n stderr: %(err)s\n') + % {'source': source, + 'target': target, + 'out': str(out), + 'err': str(err)}) + + # Ensure that the output is as expected + match_obj = re.search('FlashCopy Mapping, id \[([0-9]+)\], ' + 'successfully created', out) + # Make sure we got a "successfully created" message with vdisk id + self._driver_assert( + match_obj is not None, + _('create FC mapping from %(source)s to %(target)s - ' + 'did not find success message in CLI output.\n' + ' stdout: %(out)s\n stderr: %(err)s\n') + % {'source': source, + 'target': target, + 'out': str(out), + 'err': str(err)}) + + try: + fc_map_id = match_obj.group(1) + self._driver_assert( + fc_map_id is not None, + _('create FC mapping from %(source)s to %(target)s - ' + 'did not find mapping id in CLI output.\n' + ' stdout: %(out)s\n stderr: %(err)s\n') + % {'source': source, + 'target': target, + 'out': str(out), + 'err': str(err)}) + except IndexError: + self._driver_assert( + False, + _('create FC mapping from %(source)s to %(target)s - ' + 'did not find mapping id in CLI output.\n' + ' stdout: %(out)s\n stderr: %(err)s\n') + % {'source': source, + 'target': target, + 'out': str(out), + 
'err': str(err)})
+        return fc_map_id
+
+    def _call_prepare_fc_map(self, fc_map_id, source, target):
+        try:
+            out, err = self._run_ssh('svctask prestartfcmap %s' % fc_map_id)
+        except exception.ProcessExecutionError as e:
+            with excutils.save_and_reraise_exception():
+                LOG.error(_('_prepare_fc_map: Failed to prepare FlashCopy '
+                            'from %(source)s to %(target)s.\n'
+                            'stdout: %(out)s\n stderr: %(err)s')
+                          % {'source': source,
+                             'target': target,
+                             'out': e.stdout,
+                             'err': e.stderr})
+
+    def _prepare_fc_map(self, fc_map_id, source, target):
+        self._call_prepare_fc_map(fc_map_id, source, target)
+        mapping_ready = False
+        wait_time = 5
+        # Wait up to the configured timeout for the mapping to be prepared
+        timeout = self.configuration.storwize_svc_flashcopy_timeout
+        max_retries = (timeout / wait_time) + 1
+        for try_number in range(1, max_retries):
+            mapping_attrs = self._get_flashcopy_mapping_attributes(fc_map_id)
+            if (mapping_attrs is None or
+                    'status' not in mapping_attrs):
+                break
+            if mapping_attrs['status'] == 'prepared':
+                mapping_ready = True
+                break
+            elif mapping_attrs['status'] == 'stopped':
+                self._call_prepare_fc_map(fc_map_id, source, target)
+            elif mapping_attrs['status'] != 'preparing':
+                # Unexpected mapping status
+                exception_msg = (_('Unexpected mapping status %(status)s '
+                                   'for mapping %(id)s. Attributes: '
+                                   '%(attr)s')
+                                 % {'status': mapping_attrs['status'],
+                                    'id': fc_map_id,
+                                    'attr': mapping_attrs})
+                raise exception.VolumeBackendAPIException(data=exception_msg)
+            # The mapping is not ready yet; sleep before polling again
+            time.sleep(wait_time)
+
+        if not mapping_ready:
+            exception_msg = (_('Mapping %(id)s prepare failed to complete '
+                               'within the allotted %(to)d seconds timeout. '
+                               'Terminating.')
+                             % {'id': fc_map_id,
+                                'to': timeout})
+            LOG.error(_('_prepare_fc_map: Failed to start FlashCopy '
+                        'from %(source)s to %(target)s with '
+                        'exception %(ex)s')
+                      % {'source': source,
+                         'target': target,
+                         'ex': exception_msg})
+            raise exception.InvalidSnapshot(
+                reason=_('_prepare_fc_map: %s') % exception_msg)
+
+    def _start_fc_map(self, fc_map_id, source, target):
+        try:
+            out, err = self._run_ssh('svctask startfcmap %s' % fc_map_id)
+        except exception.ProcessExecutionError as e:
+            with excutils.save_and_reraise_exception():
+                LOG.error(_('_start_fc_map: Failed to start FlashCopy '
+                            'from %(source)s to %(target)s.\n'
+                            'stdout: %(out)s\n stderr: %(err)s')
+                          % {'source': source,
+                             'target': target,
+                             'out': e.stdout,
+                             'err': e.stderr})
+
+    def _run_flashcopy(self, source, target, full_copy=True):
+        """Create a FlashCopy mapping from the source to the target."""
+
+        LOG.debug(_('enter: _run_flashcopy: execute FlashCopy from source '
+                    '%(source)s to target %(target)s') %
+                  {'source': source, 'target': target})
+
+        fc_map_id = self._make_fc_map(source, target, full_copy)
+        try:
+            self._prepare_fc_map(fc_map_id, source, target)
+            self._start_fc_map(fc_map_id, source, target)
+        except Exception:
+            with excutils.save_and_reraise_exception():
+                self._delete_vdisk(target, True)
+
+        LOG.debug(_('leave: _run_flashcopy: FlashCopy started from '
+                    '%(source)s to %(target)s') %
+                  {'source': source, 'target': target})
+
+    def _create_copy(self, src_vdisk, tgt_vdisk, full_copy, opts, src_id,
+                     from_vol):
+        """Create a new snapshot using FlashCopy."""
+
+        LOG.debug(_('enter: _create_copy: snapshot %(tgt_vdisk)s from '
+                    'vdisk %(src_vdisk)s') %
+                  {'tgt_vdisk': tgt_vdisk, 'src_vdisk': src_vdisk})
+
+        src_vdisk_attributes = self._get_vdisk_attributes(src_vdisk)
+        if src_vdisk_attributes is
None: + exception_msg = ( + _('_create_copy: Source vdisk %s does not exist') + % src_vdisk) + LOG.error(exception_msg) + if from_vol: + raise exception.VolumeNotFound(exception_msg, + volume_id=src_id) + else: + raise exception.SnapshotNotFound(exception_msg, + snapshot_id=src_id) + + self._driver_assert( + 'capacity' in src_vdisk_attributes, + _('_create_copy: cannot get source vdisk ' + '%(src)s capacity from vdisk attributes ' + '%(attr)s') + % {'src': src_vdisk, + 'attr': src_vdisk_attributes}) + + src_vdisk_size = src_vdisk_attributes['capacity'] + self._create_vdisk(tgt_vdisk, src_vdisk_size, 'b', opts) + self._run_flashcopy(src_vdisk, tgt_vdisk, full_copy) + + LOG.debug(_('leave: _create_copy: snapshot %(tgt_vdisk)s from ' + 'vdisk %(src_vdisk)s') % + {'tgt_vdisk': tgt_vdisk, 'src_vdisk': src_vdisk}) + + def _get_flashcopy_mapping_attributes(self, fc_map_id): + LOG.debug(_('enter: _get_flashcopy_mapping_attributes: mapping %s') + % fc_map_id) + + fc_ls_map_cmd = 'svcinfo lsfcmap -filtervalue id=%s -delim !' % \ + fc_map_id + out, err = self._run_ssh(fc_ls_map_cmd) + if not len(out.strip()): + return None + + # Get list of FlashCopy mappings + # We expect zero or one line if mapping does not exist, + # two lines if it does exist, otherwise error + lines = out.strip().split('\n') + self._assert_ssh_return(len(lines) <= 2, + '_get_flashcopy_mapping_attributes', + fc_ls_map_cmd, out, err) + + if len(lines) == 2: + attributes = self._get_hdr_dic(lines[0], lines[1], '!') + else: # 0 or 1 lines + attributes = None + + LOG.debug(_('leave: _get_flashcopy_mapping_attributes: mapping ' + '%(fc_map_id)s, attributes %(attributes)s') % + {'fc_map_id': fc_map_id, 'attributes': attributes}) + + return attributes + + def _is_vdisk_defined(self, vdisk_name): + """Check if vdisk is defined.""" + LOG.debug(_('enter: _is_vdisk_defined: vdisk %s ') % vdisk_name) + vdisk_attributes = self._get_vdisk_attributes(vdisk_name) + LOG.debug(_('leave: _is_vdisk_defined: vdisk %(vol)s with %(str)s ') + % {'vol': vdisk_name, + 'str': vdisk_attributes is not None}) + if vdisk_attributes is None: + return False + else: + return True + + def _delete_vdisk(self, name, force): + """Deletes existing vdisks. + + It is very important to properly take care of mappings before deleting + the disk: + 1. If no mappings, then it was a vdisk, and can be deleted + 2. If it is the source of a flashcopy mapping and copy_rate is 0, then + it is a vdisk that has a snapshot. If the force flag is set, + delete the mapping and the vdisk, otherwise set the mapping to + copy and wait (this will allow users to delete vdisks that have + snapshots if/when the upper layers allow it). + 3. If it is the target of a mapping and copy_rate is 0, it is a + snapshot, and we should properly stop the mapping and delete. + 4. If it is the source/target of a mapping and copy_rate is not 0, it + is a clone or vdisk created from a snapshot. We wait for the copy + to complete (the mapping will be autodeleted) and then delete the + vdisk. 
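+
+        For example, deleting a vdisk that still has a snapshot (case 2)
+        switches the mapping to a real copy ('chfcmap -copyrate 50
+        -autodelete on') and polls until the mapping disappears before
+        removing the vdisk itself.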
+ + """ + + LOG.debug(_('enter: _delete_vdisk: vdisk %s') % name) + + # Try to delete volume only if found on the storage + vdisk_defined = self._is_vdisk_defined(name) + if not vdisk_defined: + LOG.info(_('warning: Tried to delete vdisk %s but it does not ' + 'exist.') % name) + return + + # Ensure vdisk has no FlashCopy mappings + mapping_ids = self._get_vdisk_fc_mappings(name) + while len(mapping_ids): + wait_for_copy = False + for map_id in mapping_ids: + attrs = self._get_flashcopy_mapping_attributes(map_id) + if not attrs: + continue + source = attrs['source_vdisk_name'] + target = attrs['target_vdisk_name'] + copy_rate = attrs['copy_rate'] + status = attrs['status'] + + if copy_rate == '0': + # Case #2: A vdisk that has snapshots + if source == name: + ssh_cmd = ('svctask chfcmap -copyrate 50 ' + '-autodelete on %s' % map_id) + out, err = self._run_ssh(ssh_cmd) + wait_for_copy = True + # Case #3: A snapshot + else: + msg = (_('Vdisk %(name)s not involved in ' + 'mapping %(src)s -> %(tgt)s') % + {'name': name, 'src': source, 'tgt': target}) + self._driver_assert(target == name, msg) + if status in ['copying', 'prepared']: + self._run_ssh('svctask stopfcmap %s' % map_id) + elif status == 'stopping': + wait_for_copy = True + else: + self._run_ssh('svctask rmfcmap -force %s' % map_id) + # Case 4: Copy in progress - wait and will autodelete + else: + if status == 'prepared': + self._run_ssh('svctask stopfcmap %s' % map_id) + self._run_ssh('svctask rmfcmap -force %s' % map_id) + elif status == 'idle_or_copied': + # Prepare failed + self._run_ssh('svctask rmfcmap -force %s' % map_id) + else: + wait_for_copy = True + if wait_for_copy: + time.sleep(5) + mapping_ids = self._get_vdisk_fc_mappings(name) + + forceflag = '-force' if force else '' + cmd_params = {'frc': forceflag, 'name': name} + ssh_cmd = 'svctask rmvdisk %(frc)s %(name)s' % cmd_params + out, err = self._run_ssh(ssh_cmd) + # No output should be returned from rmvdisk + self._assert_ssh_return(len(out.strip()) == 0, + ('_delete_vdisk %(name)s') + % {'name': name}, + ssh_cmd, out, err) + LOG.debug(_('leave: _delete_vdisk: vdisk %s') % name) + + def create_volume(self, volume): + opts = self._get_vdisk_params(volume['volume_type_id']) + return self._create_vdisk(volume['name'], str(volume['size']), 'gb', + opts) + + def delete_volume(self, volume): + self._delete_vdisk(volume['name'], False) + + def create_snapshot(self, snapshot): + source_vol = self.db.volume_get(self._context, snapshot['volume_id']) + opts = self._get_vdisk_params(source_vol['volume_type_id']) + self._create_copy(src_vdisk=snapshot['volume_name'], + tgt_vdisk=snapshot['name'], + full_copy=False, + opts=opts, + src_id=snapshot['volume_id'], + from_vol=True) + + def delete_snapshot(self, snapshot): + self._delete_vdisk(snapshot['name'], False) + + def create_volume_from_snapshot(self, volume, snapshot): + if volume['size'] != snapshot['volume_size']: + exception_message = (_('create_volume_from_snapshot: ' + 'Source and destination size differ.')) + raise exception.VolumeBackendAPIException(data=exception_message) + + opts = self._get_vdisk_params(volume['volume_type_id']) + self._create_copy(src_vdisk=snapshot['name'], + tgt_vdisk=volume['name'], + full_copy=True, + opts=opts, + src_id=snapshot['id'], + from_vol=False) + + def create_cloned_volume(self, tgt_volume, src_volume): + if src_volume['size'] != tgt_volume['size']: + exception_message = (_('create_cloned_volume: ' + 'Source and destination size differ.')) + raise 
+
+        opts = self._get_vdisk_params(tgt_volume['volume_type_id'])
+        self._create_copy(src_vdisk=src_volume['name'],
+                          tgt_vdisk=tgt_volume['name'],
+                          full_copy=True,
+                          opts=opts,
+                          src_id=src_volume['id'],
+                          from_vol=True)
+
+    def copy_image_to_volume(self, context, volume, image_service, image_id):
+        opts = self._get_vdisk_params(volume['volume_type_id'])
+        if opts['protocol'] == 'iSCSI':
+            # Implemented in base iSCSI class
+            return super(StorwizeSVCDriver, self).copy_image_to_volume(
+                context, volume, image_service, image_id)
+        else:
+            raise NotImplementedError()
+
+    def copy_volume_to_image(self, context, volume, image_service, image_meta):
+        opts = self._get_vdisk_params(volume['volume_type_id'])
+        if opts['protocol'] == 'iSCSI':
+            # Implemented in base iSCSI class
+            return super(StorwizeSVCDriver, self).copy_volume_to_image(
+                context, volume, image_service, image_meta)
+        else:
+            raise NotImplementedError()
+
+    """====================================================================="""
+    """ MISC/HELPERS                                                       """
+    """====================================================================="""
+
+    def get_volume_stats(self, refresh=False):
+        """Get volume status.
+
+        If we haven't gotten stats yet or 'refresh' is True,
+        update the stats first."""
+        if not self._stats or refresh:
+            self._update_volume_status()
+
+        return self._stats
+
+    def _update_volume_status(self):
+        """Retrieve status info from the volume group."""
+
+        LOG.debug(_("Updating volume status"))
+        data = {}
+
+        data['vendor_name'] = 'IBM'
+        data['driver_version'] = '1.1'
+        data['storage_protocol'] = list(self._enabled_protocols)
+
+        data['total_capacity_gb'] = 0  # To be overwritten
+        data['free_capacity_gb'] = 0  # To be overwritten
+        data['reserved_percentage'] = 0
+        data['QoS_support'] = False
+
+        pool = self.configuration.storwize_svc_volpool_name
+        # Get the storage system name
+        ssh_cmd = 'svcinfo lssystem -delim !'
+        attributes = self._execute_command_and_parse_attributes(ssh_cmd)
+        if not attributes or not attributes['name']:
+            exception_message = (_('_update_volume_status: '
+                                   'Could not get system name'))
+            raise exception.VolumeBackendAPIException(data=exception_message)
+
+        backend_name = self.configuration.safe_get('volume_backend_name')
+        if not backend_name:
+            backend_name = '%s_%s' % (attributes['name'], pool)
+        data['volume_backend_name'] = backend_name
+
+        ssh_cmd = 'svcinfo lsmdiskgrp -bytes -delim ! %s' % pool
+        attributes = self._execute_command_and_parse_attributes(ssh_cmd)
+        if not attributes:
+            LOG.error(_('Could not get pool data from the storage'))
+            exception_message = (_('_update_volume_status: '
+                                   'Could not get storage pool data'))
+            raise exception.VolumeBackendAPIException(data=exception_message)
+
+        data['total_capacity_gb'] = (float(attributes['capacity']) /
+                                     (1024 ** 3))
+        data['free_capacity_gb'] = (float(attributes['free_capacity']) /
+                                    (1024 ** 3))
+        data['easytier_support'] = attributes['easy_tier'] in ['on', 'auto']
+        data['compression_support'] = self._compression_enabled
+
+        self._stats = data
+
+    def _port_conf_generator(self, cmd):
+        ssh_cmd = '%s -delim !'
% cmd + out, err = self._run_ssh(ssh_cmd) + + if not len(out.strip()): + return + port_lines = out.strip().split('\n') + if not len(port_lines): + return + + header = port_lines.pop(0) + yield header + for portip_line in port_lines: + try: + port_data = self._get_hdr_dic(header, portip_line, '!') + except exception.VolumeBackendAPIException: + with excutils.save_and_reraise_exception(): + self._log_cli_output_error('_port_conf_generator', + ssh_cmd, out, err) + yield port_data + + def _check_vdisk_opts(self, opts): + # Check that rsize is either -1 or between 0 and 100 + if not (opts['rsize'] >= -1 and opts['rsize'] <= 100): + raise exception.InvalidInput( + reason=_('Illegal value specified for storwize_svc_vol_rsize: ' + 'set to either a percentage (0-100) or -1')) + + # Check that warning is either -1 or between 0 and 100 + if not (opts['warning'] >= -1 and opts['warning'] <= 100): + raise exception.InvalidInput( + reason=_('Illegal value specified for ' + 'storwize_svc_vol_warning: ' + 'set to a percentage (0-100)')) + + # Check that grainsize is 32/64/128/256 + if opts['grainsize'] not in [32, 64, 128, 256]: + raise exception.InvalidInput( + reason=_('Illegal value specified for ' + 'storwize_svc_vol_grainsize: set to either ' + '32, 64, 128, or 256')) + + # Check that compression is supported + if opts['compression'] and not self._compression_enabled: + raise exception.InvalidInput( + reason=_('System does not support compression')) + + # Check that rsize is set if compression is set + if opts['compression'] and opts['rsize'] == -1: + raise exception.InvalidInput( + reason=_('If compression is set to True, rsize must ' + 'also be set (not equal to -1)')) + + # Check that the requested protocol is enabled + if opts['protocol'] not in self._enabled_protocols: + raise exception.InvalidInput( + reason=_('Illegal value %(prot)s specified for ' + 'storwize_svc_connection_protocol: ' + 'valid values are %(enabled)s') + % {'prot': opts['protocol'], + 'enabled': ','.join(self._enabled_protocols)}) + + # Check that multipath is only enabled for fc + if opts['protocol'] != 'FC' and opts['multipath']: + raise exception.InvalidInput( + reason=_('Multipath is currently only supported for FC ' + 'connections and not iSCSI. (This is a Nova ' + 'limitation.)')) + + def _execute_command_and_parse_attributes(self, ssh_cmd): + """Execute command on the Storwize/SVC and parse attributes. + + Exception is raised if the information from the system + can not be obtained. + + """ + + LOG.debug(_('enter: _execute_command_and_parse_attributes: ' + ' command %s') % ssh_cmd) + + try: + out, err = self._run_ssh(ssh_cmd) + except exception.ProcessExecutionError as e: + # Didn't get details from the storage, return None + LOG.error(_('CLI Exception output:\n command: %(cmd)s\n ' + 'stdout: %(out)s\n stderr: %(err)s') % + {'cmd': ssh_cmd, + 'out': e.stdout, + 'err': e.stderr}) + return None + + self._assert_ssh_return(len(out), + '_execute_command_and_parse_attributes', + ssh_cmd, out, err) + attributes = {} + for attrib_line in out.split('\n'): + # If '!' 
not found, return the string and two empty strings
+            attrib_name, foo, attrib_value = attrib_line.partition('!')
+            if attrib_name is not None and len(attrib_name.strip()):
+                attributes[attrib_name] = attrib_value
+
+        LOG.debug(_('leave: _execute_command_and_parse_attributes:\n'
+                    'command: %(cmd)s\n'
+                    'attributes: %(attr)s')
+                  % {'cmd': ssh_cmd,
+                     'attr': str(attributes)})
+
+        return attributes
+
+    def _get_hdr_dic(self, header, row, delim):
+        """Return CLI row data as a dictionary indexed by names from header.
+
+        The strings are converted to columns using the delimiter in delim.
+        """
+
+        attributes = header.split(delim)
+        values = row.split(delim)
+        self._driver_assert(
+            len(values) == len(attributes),
+            _('_get_hdr_dic: attribute headers and values do not match.\n '
+              'Headers: %(header)s\n Values: %(row)s')
+            % {'header': str(header),
+               'row': str(row)})
+        dic = {}
+        for attribute, value in map(None, attributes, values):
+            dic[attribute] = value
+        return dic
+
+    def _log_cli_output_error(self, function, cmd, out, err):
+        LOG.error(_('%(fun)s: Failed with unexpected CLI output.\n '
+                    'Command: %(cmd)s\nstdout: %(out)s\nstderr: %(err)s\n')
+                  % {'fun': function, 'cmd': cmd,
+                     'out': str(out), 'err': str(err)})
+
+    def _driver_assert(self, assert_condition, exception_message):
+        """Internal assertion mechanism for CLI output."""
+        if not assert_condition:
+            LOG.error(exception_message)
+            raise exception.VolumeBackendAPIException(data=exception_message)
+
+    def _assert_ssh_return(self, test, fun, ssh_cmd, out, err):
+        self._driver_assert(
+            test,
+            _('%(fun)s: Failed with unexpected CLI output.\n '
+              'Command: %(cmd)s\n stdout: %(out)s\n stderr: %(err)s')
+            % {'fun': fun,
+               'cmd': ssh_cmd,
+               'out': str(out),
+               'err': str(err)})
+
+    def _handle_keyerror(self, function, header):
+        msg = (_('Did not find expected column in %(fun)s: %(hdr)s') %
+               {'fun': function, 'hdr': header})
+        LOG.error(msg)
+        raise exception.VolumeBackendAPIException(
+            data=msg)
+
+
+class CLIResponse(object):
+    '''Parse SVC CLI output and generate an iterable.'''
+
+    def __init__(self, raw, delim='!', with_header=True):
+        super(CLIResponse, self).__init__()
+        self.raw = raw
+        self.delim = delim
+        self.with_header = with_header
+        self.result = self._parse()
+
+    def select(self, *keys):
+        for a in self.result:
+            vs = []
+            for k in keys:
+                v = a.get(k, None)
+                if isinstance(v, basestring):
+                    v = [v]
+                if isinstance(v, list):
+                    vs.append(v)
+            for item in zip(*vs):
+                yield item
+
+    def __getitem__(self, key):
+        return self.result[key]
+
+    def __iter__(self):
+        for a in self.result:
+            yield a
+
+    def __len__(self):
+        return len(self.result)
+
+    def _parse(self):
+        def get_reader(content, delim):
+            for line in content.lstrip().splitlines():
+                line = line.strip()
+                if line:
+                    yield line.split(delim)
+                else:
+                    yield []
+
+        if isinstance(self.raw, basestring):
+            stdout, stderr = self.raw, ''
+        else:
+            stdout, stderr = self.raw
+        reader = get_reader(stdout, self.delim)
+        result = []
+
+        if self.with_header:
+            hds = tuple()
+            for row in reader:
+                hds = row
+                break
+            for row in reader:
+                cur = dict()
+                for k, v in zip(hds, row):
+                    CLIResponse.append_dict(cur, k, v)
+                result.append(cur)
+        else:
+            cur = dict()
+            for row in reader:
+                if row:
+                    CLIResponse.append_dict(cur, row[0], ' '.join(row[1:]))
+                elif cur:  # start new section
+                    result.append(cur)
+                    cur = dict()
+            if cur:
+                result.append(cur)
+        return result
+
+    @staticmethod
+    def append_dict(dict_, key, value):
+        key, value = key.strip(), value.strip()
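+        # For example (illustrative): append_dict({}, 'id', '1') returns
+        # {'id': '1'}; appending key 'id' again collects the values into a
+        # list, e.g. {'id': ['1', '2']}.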
+        obj = dict_.get(key, None)
+        if obj is None:
+            dict_[key] = value
+        elif isinstance(obj, list):
+            obj.append(value)
+            dict_[key] = obj
+        else:
+            dict_[key] = [obj, value]
+        return dict_
diff --git a/cinder/volume/drivers/windows.py b/cinder/volume/drivers/windows.py
new file mode 100644
index 0000000000..a0113b1535
--- /dev/null
+++ b/cinder/volume/drivers/windows.py
@@ -0,0 +1,246 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4

+# Copyright 2012 Pedro Navarro Perez
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Volume driver for Windows Server 2012
+
+This driver requires the iSCSI Target role to be installed.
+
+"""
+import os
+import sys
+
+from oslo.config import cfg
+
+from cinder import exception
+from cinder import flags
+from cinder.openstack.common import log as logging
+from cinder.volume import driver
+
+# Check needed for unit testing on Unix
+if os.name == 'nt':
+    import wmi
+
+
+LOG = logging.getLogger(__name__)
+
+FLAGS = flags.FLAGS
+
+windows_opts = [
+    cfg.StrOpt('windows_iscsi_lun_path',
+               default=r'C:\iSCSIVirtualDisks',
+               help='Path to store VHD backed volumes'),
+]
+
+FLAGS.register_opts(windows_opts)
+
+
+class WindowsDriver(driver.ISCSIDriver):
+    """Executes volume driver commands on a Windows storage server."""
+
+    def __init__(self, *args, **kwargs):
+        super(WindowsDriver, self).__init__(*args, **kwargs)
+
+    def do_setup(self, context):
+        """Set up the Windows volume driver.
+
+        Called one time by the manager after the driver is loaded.
+        Validate the flags we care about.
+        """
+        # Set up the WMI connections used by the driver
+        self._conn_wmi = wmi.WMI(moniker='//./root/wmi')
+        self._conn_cimv2 = wmi.WMI(moniker='//./root/cimv2')
+
+    def check_for_setup_error(self):
+        """Check that the driver is working and can communicate.
+        """
+        # Invoke the portal and check that it is listening
+        wt_portal = self._conn_wmi.WT_Portal()[0]
+        listen = wt_portal.Listen
+        if not listen:
+            raise exception.VolumeBackendAPIException()
+
+    def initialize_connection(self, volume, connector):
+        """Driver entry point to attach a volume to an instance.
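+
+        Returns connection information of the form
+        {'driver_volume_type': 'iscsi', 'data': {...}}, where 'data' carries
+        the target portal, IQN, LUN and, if set, the CHAP credentials.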
+ """ + initiator_name = connector['initiator'] + target_name = volume['provider_location'] + + cl = self._conn_wmi.__getattr__("WT_IDMethod") + wt_idmethod = cl.new() + wt_idmethod.HostName = target_name + wt_idmethod.Method = 4 + wt_idmethod.Value = initiator_name + wt_idmethod.put() + #Getting the portal and port information + wt_portal = self._conn_wmi.WT_Portal()[0] + (address, port) = (wt_portal.Address, wt_portal.Port) + #Getting the host information + hosts = self._conn_wmi.WT_Host(Hostname=target_name) + host = hosts[0] + + properties = {} + properties['target_discovered'] = False + properties['target_portal'] = '%s:%s' % (address, port) + properties['target_iqn'] = host.TargetIQN + properties['target_lun'] = 0 + properties['volume_id'] = volume['id'] + + auth = volume['provider_auth'] + if auth: + (auth_method, auth_username, auth_secret) = auth.split() + + properties['auth_method'] = auth_method + properties['auth_username'] = auth_username + properties['auth_password'] = auth_secret + + return { + 'driver_volume_type': 'iscsi', + 'data': properties, + } + + def terminate_connection(self, volume, connector, **kwargs): + """Driver entry point to unattach a volume from an instance. + + Unmask the LUN on the storage system so the given intiator can no + longer access it. + """ + initiator_name = connector['initiator'] + provider_location = volume['provider_location'] + #DesAssigning target to initiators + wt_idmethod = self._conn_wmi.WT_IDMethod(HostName=provider_location, + Method=4, + Value=initiator_name)[0] + wt_idmethod.Delete_() + + def create_volume(self, volume): + """Driver entry point for creating a new volume.""" + vhd_path = self._get_vhd_path(volume) + vol_name = volume['name'] + #The WMI procedure returns a Generic failure + cl = self._conn_wmi.__getattr__("WT_Disk") + cl.NewWTDisk(DevicePath=vhd_path, + Description=vol_name, + SizeInMB=volume['size'] * 1024) + + def _get_vhd_path(self, volume): + base_vhd_folder = FLAGS.windows_iscsi_lun_path + if not os.path.exists(base_vhd_folder): + LOG.debug(_('Creating folder %s '), base_vhd_folder) + os.makedirs(base_vhd_folder) + return os.path.join(base_vhd_folder, str(volume['name']) + ".vhd") + + def delete_volume(self, volume): + """Driver entry point for destroying existing volumes.""" + vol_name = volume['name'] + wt_disk = self._conn_wmi.WT_Disk(Description=vol_name)[0] + wt_disk.Delete_() + vhdfiles = self._conn_cimv2.query( + "Select * from CIM_DataFile where Name = '" + + self._get_vhd_path(volume) + "'") + if len(vhdfiles) > 0: + vhdfiles[0].Delete() + + def create_snapshot(self, snapshot): + """Driver entry point for creating a snapshot. 
+ """ + #Getting WT_Snapshot class + vol_name = snapshot['volume_name'] + snapshot_name = snapshot['name'] + + wt_disk = self._conn_wmi.WT_Disk(Description=vol_name)[0] + #API Calls gets Generic Failure + cl = self._conn_wmi.__getattr__("WT_Snapshot") + disk_id = wt_disk.WTD + out = cl.Create(WTD=disk_id) + #Setting description since it used as a KEY + wt_snapshot_created = self._conn_wmi.WT_Snapshot(Id=out[0])[0] + wt_snapshot_created.Description = snapshot_name + wt_snapshot_created.put() + + def create_volume_from_snapshot(self, volume, snapshot): + """Driver entry point for exporting snapshots as volumes.""" + snapshot_name = snapshot['name'] + wt_snapshot = self._conn_wmi.WT_Snapshot(Description=snapshot_name)[0] + disk_id = wt_snapshot.Export()[0] + wt_disk = self._conn_wmi.WT_Disk(WTD=disk_id)[0] + wt_disk.Description = volume['name'] + wt_disk.put() + + def delete_snapshot(self, snapshot): + """Driver entry point for deleting a snapshot.""" + snapshot_name = snapshot['name'] + wt_snapshot = self._conn_wmi.WT_Snapshot(Description=snapshot_name)[0] + wt_snapshot.Delete_() + + def _do_export(self, _ctx, volume, ensure=False): + """Do all steps to get disk exported as LUN 0 at separate target. + + :param volume: reference of volume to be exported + :param ensure: if True, ignore errors caused by already existing + resources + :return: iscsiadm-formatted provider location string + """ + target_name = "%s%s" % (FLAGS.iscsi_target_prefix, volume['name']) + #ISCSI target creation + try: + cl = self._conn_wmi.__getattr__("WT_Host") + cl.NewHost(HostName=target_name) + except Exception as exc: + excep_info = exc.com_error.excepinfo[2] + if not ensure or excep_info.find(u'The file exists') == -1: + raise + else: + LOG.info(_('Ignored target creation error "%s"' + ' while ensuring export'), exc) + #Get the disk to add + vol_name = volume['name'] + q = self._conn_wmi.WT_Disk(Description=vol_name) + if not len(q): + LOG.debug(_('Disk not found: %s'), vol_name) + return None + wt_disk = q[0] + wt_host = self._conn_wmi.WT_Host(HostName=target_name)[0] + wt_host.AddWTDisk(wt_disk.WTD) + + return target_name + + def ensure_export(self, context, volume): + """Driver entry point to get the export info for an existing volume.""" + self._do_export(context, volume, ensure=True) + + def create_export(self, context, volume): + """Driver entry point to get the export info for a new volume.""" + loc = self._do_export(context, volume, ensure=False) + return {'provider_location': loc} + + def remove_export(self, context, volume): + """Driver exntry point to remove an export for a volume. + """ + target_name = "%s%s" % (FLAGS.iscsi_target_prefix, volume['name']) + + #Get ISCSI target + wt_host = self._conn_wmi.WT_Host(HostName=target_name)[0] + wt_host.RemoveAllWTDisks() + wt_host.Delete_() + + def copy_image_to_volume(self, context, volume, image_service, image_id): + """Fetch the image from image_service and write it to the volume.""" + raise NotImplementedError() + + def copy_volume_to_image(self, context, volume, image_service, image_meta): + """Copy the volume to the specified image.""" + raise NotImplementedError() diff --git a/cinder/volume/drivers/xenapi/__init__.py b/cinder/volume/drivers/xenapi/__init__.py new file mode 100644 index 0000000000..4549abf922 --- /dev/null +++ b/cinder/volume/drivers/xenapi/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2012 OpenStack LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/cinder/volume/drivers/xenapi/lib.py b/cinder/volume/drivers/xenapi/lib.py new file mode 100644 index 0000000000..4c48fc2dbb --- /dev/null +++ b/cinder/volume/drivers/xenapi/lib.py @@ -0,0 +1,542 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from cinder.volume.drivers.xenapi import tools +import contextlib +import os +import pickle + + +class XenAPIException(Exception): + def __init__(self, original_exception): + super(XenAPIException, self).__init__(str(original_exception)) + self.original_exception = original_exception + + +class OperationsBase(object): + def __init__(self, xenapi_session): + self.session = xenapi_session + + def call_xenapi(self, method, *args): + return self.session.call_xenapi(method, *args) + + +class VMOperations(OperationsBase): + def get_by_uuid(self, vm_uuid): + return self.call_xenapi('VM.get_by_uuid', vm_uuid) + + def get_vbds(self, vm_uuid): + return self.call_xenapi('VM.get_VBDs', vm_uuid) + + +class VBDOperations(OperationsBase): + def create(self, vm_ref, vdi_ref, userdevice, bootable, mode, type, + empty, other_config): + vbd_rec = dict( + VM=vm_ref, + VDI=vdi_ref, + userdevice=str(userdevice), + bootable=bootable, + mode=mode, + type=type, + empty=empty, + other_config=other_config, + qos_algorithm_type='', + qos_algorithm_params=dict() + ) + return self.call_xenapi('VBD.create', vbd_rec) + + def destroy(self, vbd_ref): + self.call_xenapi('VBD.destroy', vbd_ref) + + def get_device(self, vbd_ref): + return self.call_xenapi('VBD.get_device', vbd_ref) + + def plug(self, vbd_ref): + return self.call_xenapi('VBD.plug', vbd_ref) + + def unplug(self, vbd_ref): + return self.call_xenapi('VBD.unplug', vbd_ref) + + def get_vdi(self, vbd_ref): + return self.call_xenapi('VBD.get_VDI', vbd_ref) + + +class PoolOperations(OperationsBase): + def get_all(self): + return self.call_xenapi('pool.get_all') + + def get_default_SR(self, pool_ref): + return self.call_xenapi('pool.get_default_SR', pool_ref) + + +class PbdOperations(OperationsBase): + def get_all(self): + return self.call_xenapi('PBD.get_all') + + def unplug(self, pbd_ref): + self.call_xenapi('PBD.unplug', pbd_ref) + + def create(self, host_ref, sr_ref, device_config): + return self.call_xenapi( + 'PBD.create', + dict( + host=host_ref, + SR=sr_ref, + device_config=device_config + ) + ) + + def plug(self, pbd_ref): + self.call_xenapi('PBD.plug', pbd_ref) + + +class 
SrOperations(OperationsBase): + def get_all(self): + return self.call_xenapi('SR.get_all') + + def get_record(self, sr_ref): + return self.call_xenapi('SR.get_record', sr_ref) + + def forget(self, sr_ref): + self.call_xenapi('SR.forget', sr_ref) + + def scan(self, sr_ref): + self.call_xenapi('SR.scan', sr_ref) + + def create(self, host_ref, device_config, name_label, name_description, + sr_type, physical_size=None, content_type=None, + shared=False, sm_config=None): + return self.call_xenapi( + 'SR.create', + host_ref, + device_config, + physical_size or '0', + name_label or '', + name_description or '', + sr_type, + content_type or '', + shared, + sm_config or dict() + ) + + def introduce(self, sr_uuid, name_label, name_description, sr_type, + content_type=None, shared=False, sm_config=None): + return self.call_xenapi( + 'SR.introduce', + sr_uuid, + name_label or '', + name_description or '', + sr_type, + content_type or '', + shared, + sm_config or dict() + ) + + def get_uuid(self, sr_ref): + return self.get_record(sr_ref)['uuid'] + + def get_name_label(self, sr_ref): + return self.get_record(sr_ref)['name_label'] + + def get_name_description(self, sr_ref): + return self.get_record(sr_ref)['name_description'] + + def destroy(self, sr_ref): + self.call_xenapi('SR.destroy', sr_ref) + + +class VdiOperations(OperationsBase): + def get_all(self): + return self.call_xenapi('VDI.get_all') + + def get_record(self, vdi_ref): + return self.call_xenapi('VDI.get_record', vdi_ref) + + def get_by_uuid(self, vdi_uuid): + return self.call_xenapi('VDI.get_by_uuid', vdi_uuid) + + def get_uuid(self, vdi_ref): + return self.get_record(vdi_ref)['uuid'] + + def create(self, sr_ref, size, vdi_type, + sharable=False, read_only=False, other_config=None): + return self.call_xenapi('VDI.create', + dict(SR=sr_ref, + virtual_size=str(size), + type=vdi_type, + sharable=sharable, + read_only=read_only, + other_config=other_config or dict())) + + def destroy(self, vdi_ref): + self.call_xenapi('VDI.destroy', vdi_ref) + + def copy(self, vdi_ref, sr_ref): + return self.call_xenapi('VDI.copy', vdi_ref, sr_ref) + + def resize(self, vdi_ref, size): + return self.call_xenapi('VDI.resize', vdi_ref, str(size)) + + +class HostOperations(OperationsBase): + def get_record(self, host_ref): + return self.call_xenapi('host.get_record', host_ref) + + def get_uuid(self, host_ref): + return self.get_record(host_ref)['uuid'] + + +class XenAPISession(object): + def __init__(self, session, exception_to_convert): + self._session = session + self._exception_to_convert = exception_to_convert + self.handle = self._session.handle + self.PBD = PbdOperations(self) + self.SR = SrOperations(self) + self.VDI = VdiOperations(self) + self.host = HostOperations(self) + self.pool = PoolOperations(self) + self.VBD = VBDOperations(self) + self.VM = VMOperations(self) + + def close(self): + return self.call_xenapi('logout') + + @contextlib.contextmanager + def exception_converter(self): + try: + yield None + except self._exception_to_convert as e: + raise XenAPIException(e) + + def call_xenapi(self, method, *args): + with self.exception_converter(): + return self._session.xenapi_request(method, args) + + def call_plugin(self, host_ref, plugin, function, args): + with self.exception_converter(): + return self._session.xenapi.host.call_plugin( + host_ref, plugin, function, args) + + def get_pool(self): + return self.call_xenapi('session.get_pool', self.handle) + + def get_this_host(self): + return self.call_xenapi('session.get_this_host', self.handle) + + 
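+
+# Illustrative usage sketch (not part of the driver): the operations
+# classes above are reached through a XenAPISession; for example, listing
+# the name labels of all storage repositories. connect() is defined later
+# in this module and resolves at call time; url, user and password are
+# assumed credentials for a reachable XenAPI host.
+def _example_list_sr_labels(url, user, password):
+    session = connect(url, user, password)
+    try:
+        return [session.SR.get_name_label(sr_ref)
+                for sr_ref in session.SR.get_all()]
+    finally:
+        session.close()
+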
+class CompoundOperations(object): + def unplug_pbds_from_sr(self, sr_ref): + sr_rec = self.SR.get_record(sr_ref) + for pbd_ref in sr_rec.get('PBDs', []): + self.PBD.unplug(pbd_ref) + + def unplug_pbds_and_forget_sr(self, sr_ref): + self.unplug_pbds_from_sr(sr_ref) + self.SR.forget(sr_ref) + + def create_new_vdi(self, sr_ref, size_in_gigabytes): + return self.VDI.create(sr_ref, + to_bytes(size_in_gigabytes), + 'User', ) + + +def to_bytes(size_in_gigs): + return size_in_gigs * 1024 * 1024 * 1024 + + +class NFSOperationsMixIn(CompoundOperations): + def is_nfs_sr(self, sr_ref): + return self.SR.get_record(sr_ref).get('type') == 'nfs' + + @contextlib.contextmanager + def new_sr_on_nfs(self, host_ref, server, serverpath, + name_label=None, name_description=None): + + device_config = dict( + server=server, + serverpath=serverpath + ) + name_label = name_label or '' + name_description = name_description or '' + sr_type = 'nfs' + + sr_ref = self.SR.create( + host_ref, + device_config, + name_label, + name_description, + sr_type, + ) + yield sr_ref + + self.unplug_pbds_and_forget_sr(sr_ref) + + def plug_nfs_sr(self, host_ref, server, serverpath, sr_uuid, + name_label=None, name_description=None): + + device_config = dict( + server=server, + serverpath=serverpath + ) + sr_type = 'nfs' + + sr_ref = self.SR.introduce( + sr_uuid, + name_label, + name_description, + sr_type, + ) + + pbd_ref = self.PBD.create( + host_ref, + sr_ref, + device_config + ) + + self.PBD.plug(pbd_ref) + + return sr_ref + + def connect_volume(self, server, serverpath, sr_uuid, vdi_uuid): + host_ref = self.get_this_host() + sr_ref = self.plug_nfs_sr( + host_ref, + server, + serverpath, + sr_uuid + ) + self.SR.scan(sr_ref) + vdi_ref = self.VDI.get_by_uuid(vdi_uuid) + return dict(sr_ref=sr_ref, vdi_ref=vdi_ref) + + def copy_vdi_to_sr(self, vdi_ref, sr_ref): + return self.VDI.copy(vdi_ref, sr_ref) + + +class ContextAwareSession(XenAPISession): + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, traceback): + self.close() + + +class OpenStackXenAPISession(ContextAwareSession, + NFSOperationsMixIn): + pass + + +def connect(url, user, password): + import XenAPI + session = XenAPI.Session(url) + session.login_with_password(user, password) + return OpenStackXenAPISession(session, XenAPI.Failure) + + +class SessionFactory(object): + def __init__(self, url, user, password): + self.url = url + self.user = user + self.password = password + + def get_session(self): + return connect(self.url, self.user, self.password) + + +class XapiPluginProxy(object): + def __init__(self, session_factory, plugin_name): + self._session_factory = session_factory + self._plugin_name = plugin_name + + def call(self, function, *plugin_args, **plugin_kwargs): + plugin_params = dict(args=plugin_args, kwargs=plugin_kwargs) + args = dict(params=pickle.dumps(plugin_params)) + + with self._session_factory.get_session() as session: + host_ref = session.get_this_host() + result = session.call_plugin( + host_ref, self._plugin_name, function, args) + + return pickle.loads(result) + + +class GlancePluginProxy(XapiPluginProxy): + def __init__(self, session_factory): + super(GlancePluginProxy, self).__init__(session_factory, 'glance') + + def download_vhd(self, image_id, glance_host, glance_port, glance_use_ssl, + uuid_stack, sr_path, auth_token): + return self.call( + 'download_vhd', + image_id=image_id, + glance_host=glance_host, + glance_port=glance_port, + glance_use_ssl=glance_use_ssl, + uuid_stack=uuid_stack, + sr_path=sr_path, + 
auth_token=auth_token) + + def upload_vhd(self, vdi_uuids, image_id, glance_host, glance_port, + glance_use_ssl, sr_path, auth_token, properties): + return self.call( + 'upload_vhd', + vdi_uuids=vdi_uuids, + image_id=image_id, + glance_host=glance_host, + glance_port=glance_port, + glance_use_ssl=glance_use_ssl, + sr_path=sr_path, + auth_token=auth_token, + properties=properties) + + +class NFSBasedVolumeOperations(object): + def __init__(self, session_factory): + self._session_factory = session_factory + self.glance_plugin = GlancePluginProxy(session_factory) + + def create_volume(self, server, serverpath, size, + name=None, description=None): + with self._session_factory.get_session() as session: + host_ref = session.get_this_host() + with session.new_sr_on_nfs(host_ref, server, serverpath, + name, description) as sr_ref: + vdi_ref = session.create_new_vdi(sr_ref, size) + + return dict( + sr_uuid=session.SR.get_uuid(sr_ref), + vdi_uuid=session.VDI.get_uuid(vdi_ref) + ) + + def delete_volume(self, server, serverpath, sr_uuid, vdi_uuid): + with self._session_factory.get_session() as session: + refs = session.connect_volume( + server, serverpath, sr_uuid, vdi_uuid) + + session.VDI.destroy(refs['vdi_ref']) + sr_ref = refs['sr_ref'] + session.unplug_pbds_from_sr(sr_ref) + session.SR.destroy(sr_ref) + + def connect_volume(self, server, serverpath, sr_uuid, vdi_uuid): + with self._session_factory.get_session() as session: + refs = session.connect_volume( + server, serverpath, sr_uuid, vdi_uuid) + + return session.VDI.get_uuid(refs['vdi_ref']) + + def disconnect_volume(self, vdi_uuid): + with self._session_factory.get_session() as session: + vdi_ref = session.VDI.get_by_uuid(vdi_uuid) + vdi_rec = session.VDI.get_record(vdi_ref) + sr_ref = vdi_rec['SR'] + session.unplug_pbds_and_forget_sr(sr_ref) + + def copy_volume(self, server, serverpath, sr_uuid, vdi_uuid, + name=None, description=None): + with self._session_factory.get_session() as session: + src_refs = session.connect_volume( + server, serverpath, sr_uuid, vdi_uuid) + try: + host_ref = session.get_this_host() + + with session.new_sr_on_nfs(host_ref, server, serverpath, + name, description) as target_sr_ref: + target_vdi_ref = session.copy_vdi_to_sr( + src_refs['vdi_ref'], target_sr_ref) + + dst_refs = dict( + sr_uuid=session.SR.get_uuid(target_sr_ref), + vdi_uuid=session.VDI.get_uuid(target_vdi_ref) + ) + + finally: + session.unplug_pbds_and_forget_sr(src_refs['sr_ref']) + + return dst_refs + + def resize_volume(self, server, serverpath, sr_uuid, vdi_uuid, + size_in_gigabytes): + self.connect_volume(server, serverpath, sr_uuid, vdi_uuid) + + try: + with self._session_factory.get_session() as session: + vdi_ref = session.VDI.get_by_uuid(vdi_uuid) + session.VDI.resize(vdi_ref, to_bytes(size_in_gigabytes)) + finally: + self.disconnect_volume(vdi_uuid) + + def use_glance_plugin_to_overwrite_volume(self, server, serverpath, + sr_uuid, vdi_uuid, glance_server, + image_id, auth_token, + sr_base_path): + self.connect_volume(server, serverpath, sr_uuid, vdi_uuid) + + uuid_stack = [vdi_uuid] + glance_host, glance_port, glance_use_ssl = glance_server + + try: + result = self.glance_plugin.download_vhd( + image_id, glance_host, glance_port, glance_use_ssl, uuid_stack, + os.path.join(sr_base_path, sr_uuid), auth_token) + finally: + self.disconnect_volume(vdi_uuid) + + if len(result) != 1 or result['root']['uuid'] != vdi_uuid: + return False + + return True + + def use_glance_plugin_to_upload_volume(self, server, serverpath, + sr_uuid, vdi_uuid, 
glance_server, + image_id, auth_token, sr_base_path): + self.connect_volume(server, serverpath, sr_uuid, vdi_uuid) + + vdi_uuids = [vdi_uuid] + glance_host, glance_port, glance_use_ssl = glance_server + + try: + result = self.glance_plugin.upload_vhd( + vdi_uuids, image_id, glance_host, glance_port, glance_use_ssl, + os.path.join(sr_base_path, sr_uuid), auth_token, dict()) + finally: + self.disconnect_volume(vdi_uuid) + + @contextlib.contextmanager + def volume_attached_here(self, server, serverpath, sr_uuid, vdi_uuid, + readonly=True): + self.connect_volume(server, serverpath, sr_uuid, vdi_uuid) + + with self._session_factory.get_session() as session: + vm_uuid = tools.get_this_vm_uuid() + vm_ref = session.VM.get_by_uuid(vm_uuid) + vdi_ref = session.VDI.get_by_uuid(vdi_uuid) + vbd_ref = session.VBD.create( + vm_ref, vdi_ref, userdevice='autodetect', bootable=False, + mode='RO' if readonly else 'RW', type='disk', empty=False, + other_config=dict()) + session.VBD.plug(vbd_ref) + device = session.VBD.get_device(vbd_ref) + try: + yield "/dev/" + device + finally: + session.VBD.unplug(vbd_ref) + session.VBD.destroy(vbd_ref) + self.disconnect_volume(vdi_uuid) diff --git a/cinder/volume/drivers/xenapi/sm.py b/cinder/volume/drivers/xenapi/sm.py new file mode 100644 index 0000000000..c1b64e2c9c --- /dev/null +++ b/cinder/volume/drivers/xenapi/sm.py @@ -0,0 +1,272 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
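+
+"""XenAPI NFS-backed volume driver (see XenAPINFSDriver below)."""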
+
+from oslo.config import cfg
+
+from cinder import exception
+from cinder import flags
+from cinder.image import glance
+from cinder.image import image_utils
+from cinder.openstack.common import log as logging
+from cinder.volume import driver
+from cinder.volume.drivers.xenapi import lib as xenapi_lib
+
+LOG = logging.getLogger(__name__)
+
+xenapi_opts = [
+    cfg.StrOpt('xenapi_connection_url',
+               default=None,
+               help='URL for XenAPI connection'),
+    cfg.StrOpt('xenapi_connection_username',
+               default='root',
+               help='Username for XenAPI connection'),
+    cfg.StrOpt('xenapi_connection_password',
+               default=None,
+               help='Password for XenAPI connection',
+               secret=True),
+    cfg.StrOpt('xenapi_sr_base_path',
+               default='/var/run/sr-mount',
+               help='Base path to the storage repository'),
+]
+
+xenapi_nfs_opts = [
+    cfg.StrOpt('xenapi_nfs_server',
+               default=None,
+               help='NFS server to be used by XenAPINFSDriver'),
+    cfg.StrOpt('xenapi_nfs_serverpath',
+               default=None,
+               help='Path of exported NFS, used by XenAPINFSDriver'),
+]
+
+FLAGS = flags.FLAGS
+FLAGS.register_opts(xenapi_opts)
+FLAGS.register_opts(xenapi_nfs_opts)
+
+
+class XenAPINFSDriver(driver.VolumeDriver):
+    def __init__(self, *args, **kwargs):
+        super(XenAPINFSDriver, self).__init__(*args, **kwargs)
+        self.configuration.append_config_values(xenapi_opts)
+        self.configuration.append_config_values(xenapi_nfs_opts)
+
+    def do_setup(self, context):
+        session_factory = xenapi_lib.SessionFactory(
+            self.configuration.xenapi_connection_url,
+            self.configuration.xenapi_connection_username,
+            self.configuration.xenapi_connection_password
+        )
+        self.nfs_ops = xenapi_lib.NFSBasedVolumeOperations(session_factory)
+
+    def create_cloned_volume(self, volume, src_vref):
+        raise NotImplementedError()
+
+    def create_volume(self, volume):
+        volume_details = self.nfs_ops.create_volume(
+            self.configuration.xenapi_nfs_server,
+            self.configuration.xenapi_nfs_serverpath,
+            volume['size'],
+            volume['display_name'],
+            volume['display_description']
+        )
+        location = "%(sr_uuid)s/%(vdi_uuid)s" % volume_details
+        return dict(provider_location=location)
+
+    def create_export(self, context, volume):
+        pass
+
+    def delete_volume(self, volume):
+        sr_uuid, vdi_uuid = volume['provider_location'].split('/')
+
+        self.nfs_ops.delete_volume(
+            self.configuration.xenapi_nfs_server,
+            self.configuration.xenapi_nfs_serverpath,
+            sr_uuid,
+            vdi_uuid
+        )
+
+    def remove_export(self, context, volume):
+        pass
+
+    def initialize_connection(self, volume, connector):
+        sr_uuid, vdi_uuid = volume['provider_location'].split('/')
+
+        return dict(
+            driver_volume_type='xensm',
+            data=dict(
+                name_label=volume['display_name'] or '',
+                name_description=volume['display_description'] or '',
+                sr_uuid=sr_uuid,
+                vdi_uuid=vdi_uuid,
+                sr_type='nfs',
+                server=self.configuration.xenapi_nfs_server,
+                serverpath=self.configuration.xenapi_nfs_serverpath,
+                introduce_sr_keys=['sr_type', 'server', 'serverpath']
+            )
+        )
+
+    def terminate_connection(self, volume, connector, force=False, **kwargs):
+        pass
+
+    def check_for_setup_error(self):
+        """Override the superclass method; this driver needs no checks."""
+
+    def create_volume_from_snapshot(self, volume, snapshot):
+        return self._copy_volume(
+            snapshot, volume['display_name'], volume['display_description'])
+
+    def create_snapshot(self, snapshot):
+        volume = snapshot['volume']
+        return self._copy_volume(
+            volume, snapshot['display_name'], snapshot['display_description'])
+
+    def _copy_volume(self, volume, target_name, target_desc):
+        sr_uuid, vdi_uuid = volume['provider_location'].split('/')
+
+        volume_details = self.nfs_ops.copy_volume(
+            self.configuration.xenapi_nfs_server,
+            self.configuration.xenapi_nfs_serverpath,
+            sr_uuid,
+            vdi_uuid,
+            target_name,
+            target_desc
+        )
+        location = "%(sr_uuid)s/%(vdi_uuid)s" % volume_details
+        return dict(provider_location=location)
+
+    def delete_snapshot(self, snapshot):
+        self.delete_volume(snapshot)
+
+    def ensure_export(self, context, volume):
+        pass
+
+    def copy_image_to_volume(self, context, volume, image_service, image_id):
+        if is_xenserver_image(context, image_service, image_id):
+            return self._use_glance_plugin_to_copy_image_to_volume(
+                context, volume, image_service, image_id)
+
+        return self._use_image_utils_to_pipe_bytes_to_volume(
+            context, volume, image_service, image_id)
+
+    def _use_image_utils_to_pipe_bytes_to_volume(self, context, volume,
+                                                 image_service, image_id):
+        sr_uuid, vdi_uuid = volume['provider_location'].split('/')
+        with self.nfs_ops.volume_attached_here(FLAGS.xenapi_nfs_server,
+                                               FLAGS.xenapi_nfs_serverpath,
+                                               sr_uuid, vdi_uuid,
+                                               False) as device:
+            image_utils.fetch_to_raw(context,
+                                     image_service,
+                                     image_id,
+                                     device)
+
+    def _use_glance_plugin_to_copy_image_to_volume(self, context, volume,
+                                                   image_service, image_id):
+        sr_uuid, vdi_uuid = volume['provider_location'].split('/')
+
+        api_servers = glance.get_api_servers()
+        glance_server = api_servers.next()
+        auth_token = context.auth_token
+
+        overwrite_result = self.nfs_ops.use_glance_plugin_to_overwrite_volume(
+            FLAGS.xenapi_nfs_server,
+            FLAGS.xenapi_nfs_serverpath,
+            sr_uuid,
+            vdi_uuid,
+            glance_server,
+            image_id,
+            auth_token,
+            FLAGS.xenapi_sr_base_path)
+
+        if overwrite_result is False:
+            raise exception.ImageCopyFailure()
+
+        self.nfs_ops.resize_volume(
+            FLAGS.xenapi_nfs_server,
+            FLAGS.xenapi_nfs_serverpath,
+            sr_uuid,
+            vdi_uuid,
+            volume['size'])
+
+    def copy_volume_to_image(self, context, volume, image_service, image_meta):
+        if is_xenserver_format(image_meta):
+            return self._use_glance_plugin_to_upload_volume(
+                context, volume, image_service, image_meta)
+
+        return self._use_image_utils_to_upload_volume(
+            context, volume, image_service, image_meta)
+
+    def _use_image_utils_to_upload_volume(self, context, volume, image_service,
+                                          image_meta):
+        sr_uuid, vdi_uuid = volume['provider_location'].split('/')
+        with self.nfs_ops.volume_attached_here(FLAGS.xenapi_nfs_server,
+                                               FLAGS.xenapi_nfs_serverpath,
+                                               sr_uuid, vdi_uuid,
+                                               True) as device:
+            image_utils.upload_volume(context,
+                                      image_service,
+                                      image_meta,
+                                      device)
+
+    def _use_glance_plugin_to_upload_volume(self, context, volume,
+                                            image_service, image_meta):
+        image_id = image_meta['id']
+
+        sr_uuid, vdi_uuid = volume['provider_location'].split('/')
+
+        api_servers = glance.get_api_servers()
+        glance_server = api_servers.next()
+        auth_token = context.auth_token
+
+        self.nfs_ops.use_glance_plugin_to_upload_volume(
+            FLAGS.xenapi_nfs_server,
+            FLAGS.xenapi_nfs_serverpath,
+            sr_uuid,
+            vdi_uuid,
+            glance_server,
+            image_id,
+            auth_token,
+            FLAGS.xenapi_sr_base_path)
+
+    def get_volume_stats(self, refresh=False):
+        if refresh or not self._stats:
+            data = {}
+
+            backend_name = self.configuration.safe_get('volume_backend_name')
+            data["volume_backend_name"] = backend_name or 'XenAPINFS'
+            data['vendor_name'] = 'Open Source'
+            data['driver_version'] = '1.0'
+            data['storage_protocol'] = 'xensm'
+            data['total_capacity_gb'] = 'unknown'
+            data['free_capacity_gb'] = 'unknown'
+            data['reserved_percentage'] = 0
+            self._stats = data
+
+        return self._stats
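One Python pitfall worth flagging for stats code like get_volume_stats above: a stray trailing comma after an assignment silently builds a one-element tuple instead of a string, which then leaks into the capabilities dictionary the scheduler consumes. A two-line illustration:

# Trailing commas create 1-tuples, not strings:
backend = 'XenAPINFS',   # backend == ('XenAPINFS',)
backend = 'XenAPINFS'    # backend == 'XenAPINFS'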
+ + +def is_xenserver_image(context, image_service, image_id): + image_meta = image_service.show(context, image_id) + return is_xenserver_format(image_meta) + + +def is_xenserver_format(image_meta): + return ( + image_meta['disk_format'] == 'vhd' + and image_meta['container_format'] == 'ovf' + ) diff --git a/cinder/volume/drivers/xenapi/tools.py b/cinder/volume/drivers/xenapi/tools.py new file mode 100644 index 0000000000..d452fbfa77 --- /dev/null +++ b/cinder/volume/drivers/xenapi/tools.py @@ -0,0 +1,7 @@ +def _stripped_first_line_of(filename): + with open(filename, 'rb') as f: + return f.readline().strip() + + +def get_this_vm_uuid(): + return _stripped_first_line_of('/sys/hypervisor/uuid') diff --git a/cinder/volume/drivers/xiv.py b/cinder/volume/drivers/xiv.py new file mode 100644 index 0000000000..2b60497e8f --- /dev/null +++ b/cinder/volume/drivers/xiv.py @@ -0,0 +1,122 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 IBM Corp. +# Copyright (c) 2012 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# Authors: +# Erik Zaadi +# Avishay Traeger + +""" +Volume driver for IBM XIV storage systems. +""" + +from oslo.config import cfg + +from cinder import exception +from cinder import flags +from cinder.openstack.common import importutils +from cinder.openstack.common import log as logging +from cinder.volume.drivers.san import san + +ibm_xiv_opts = [ + cfg.StrOpt('xiv_proxy', + default='xiv_openstack.nova_proxy.XIVNovaProxy', + help='Proxy driver'), +] + +FLAGS = flags.FLAGS +FLAGS.register_opts(ibm_xiv_opts) + +LOG = logging.getLogger('cinder.volume.xiv') + + +class XIVDriver(san.SanISCSIDriver): + """IBM XIV volume driver.""" + + def __init__(self, *args, **kwargs): + """Initialize the driver.""" + + proxy = importutils.import_class(FLAGS.xiv_proxy) + + self.xiv_proxy = proxy({"xiv_user": FLAGS.san_login, + "xiv_pass": FLAGS.san_password, + "xiv_address": FLAGS.san_ip, + "xiv_vol_pool": FLAGS.san_clustername}, + LOG, + exception) + san.SanISCSIDriver.__init__(self, *args, **kwargs) + + def do_setup(self, context): + """Setup and verify IBM XIV storage connection.""" + + self.xiv_proxy.setup(context) + + def ensure_export(self, context, volume): + """Ensure an export.""" + + return self.xiv_proxy.ensure_export(context, volume) + + def create_export(self, context, volume): + """Create an export.""" + + return self.xiv_proxy.create_export(context, volume) + + def create_volume(self, volume): + """Create a volume on the IBM XIV storage system.""" + + return self.xiv_proxy.create_volume(volume) + + def delete_volume(self, volume): + """Delete a volume on the IBM XIV storage system.""" + + self.xiv_proxy.delete_volume(volume) + + def remove_export(self, context, volume): + """Disconnect a volume from an attached instance.""" + + return self.xiv_proxy.remove_export(context, volume) + + def initialize_connection(self, volume, connector): + """Map the created volume.""" + + return self.xiv_proxy.initialize_connection(volume, connector) + + 
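    # (Illustrative note) XIVDriver is a thin shim: every call is forwarded
    # to the object that importutils.import_class loads from FLAGS.xiv_proxy.
    # A compatible proxy therefore only needs the constructor signature used
    # in __init__ above plus the delegated methods; the class name below is
    # hypothetical:
    #
    #     class MyXIVProxy(object):
    #         def __init__(self, conf, log, exception_module):
    #             self.conf = conf   # xiv_user, xiv_pass, xiv_address, ...
    #
    #         def setup(self, context):
    #             pass               # verify array connectivity here
    #
    #         def terminate_connection(self, volume, connector):
    #             pass               # unmap the volume on the array
    #
    # The remaining methods below follow the same one-line delegation.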
def terminate_connection(self, volume, connector, **kwargs): + """Terminate a connection to a volume.""" + + return self.xiv_proxy.terminate_connection(volume, connector) + + def create_volume_from_snapshot(self, volume, snapshot): + """Create a volume from a snapshot.""" + + return self.xiv_proxy.create_volume_from_snapshot(volume, + snapshot) + + def create_snapshot(self, snapshot): + """Create a snapshot.""" + + return self.xiv_proxy.create_snapshot(snapshot) + + def delete_snapshot(self, snapshot): + """Delete a snapshot.""" + + return self.xiv_proxy.delete_snapshot(snapshot) + + def get_volume_stats(self, refresh=False): + """Get volume stats.""" + + return self.xiv_proxy.get_volume_stats(refresh) diff --git a/cinder/volume/drivers/zadara.py b/cinder/volume/drivers/zadara.py new file mode 100644 index 0000000000..988ac647af --- /dev/null +++ b/cinder/volume/drivers/zadara.py @@ -0,0 +1,491 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2012 Zadara Storage, Inc. +# Copyright (c) 2012 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Volume driver for Zadara Virtual Private Storage Array (VPSA). + +This driver requires VPSA with API ver.12.06 or higher. +""" + +import httplib + +from lxml import etree +from oslo.config import cfg + +from cinder import exception +from cinder import flags +from cinder.openstack.common import log as logging +from cinder import utils +from cinder.volume import driver + +LOG = logging.getLogger("cinder.volume.driver") + +zadara_opts = [ + cfg.StrOpt('zadara_vpsa_ip', + default=None, + help='Management IP of Zadara VPSA'), + cfg.StrOpt('zadara_vpsa_port', + default=None, + help='Zadara VPSA port number'), + cfg.BoolOpt('zadara_vpsa_use_ssl', + default=False, + help='Use SSL connection'), + cfg.StrOpt('zadara_user', + default=None, + help='User name for the VPSA'), + cfg.StrOpt('zadara_password', + default=None, + help='Password for the VPSA', + secret=True), + + cfg.StrOpt('zadara_vpsa_poolname', + default=None, + help='Name of VPSA storage pool for volumes'), + + cfg.StrOpt('zadara_default_cache_policy', + default='write-through', + help='Default cache policy for volumes'), + cfg.StrOpt('zadara_default_encryption', + default='NO', + help='Default encryption policy for volumes'), + cfg.StrOpt('zadara_default_striping_mode', + default='simple', + help='Default striping mode for volumes'), + cfg.StrOpt('zadara_default_stripesize', + default='64', + help='Default stripe size for volumes'), + cfg.StrOpt('zadara_vol_name_template', + default='OS_%s', + help='Default template for VPSA volume names'), + cfg.BoolOpt('zadara_vpsa_auto_detach_on_delete', + default=True, + help="Automatically detach from servers on volume delete"), + cfg.BoolOpt('zadara_vpsa_allow_nonexistent_delete', + default=True, + help="Don't halt on deletion of non-existing volumes"), ] + +FLAGS = flags.FLAGS +FLAGS.register_opts(zadara_opts) + + +class ZadaraVPSAConnection(object): + """Executes volume driver commands on VPSA.""" + + def 
__init__(self, host, port, ssl, user, password): + self.host = host + self.port = port + self.use_ssl = ssl + self.user = user + self.password = password + self.access_key = None + + self.ensure_connection() + + def _generate_vpsa_cmd(self, cmd, **kwargs): + """Generate command to be sent to VPSA.""" + + def _joined_params(params): + param_str = [] + for k, v in params.items(): + param_str.append("%s=%s" % (k, v)) + return '&'.join(param_str) + + # Dictionary of applicable VPSA commands in the following format: + # 'command': (method, API_URL, {optional parameters}) + vpsa_commands = { + 'login': ('POST', + '/api/users/login.xml', + {'user': self.user, + 'password': self.password}), + + # Volume operations + 'create_volume': ('POST', + '/api/volumes.xml', + {'display_name': kwargs.get('name'), + 'virtual_capacity': kwargs.get('size'), + 'raid_group_name[]': FLAGS.zadara_vpsa_poolname, + 'quantity': 1, + 'cache': FLAGS.zadara_default_cache_policy, + 'crypt': FLAGS.zadara_default_encryption, + 'mode': FLAGS.zadara_default_striping_mode, + 'stripesize': FLAGS.zadara_default_stripesize, + 'force': 'NO'}), + 'delete_volume': ('DELETE', + '/api/volumes/%s.xml' % kwargs.get('vpsa_vol'), + {}), + + # Server operations + 'create_server': ('POST', + '/api/servers.xml', + {'display_name': kwargs.get('initiator'), + 'iqn': kwargs.get('initiator')}), + + # Attach/Detach operations + 'attach_volume': ('POST', + '/api/servers/%s/volumes.xml' + % kwargs.get('vpsa_srv'), + {'volume_name[]': kwargs.get('vpsa_vol'), + 'force': 'NO'}), + 'detach_volume': ('POST', + '/api/volumes/%s/detach.xml' + % kwargs.get('vpsa_vol'), + {'server_name[]': kwargs.get('vpsa_srv'), + 'force': 'NO'}), + + # Get operations + 'list_volumes': ('GET', + '/api/volumes.xml', + {}), + 'list_controllers': ('GET', + '/api/vcontrollers.xml', + {}), + 'list_servers': ('GET', + '/api/servers.xml', + {}), + 'list_vol_attachments': ('GET', + '/api/volumes/%s/servers.xml' + % kwargs.get('vpsa_vol'), + {}), } + + if cmd not in vpsa_commands.keys(): + raise exception.UnknownCmd(cmd=cmd) + else: + (method, url, params) = vpsa_commands[cmd] + + if method == 'GET': + # For GET commands add parameters to the URL + params.update(dict(access_key=self.access_key, + page=1, start=0, limit=0)) + url += '?' + _joined_params(params) + body = '' + + elif method == 'DELETE': + # For DELETE commands add parameters to the URL + params.update(dict(access_key=self.access_key)) + url += '?' + _joined_params(params) + body = '' + + elif method == 'POST': + if self.access_key: + params.update(dict(access_key=self.access_key)) + body = _joined_params(params) + + else: + raise exception.UnknownCmd(cmd=method) + + return (method, url, body) + + def ensure_connection(self, cmd=None): + """Retrieve access key for VPSA connection.""" + + if self.access_key or cmd == 'login': + return + + cmd = 'login' + xml_tree = self.send_cmd(cmd) + user = xml_tree.find('user') + if user is None: + raise exception.MalformedResponse(cmd=cmd, + reason='no "user" field') + + access_key = user.findtext('access-key') + if access_key is None: + raise exception.MalformedResponse(cmd=cmd, + reason='no "access-key" field') + + self.access_key = access_key + + def send_cmd(self, cmd, **kwargs): + """Send command to VPSA Controller.""" + + self.ensure_connection(cmd) + + (method, url, body) = self._generate_vpsa_cmd(cmd, **kwargs) + LOG.debug(_('Sending %(method)s to %(url)s. 
Body "%(body)s"') + % locals()) + + if self.use_ssl: + connection = httplib.HTTPSConnection(self.host, self.port) + else: + connection = httplib.HTTPConnection(self.host, self.port) + connection.request(method, url, body) + response = connection.getresponse() + + if response.status != 200: + connection.close() + raise exception.BadHTTPResponseStatus(status=response.status) + data = response.read() + connection.close() + + xml_tree = etree.fromstring(data) + status = xml_tree.findtext('status') + if status != '0': + raise exception.FailedCmdWithDump(status=status, data=data) + + if method in ['POST', 'DELETE']: + LOG.debug(_('Operation completed. %(data)s') % locals()) + return xml_tree + + +class ZadaraVPSAISCSIDriver(driver.ISCSIDriver): + """Zadara VPSA iSCSI volume driver.""" + + def __init__(self, *args, **kwargs): + super(ZadaraVPSAISCSIDriver, self).__init__(*args, **kwargs) + + def do_setup(self, context): + """ + Any initialization the volume driver does while starting. + Establishes initial connection with VPSA and retrieves access_key. + """ + self.vpsa = ZadaraVPSAConnection(FLAGS.zadara_vpsa_ip, + FLAGS.zadara_vpsa_port, + FLAGS.zadara_vpsa_use_ssl, + FLAGS.zadara_user, + FLAGS.zadara_password) + + def check_for_setup_error(self): + """Returns an error (exception) if prerequisites aren't met.""" + self.vpsa.ensure_connection() + + def local_path(self, volume): + """Return local path to existing local volume.""" + raise NotImplementedError() + + def _xml_parse_helper(self, xml_tree, first_level, search_tuple, + first=True): + """ + Helper for parsing VPSA's XML output. + + Returns single item if first==True or list for multiple selection. + If second argument in search_tuple is None - returns all items with + appropriate key. + """ + + objects = xml_tree.find(first_level) + if objects is None: + return None + + result_list = [] + (key, value) = search_tuple + for object in objects.getchildren(): + found_value = object.findtext(key) + if found_value and (found_value == value or value is None): + if first: + return object + else: + result_list.append(object) + return result_list if result_list else None + + def _get_vpsa_volume_name(self, name): + """Return VPSA's name for the volume.""" + xml_tree = self.vpsa.send_cmd('list_volumes') + volume = self._xml_parse_helper(xml_tree, 'volumes', + ('display-name', name)) + if volume is not None: + return volume.findtext('name') + + return None + + def _get_active_controller_details(self): + """Return details of VPSA's active controller.""" + xml_tree = self.vpsa.send_cmd('list_controllers') + ctrl = self._xml_parse_helper(xml_tree, 'vcontrollers', + ('state', 'active')) + if ctrl is not None: + return dict(target=ctrl.findtext('target'), + ip=ctrl.findtext('iscsi-ip'), + chap_user=ctrl.findtext('chap-username'), + chap_passwd=ctrl.findtext('chap-target-secret')) + return None + + def _get_server_name(self, initiator): + """Return VPSA's name for server object with given IQN.""" + xml_tree = self.vpsa.send_cmd('list_servers') + server = self._xml_parse_helper(xml_tree, 'servers', + ('iqn', initiator)) + if server is not None: + return server.findtext('name') + return None + + def _create_vpsa_server(self, initiator): + """Create server object within VPSA (if doesn't exist).""" + vpsa_srv = self._get_server_name(initiator) + if not vpsa_srv: + xml_tree = self.vpsa.send_cmd('create_server', initiator=initiator) + vpsa_srv = xml_tree.findtext('server-name') + return vpsa_srv + + def create_volume(self, volume): + """Create volume.""" + 
self.vpsa.send_cmd( + 'create_volume', + name=FLAGS.zadara_vol_name_template % volume['name'], + size=volume['size']) + + def delete_volume(self, volume): + """ + Delete volume. + + Return ok if doesn't exist. Auto detach from all servers. + """ + # Get volume name + name = FLAGS.zadara_vol_name_template % volume['name'] + vpsa_vol = self._get_vpsa_volume_name(name) + if not vpsa_vol: + msg = _('Volume %(name)s could not be found. ' + 'It might be already deleted') % locals() + LOG.warning(msg) + if FLAGS.zadara_vpsa_allow_nonexistent_delete: + return + else: + raise exception.VolumeNotFound(volume_id=name) + + # Check attachment info and detach from all + xml_tree = self.vpsa.send_cmd('list_vol_attachments', + vpsa_vol=vpsa_vol) + servers = self._xml_parse_helper(xml_tree, 'servers', + ('iqn', None), first=False) + if servers: + if not FLAGS.zadara_vpsa_auto_detach_on_delete: + raise exception.VolumeAttached(volume_id=name) + + for server in servers: + vpsa_srv = server.findtext('name') + if vpsa_srv: + self.vpsa.send_cmd('detach_volume', + vpsa_srv=vpsa_srv, + vpsa_vol=vpsa_vol) + + # Delete volume + self.vpsa.send_cmd('delete_volume', vpsa_vol=vpsa_vol) + + def create_export(self, context, volume): + """Irrelevant for VPSA volumes. Export created during attachment.""" + pass + + def ensure_export(self, context, volume): + """Irrelevant for VPSA volumes. Export created during attachment.""" + pass + + def remove_export(self, context, volume): + """Irrelevant for VPSA volumes. Export removed during detach.""" + pass + + def initialize_connection(self, volume, connector): + """ + Attach volume to initiator/host. + + During this call VPSA exposes volume to particular Initiator. It also + creates a 'server' entity for Initiator (if it was not created before) + + All necessary connection information is returned, including auth data. + Connection data (target, LUN) is not stored in the DB. 
+ """ + + # Get/Create server name for IQN + initiator_name = connector['initiator'] + vpsa_srv = self._create_vpsa_server(initiator_name) + if not vpsa_srv: + raise exception.ZadaraServerCreateFailure(name=initiator_name) + + # Get volume name + name = FLAGS.zadara_vol_name_template % volume['name'] + vpsa_vol = self._get_vpsa_volume_name(name) + if not vpsa_vol: + raise exception.VolumeNotFound(volume_id=name) + + # Get Active controller details + ctrl = self._get_active_controller_details() + if not ctrl: + raise exception.ZadaraVPSANoActiveController() + + # Attach volume to server + self.vpsa.send_cmd('attach_volume', + vpsa_srv=vpsa_srv, + vpsa_vol=vpsa_vol) + + # Get connection info + xml_tree = self.vpsa.send_cmd('list_vol_attachments', + vpsa_vol=vpsa_vol) + server = self._xml_parse_helper(xml_tree, 'servers', + ('iqn', initiator_name)) + if server is None: + raise exception.ZadaraAttachmentsNotFound(name=name) + target = server.findtext('target') + lun = server.findtext('lun') + if target is None or lun is None: + raise exception.ZadaraInvalidAttachmentInfo( + name=name, + reason='target=%s, lun=%s' % (target, lun)) + + properties = {} + properties['target_discovered'] = False + properties['target_portal'] = '%s:%s' % (ctrl['ip'], '3260') + properties['target_iqn'] = target + properties['target_lun'] = lun + properties['volume_id'] = volume['id'] + + properties['auth_method'] = 'CHAP' + properties['auth_username'] = ctrl['chap_user'] + properties['auth_password'] = ctrl['chap_passwd'] + + LOG.debug(_('Attach properties: %(properties)s') % locals()) + return {'driver_volume_type': 'iscsi', + 'data': properties} + + def terminate_connection(self, volume, connector, **kwargs): + """ + Detach volume from the initiator. + """ + # Get server name for IQN + initiator_name = connector['initiator'] + vpsa_srv = self._get_server_name(initiator_name) + if not vpsa_srv: + raise exception.ZadaraServerNotFound(name=initiator_name) + + # Get volume name + name = FLAGS.zadara_vol_name_template % volume['name'] + vpsa_vol = self._get_vpsa_volume_name(name) + if not vpsa_vol: + raise exception.VolumeNotFound(volume_id=name) + + # Detach volume from server + self.vpsa.send_cmd('detach_volume', + vpsa_srv=vpsa_srv, + vpsa_vol=vpsa_vol) + + def create_volume_from_snapshot(self, volume, snapshot): + raise NotImplementedError() + + def create_snapshot(self, snapshot): + raise NotImplementedError() + + def delete_snapshot(self, snapshot): + raise NotImplementedError() + + def copy_image_to_volume(self, context, volume, image_service, image_id): + """Fetch the image from image_service and write it to the volume.""" + raise NotImplementedError() + + def copy_volume_to_image(self, context, volume, image_service, image_meta): + """Copy the volume to the specified image.""" + raise NotImplementedError() + + def create_cloned_volume(self, volume, src_vref): + """Creates a clone of the specified volume.""" + raise NotImplementedError() diff --git a/cinder/volume/manager.py b/cinder/volume/manager.py new file mode 100644 index 0000000000..82871a538d --- /dev/null +++ b/cinder/volume/manager.py @@ -0,0 +1,725 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Volume manager manages creating, attaching, detaching, and persistent storage. + +Persistent storage volumes keep their state independent of instances. You can +attach to an instance, terminate the instance, spawn a new instance (even +one from a different image) and re-attach the volume with the same data +intact. + +**Related Flags** + +:volume_topic: What :mod:`rpc` topic to listen to (default: `cinder-volume`). +:volume_manager: The module name of a class derived from + :class:`manager.Manager` (default: + :class:`cinder.volume.manager.Manager`). +:volume_driver: Used by :class:`Manager`. Defaults to + :class:`cinder.volume.drivers.lvm.LVMISCSIDriver`. +:volume_group: Name of the group that will contain exported volumes (default: + `cinder-volumes`) +:num_shell_tries: Number of times to attempt to run commands (default: 3) + +""" + + +import sys +import traceback + +from oslo.config import cfg + +from cinder import context +from cinder import exception +from cinder.image import glance +from cinder import manager +from cinder.openstack.common import excutils +from cinder.openstack.common import importutils +from cinder.openstack.common import log as logging +from cinder.openstack.common import timeutils +from cinder.openstack.common import uuidutils +from cinder import quota +from cinder import utils +from cinder.volume.configuration import Configuration +from cinder.volume import utils as volume_utils + +LOG = logging.getLogger(__name__) + +QUOTAS = quota.QUOTAS + +volume_manager_opts = [ + cfg.StrOpt('volume_driver', + default='cinder.volume.drivers.lvm.LVMISCSIDriver', + help='Driver to use for volume creation'), +] + +CONF = cfg.CONF +CONF.register_opts(volume_manager_opts) + +MAPPING = { + 'cinder.volume.driver.RBDDriver': 'cinder.volume.drivers.rbd.RBDDriver', + 'cinder.volume.driver.SheepdogDriver': + 'cinder.volume.drivers.sheepdog.SheepdogDriver', + 'cinder.volume.nexenta.volume.NexentaDriver': + 'cinder.volume.drivers.nexenta.volume.NexentaDriver', + 'cinder.volume.san.SanISCSIDriver': + 'cinder.volume.drivers.san.san.SanISCSIDriver', + 'cinder.volume.san.SolarisISCSIDriver': + 'cinder.volume.drivers.san.solaris.SolarisISCSIDriver', + 'cinder.volume.san.HpSanISCSIDriver': + 'cinder.volume.drivers.san.hp_lefthand.HpSanISCSIDriver', + 'cinder.volume.netapp.NetAppISCSIDriver': + 'cinder.volume.drivers.netapp.iscsi.NetAppISCSIDriver', + 'cinder.volume.netapp.NetAppCmodeISCSIDriver': + 'cinder.volume.drivers.netapp.iscsi.NetAppCmodeISCSIDriver', + 'cinder.volume.netapp_nfs.NetAppNFSDriver': + 'cinder.volume.drivers.netapp.nfs.NetAppNFSDriver', + 'cinder.volume.nfs.NfsDriver': + 'cinder.volume.drivers.nfs.NfsDriver', + 'cinder.volume.solidfire.SolidFire': + 'cinder.volume.drivers.solidfire.SolidFire', + 'cinder.volume.storwize_svc.StorwizeSVCDriver': + 'cinder.volume.drivers.storwize_svc.StorwizeSVCDriver', + 'cinder.volume.windows.WindowsDriver': + 'cinder.volume.drivers.windows.WindowsDriver', + 'cinder.volume.xiv.XIVDriver': + 'cinder.volume.drivers.xiv.XIVDriver', + 'cinder.volume.zadara.ZadaraVPSAISCSIDriver': + 
'cinder.volume.drivers.zadara.ZadaraVPSAISCSIDriver', + 'cinder.volume.driver.ISCSIDriver': + 'cinder.volume.drivers.lvm.LVMISCSIDriver'} + + +class VolumeManager(manager.SchedulerDependentManager): + """Manages attachable block storage devices.""" + + RPC_API_VERSION = '1.4' + + def __init__(self, volume_driver=None, service_name=None, + *args, **kwargs): + """Load the driver from the one specified in args, or from flags.""" + self.configuration = Configuration(volume_manager_opts, + config_group=service_name) + if not volume_driver: + # Get from configuration, which will get the default + # if its not using the multi backend + volume_driver = self.configuration.volume_driver + if volume_driver in MAPPING: + LOG.warn(_("Driver path %s is deprecated, update your " + "configuration to the new path."), volume_driver) + volume_driver = MAPPING[volume_driver] + self.driver = importutils.import_object( + volume_driver, + configuration=self.configuration) + # update_service_capabilities needs service_name to be volume + super(VolumeManager, self).__init__(service_name='volume', + *args, **kwargs) + # NOTE(vish): Implementation specific db handling is done + # by the driver. + self.driver.db = self.db + + def init_host(self): + """Do any initialization that needs to be run if this is a + standalone service.""" + + ctxt = context.get_admin_context() + self.driver.do_setup(ctxt) + self.driver.check_for_setup_error() + + volumes = self.db.volume_get_all_by_host(ctxt, self.host) + LOG.debug(_("Re-exporting %s volumes"), len(volumes)) + for volume in volumes: + if volume['status'] in ['available', 'in-use']: + self.driver.ensure_export(ctxt, volume) + elif volume['status'] == 'downloading': + LOG.info(_("volume %s stuck in a downloading state"), + volume['id']) + self.driver.clear_download(ctxt, volume) + self.db.volume_update(ctxt, volume['id'], {'status': 'error'}) + else: + LOG.info(_("volume %s: skipping export"), volume['name']) + + LOG.debug(_('Resuming any in progress delete operations')) + for volume in volumes: + if volume['status'] == 'deleting': + LOG.info(_('Resuming delete on volume: %s') % volume['id']) + self.delete_volume(ctxt, volume['id']) + + # collect and publish service capabilities + self.publish_service_capabilities(ctxt) + + def _create_volume(self, context, volume_ref, snapshot_ref, + srcvol_ref, image_service, image_id, image_location): + cloned = None + model_update = False + + if all(x is None for x in(snapshot_ref, image_id, srcvol_ref)): + model_update = self.driver.create_volume(volume_ref) + elif snapshot_ref is not None: + model_update = self.driver.create_volume_from_snapshot( + volume_ref, + snapshot_ref) + elif srcvol_ref is not None: + model_update = self.driver.create_cloned_volume(volume_ref, + srcvol_ref) + else: + # create the volume from an image + cloned = self.driver.clone_image(volume_ref, image_location) + if not cloned: + model_update = self.driver.create_volume(volume_ref) + + updates = dict(model_update or dict(), status='downloading') + volume_ref = self.db.volume_update(context, + volume_ref['id'], + updates) + + self._copy_image_to_volume(context, + volume_ref, + image_service, + image_id) + + return model_update, cloned + + def create_volume(self, context, volume_id, request_spec=None, + filter_properties=None, allow_reschedule=True, + snapshot_id=None, image_id=None, source_volid=None): + """Creates and exports the volume.""" + context = context.elevated() + if filter_properties is None: + filter_properties = {} + volume_ref = 
self.db.volume_get(context, volume_id) + self._notify_about_volume_usage(context, volume_ref, "create.start") + + # NOTE(vish): so we don't have to get volume from db again + # before passing it to the driver. + volume_ref['host'] = self.host + + status = 'available' + model_update = False + image_meta = None + cloned = False + + try: + vol_name = volume_ref['name'] + vol_size = volume_ref['size'] + LOG.debug(_("volume %(vol_name)s: creating lv of" + " size %(vol_size)sG") % locals()) + snapshot_ref = None + sourcevol_ref = None + image_service = None + image_location = None + image_meta = None + + if snapshot_id is not None: + LOG.info(_("volume %s: creating from snapshot"), + volume_ref['name']) + snapshot_ref = self.db.snapshot_get(context, snapshot_id) + elif source_volid is not None: + LOG.info(_("volume %s: creating from existing volume"), + volume_ref['name']) + sourcevol_ref = self.db.volume_get(context, source_volid) + elif image_id is not None: + LOG.info(_("volume %s: creating from image"), + volume_ref['name']) + # create the volume from an image + image_service, image_id = \ + glance.get_remote_image_service(context, + image_id) + image_location = image_service.get_location(context, image_id) + image_meta = image_service.show(context, image_id) + else: + LOG.info(_("volume %s: creating"), volume_ref['name']) + + try: + model_update, cloned = self._create_volume(context, + volume_ref, + snapshot_ref, + sourcevol_ref, + image_service, + image_id, + image_location) + except Exception: + # restore source volume status before reschedule + if sourcevol_ref is not None: + self.db.volume_update(context, sourcevol_ref['id'], + {'status': sourcevol_ref['status']}) + exc_info = sys.exc_info() + # try to re-schedule volume: + self._reschedule_or_reraise(context, volume_id, exc_info, + snapshot_id, image_id, + request_spec, filter_properties, + allow_reschedule) + return + + if model_update: + volume_ref = self.db.volume_update( + context, volume_ref['id'], model_update) + if sourcevol_ref is not None: + self.db.volume_glance_metadata_copy_from_volume_to_volume( + context, + source_volid, + volume_id) + + LOG.debug(_("volume %s: creating export"), volume_ref['name']) + model_update = self.driver.create_export(context, volume_ref) + if model_update: + self.db.volume_update(context, volume_ref['id'], model_update) + + except Exception: + with excutils.save_and_reraise_exception(): + self.db.volume_update(context, + volume_ref['id'], {'status': 'error'}) + LOG.error(_("volume %s: create failed"), volume_ref['name']) + + if snapshot_id: + # Copy any Glance metadata from the original volume + self.db.volume_glance_metadata_copy_to_volume(context, + volume_ref['id'], + snapshot_id) + + if image_id and not cloned: + if image_meta: + # Copy all of the Glance image properties to the + # volume_glance_metadata table for future reference. 
+ self.db.volume_glance_metadata_create(context, + volume_ref['id'], + 'image_id', image_id) + name = image_meta.get('name', None) + if name: + self.db.volume_glance_metadata_create(context, + volume_ref['id'], + 'image_name', name) + image_properties = image_meta.get('properties', {}) + for key, value in image_properties.items(): + self.db.volume_glance_metadata_create(context, + volume_ref['id'], + key, value) + + now = timeutils.utcnow() + self.db.volume_update(context, + volume_ref['id'], {'status': status, + 'launched_at': now}) + LOG.info(_("volume %s: created successfully"), volume_ref['name']) + self._reset_stats() + + self._notify_about_volume_usage(context, volume_ref, "create.end") + return volume_ref['id'] + + def _log_original_error(self, exc_info): + type_, value, tb = exc_info + LOG.error(_('Error: %s') % + traceback.format_exception(type_, value, tb)) + + def _reschedule_or_reraise(self, context, volume_id, exc_info, + snapshot_id, image_id, request_spec, + filter_properties, allow_reschedule): + """Try to re-schedule the create or re-raise the original error to + error out the volume. + """ + if not allow_reschedule: + raise exc_info[0], exc_info[1], exc_info[2] + + rescheduled = False + + try: + method_args = (CONF.volume_topic, volume_id, snapshot_id, + image_id, request_spec, filter_properties) + + rescheduled = self._reschedule(context, request_spec, + filter_properties, volume_id, + self.scheduler_rpcapi.create_volume, + method_args, + exc_info) + + except Exception: + rescheduled = False + LOG.exception(_("volume %s: Error trying to reschedule create"), + volume_id) + + if rescheduled: + # log the original build error + self._log_original_error(exc_info) + else: + # not re-scheduling + raise exc_info[0], exc_info[1], exc_info[2] + + def _reschedule(self, context, request_spec, filter_properties, + volume_id, scheduler_method, method_args, + exc_info=None): + """Attempt to re-schedule a volume operation.""" + + retry = filter_properties.get('retry', None) + if not retry: + # no retry information, do not reschedule. 
+ LOG.debug(_("Retry info not present, will not reschedule")) + return + + if not request_spec: + LOG.debug(_("No request spec, will not reschedule")) + return + + request_spec['volume_id'] = volume_id + + LOG.debug(_("volume %(volume_id)s: re-scheduling %(method)s " + "attempt %(num)d") % + {'volume_id': volume_id, + 'method': scheduler_method.func_name, + 'num': retry['num_attempts']}) + + # reset the volume state: + now = timeutils.utcnow() + self.db.volume_update(context, volume_id, + {'status': 'creating', + 'scheduled_at': now}) + + if exc_info: + # stringify to avoid circular ref problem in json serialization: + retry['exc'] = traceback.format_exception(*exc_info) + + scheduler_method(context, *method_args) + return True + + def delete_volume(self, context, volume_id): + """Deletes and unexports volume.""" + context = context.elevated() + volume_ref = self.db.volume_get(context, volume_id) + + if context.project_id != volume_ref['project_id']: + project_id = volume_ref['project_id'] + else: + project_id = context.project_id + + LOG.info(_("volume %s: deleting"), volume_ref['name']) + if volume_ref['attach_status'] == "attached": + # Volume is still attached, need to detach first + raise exception.VolumeAttached(volume_id=volume_id) + if volume_ref['host'] != self.host: + raise exception.InvalidVolume( + reason=_("volume is not local to this node")) + + self._notify_about_volume_usage(context, volume_ref, "delete.start") + self._reset_stats() + try: + LOG.debug(_("volume %s: removing export"), volume_ref['name']) + self.driver.remove_export(context, volume_ref) + LOG.debug(_("volume %s: deleting"), volume_ref['name']) + self.driver.delete_volume(volume_ref) + except exception.VolumeIsBusy: + LOG.debug(_("volume %s: volume is busy"), volume_ref['name']) + self.driver.ensure_export(context, volume_ref) + self.db.volume_update(context, volume_ref['id'], + {'status': 'available'}) + return True + except Exception: + with excutils.save_and_reraise_exception(): + self.db.volume_update(context, + volume_ref['id'], + {'status': 'error_deleting'}) + + # Get reservations + try: + reservations = QUOTAS.reserve(context, + project_id=project_id, + volumes=-1, + gigabytes=-volume_ref['size']) + except Exception: + reservations = None + LOG.exception(_("Failed to update usages deleting volume")) + + self.db.volume_glance_metadata_delete_by_volume(context, volume_id) + self.db.volume_destroy(context, volume_id) + LOG.info(_("volume %s: deleted successfully"), volume_ref['name']) + self._notify_about_volume_usage(context, volume_ref, "delete.end") + + # Commit the reservations + if reservations: + QUOTAS.commit(context, reservations, project_id=project_id) + + self.publish_service_capabilities(context) + + return True + + def create_snapshot(self, context, volume_id, snapshot_id): + """Creates and exports the snapshot.""" + context = context.elevated() + snapshot_ref = self.db.snapshot_get(context, snapshot_id) + LOG.info(_("snapshot %s: creating"), snapshot_ref['name']) + self._notify_about_snapshot_usage( + context, snapshot_ref, "create.start") + + try: + snap_name = snapshot_ref['name'] + LOG.debug(_("snapshot %(snap_name)s: creating") % locals()) + model_update = self.driver.create_snapshot(snapshot_ref) + if model_update: + self.db.snapshot_update(context, snapshot_ref['id'], + model_update) + + except Exception: + with excutils.save_and_reraise_exception(): + self.db.snapshot_update(context, + snapshot_ref['id'], + {'status': 'error'}) + + self.db.snapshot_update(context, + snapshot_ref['id'], 
{'status': 'available', + 'progress': '100%'}) + self.db.volume_glance_metadata_copy_to_snapshot(context, + snapshot_ref['id'], + volume_id) + LOG.info(_("snapshot %s: created successfully"), snapshot_ref['name']) + self._notify_about_snapshot_usage(context, snapshot_ref, "create.end") + return snapshot_id + + def delete_snapshot(self, context, snapshot_id): + """Deletes and unexports snapshot.""" + context = context.elevated() + snapshot_ref = self.db.snapshot_get(context, snapshot_id) + LOG.info(_("snapshot %s: deleting"), snapshot_ref['name']) + self._notify_about_snapshot_usage( + context, snapshot_ref, "delete.start") + + if context.project_id != snapshot_ref['project_id']: + project_id = snapshot_ref['project_id'] + else: + project_id = context.project_id + + try: + LOG.debug(_("snapshot %s: deleting"), snapshot_ref['name']) + self.driver.delete_snapshot(snapshot_ref) + except exception.SnapshotIsBusy: + LOG.debug(_("snapshot %s: snapshot is busy"), snapshot_ref['name']) + self.db.snapshot_update(context, + snapshot_ref['id'], + {'status': 'available'}) + return True + except Exception: + with excutils.save_and_reraise_exception(): + self.db.snapshot_update(context, + snapshot_ref['id'], + {'status': 'error_deleting'}) + + # Get reservations + try: + if CONF.no_snapshot_gb_quota: + reservations = QUOTAS.reserve(context, + project_id=project_id, + snapshots=-1) + else: + reservations = QUOTAS.reserve( + context, + project_id=project_id, + snapshots=-1, + gigabytes=-snapshot_ref['volume_size']) + except Exception: + reservations = None + LOG.exception(_("Failed to update usages deleting snapshot")) + self.db.volume_glance_metadata_delete_by_snapshot(context, snapshot_id) + self.db.snapshot_destroy(context, snapshot_id) + LOG.info(_("snapshot %s: deleted successfully"), snapshot_ref['name']) + self._notify_about_snapshot_usage(context, snapshot_ref, "delete.end") + + # Commit the reservations + if reservations: + QUOTAS.commit(context, reservations, project_id=project_id) + return True + + def attach_volume(self, context, volume_id, instance_uuid, mountpoint): + """Updates db to show volume is attached""" + + @utils.synchronized(volume_id, external=True) + def do_attach(): + # check the volume status before attaching + volume = self.db.volume_get(context, volume_id) + if volume['status'] == 'attaching': + if (volume['instance_uuid'] and volume['instance_uuid'] != + instance_uuid): + msg = _("being attached by another instance") + raise exception.InvalidVolume(reason=msg) + elif volume['status'] != "available": + msg = _("status must be available") + raise exception.InvalidVolume(reason=msg) + self.db.volume_update(context, volume_id, + {"instance_uuid": instance_uuid, + "status": "attaching"}) + + # TODO(vish): refactor this into a more general "reserve" + # TODO(sleepsonthefloor): Is this 'elevated' appropriate? + if not uuidutils.is_uuid_like(instance_uuid): + raise exception.InvalidUUID(uuid=instance_uuid) + + try: + self.driver.attach_volume(context, + volume_id, + instance_uuid, + mountpoint) + except Exception: + with excutils.save_and_reraise_exception(): + self.db.volume_update(context, + volume_id, + {'status': 'error_attaching'}) + + self.db.volume_attached(context.elevated(), + volume_id, + instance_uuid, + mountpoint) + return do_attach() + + def detach_volume(self, context, volume_id): + """Updates db to show volume is detached""" + # TODO(vish): refactor this into a more general "unreserve" + # TODO(sleepsonthefloor): Is this 'elevated' appropriate? 
+ try: + self.driver.detach_volume(context, volume_id) + except Exception: + with excutils.save_and_reraise_exception(): + self.db.volume_update(context, + volume_id, + {'status': 'error_detaching'}) + + self.db.volume_detached(context.elevated(), volume_id) + + # Check for https://bugs.launchpad.net/cinder/+bug/1065702 + volume_ref = self.db.volume_get(context, volume_id) + if (volume_ref['provider_location'] and + volume_ref['name'] not in volume_ref['provider_location']): + self.driver.ensure_export(context, volume_ref) + + def _copy_image_to_volume(self, context, volume, image_service, image_id): + """Downloads Glance image to the specified volume. """ + volume_id = volume['id'] + self.driver.copy_image_to_volume(context, volume, + image_service, + image_id) + LOG.debug(_("Downloaded image %(image_id)s to %(volume_id)s " + "successfully") % locals()) + + def copy_volume_to_image(self, context, volume_id, image_meta): + """Uploads the specified volume to Glance. + + image_meta is a dictionary containing the following keys: + 'id', 'container_format', 'disk_format' + + """ + payload = {'volume_id': volume_id, 'image_id': image_meta['id']} + try: + volume = self.db.volume_get(context, volume_id) + self.driver.ensure_export(context.elevated(), volume) + image_service, image_id = \ + glance.get_remote_image_service(context, image_meta['id']) + self.driver.copy_volume_to_image(context, volume, image_service, + image_meta) + LOG.debug(_("Uploaded volume %(volume_id)s to " + "image (%(image_id)s) successfully") % locals()) + except Exception, error: + with excutils.save_and_reraise_exception(): + payload['message'] = unicode(error) + finally: + if volume['instance_uuid'] is None: + self.db.volume_update(context, volume_id, + {'status': 'available'}) + else: + self.db.volume_update(context, volume_id, + {'status': 'in-use'}) + + def initialize_connection(self, context, volume_id, connector): + """Prepare volume for connection from host represented by connector. + + This method calls the driver initialize_connection and returns + it to the caller. The connector parameter is a dictionary with + information about the host that will connect to the volume in the + following format:: + + { + 'ip': ip, + 'initiator': initiator, + } + + ip: the ip address of the connecting machine + + initiator: the iscsi initiator name of the connecting machine. + This can be None if the connecting machine does not support iscsi + connections. + + driver is responsible for doing any necessary security setup and + returning a connection_info dictionary in the following format:: + + { + 'driver_volume_type': driver_volume_type, + 'data': data, + } + + driver_volume_type: a string to identify the type of volume. This + can be used by the calling code to determine the + strategy for connecting to the volume. This could + be 'iscsi', 'rbd', 'sheepdog', etc. + + data: this is the data that the calling code will use to connect + to the volume. Keep in mind that this will be serialized to + json in various places, so it should not contain any non-json + data types. + """ + volume_ref = self.db.volume_get(context, volume_id) + return self.driver.initialize_connection(volume_ref, connector) + + def terminate_connection(self, context, volume_id, connector, force=False): + """Cleanup connection from host represented by connector. + + The format of connector is the same as for initialize_connection. 
+ """ + volume_ref = self.db.volume_get(context, volume_id) + self.driver.terminate_connection(volume_ref, connector, force=force) + + @manager.periodic_task + def _report_driver_status(self, context): + LOG.info(_("Updating volume status")) + volume_stats = self.driver.get_volume_stats(refresh=True) + if volume_stats: + # This will grab info about the host and queue it + # to be sent to the Schedulers. + self.update_service_capabilities(volume_stats) + + def publish_service_capabilities(self, context): + """ Collect driver status and then publish """ + self._report_driver_status(context) + self._publish_service_capabilities(context) + + def _reset_stats(self): + LOG.info(_("Clear capabilities")) + self._last_volume_stats = [] + + def notification(self, context, event): + LOG.info(_("Notification {%s} received"), event) + self._reset_stats() + + def _notify_about_volume_usage(self, + context, + volume, + event_suffix, + extra_usage_info=None): + volume_utils.notify_about_volume_usage( + context, volume, event_suffix, + extra_usage_info=extra_usage_info, host=self.host) + + def _notify_about_snapshot_usage(self, + context, + snapshot, + event_suffix, + extra_usage_info=None): + volume_utils.notify_about_snapshot_usage( + context, snapshot, event_suffix, + extra_usage_info=extra_usage_info, host=self.host) diff --git a/cinder/volume/rpcapi.py b/cinder/volume/rpcapi.py new file mode 100644 index 0000000000..1788873d5d --- /dev/null +++ b/cinder/volume/rpcapi.py @@ -0,0 +1,130 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012, Intel, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Client side of the volume RPC API. +""" + +from cinder import exception +from cinder import flags +from cinder.openstack.common import rpc +import cinder.openstack.common.rpc.proxy + + +FLAGS = flags.FLAGS + + +class VolumeAPI(cinder.openstack.common.rpc.proxy.RpcProxy): + '''Client side of the volume rpc API. + + API version history: + + 1.0 - Initial version. + 1.1 - Adds clone volume option to create_volume. + 1.2 - Add publish_service_capabilities() method. + 1.3 - Pass all image metadata (not just ID) in copy_volume_to_image + 1.4 - Add request_spec, filter_properties and + allow_reschedule arguments to create_volume(). 
+ ''' + + BASE_RPC_API_VERSION = '1.0' + + def __init__(self, topic=None): + super(VolumeAPI, self).__init__( + topic=topic or FLAGS.volume_topic, + default_version=self.BASE_RPC_API_VERSION) + + def create_volume(self, ctxt, volume, host, + request_spec, filter_properties, + allow_reschedule=True, + snapshot_id=None, image_id=None, + source_volid=None): + self.cast(ctxt, + self.make_msg('create_volume', + volume_id=volume['id'], + request_spec=request_spec, + filter_properties=filter_properties, + allow_reschedule=allow_reschedule, + snapshot_id=snapshot_id, + image_id=image_id, + source_volid=source_volid), + topic=rpc.queue_get_for(ctxt, + self.topic, + host), + version='1.4') + + def delete_volume(self, ctxt, volume): + self.cast(ctxt, + self.make_msg('delete_volume', + volume_id=volume['id']), + topic=rpc.queue_get_for(ctxt, self.topic, volume['host'])) + + def create_snapshot(self, ctxt, volume, snapshot): + self.cast(ctxt, self.make_msg('create_snapshot', + volume_id=volume['id'], + snapshot_id=snapshot['id']), + topic=rpc.queue_get_for(ctxt, self.topic, volume['host'])) + + def delete_snapshot(self, ctxt, snapshot, host): + self.cast(ctxt, self.make_msg('delete_snapshot', + snapshot_id=snapshot['id']), + topic=rpc.queue_get_for(ctxt, self.topic, host)) + + def attach_volume(self, ctxt, volume, instance_uuid, mountpoint): + return self.call(ctxt, self.make_msg('attach_volume', + volume_id=volume['id'], + instance_uuid=instance_uuid, + mountpoint=mountpoint), + topic=rpc.queue_get_for(ctxt, + self.topic, + volume['host'])) + + def detach_volume(self, ctxt, volume): + return self.call(ctxt, self.make_msg('detach_volume', + volume_id=volume['id']), + topic=rpc.queue_get_for(ctxt, + self.topic, + volume['host'])) + + def copy_volume_to_image(self, ctxt, volume, image_meta): + self.cast(ctxt, self.make_msg('copy_volume_to_image', + volume_id=volume['id'], + image_meta=image_meta), + topic=rpc.queue_get_for(ctxt, + self.topic, + volume['host']), + version='1.3') + + def initialize_connection(self, ctxt, volume, connector): + return self.call(ctxt, self.make_msg('initialize_connection', + volume_id=volume['id'], + connector=connector), + topic=rpc.queue_get_for(ctxt, + self.topic, + volume['host'])) + + def terminate_connection(self, ctxt, volume, connector, force=False): + return self.call(ctxt, self.make_msg('terminate_connection', + volume_id=volume['id'], + connector=connector, + force=force), + topic=rpc.queue_get_for(ctxt, + self.topic, + volume['host'])) + + def publish_service_capabilities(self, ctxt): + self.fanout_cast(ctxt, self.make_msg('publish_service_capabilities'), + version='1.2') diff --git a/cinder/volume/utils.py b/cinder/volume/utils.py new file mode 100644 index 0000000000..9ddd309c18 --- /dev/null +++ b/cinder/volume/utils.py @@ -0,0 +1,131 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2012 OpenStack, LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
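As a usage note for the RPC client above, a caller-side sketch of the cast/call split in VolumeAPI; ctxt, volume and connector are supplied by the API layer, and this wrapper function is hypothetical:

from cinder.volume import rpcapi as volume_rpcapi

def detach_and_delete(ctxt, volume, connector):
    # Hypothetical helper demonstrating VolumeAPI usage.
    api = volume_rpcapi.VolumeAPI()
    # Blocking RPC call, routed to the host that owns the volume:
    api.terminate_connection(ctxt, volume, connector, force=False)
    # Fire-and-forget cast; no return value is expected:
    api.delete_volume(ctxt, volume)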
+ +"""Volume-related Utilities and helpers.""" + +import os +import stat + +from cinder import flags +from cinder.openstack.common import log as logging +from cinder.openstack.common.notifier import api as notifier_api +from cinder.openstack.common import timeutils +from cinder import utils + + +FLAGS = flags.FLAGS +LOG = logging.getLogger(__name__) + + +def get_host_from_queue(queuename): + # This assumes the queue is named something like cinder-volume + # and does not have dot separators in the queue name + return queuename.split('@', 1)[0].split('.', 1)[1] + + +def notify_usage_exists(context, volume_ref, current_period=False): + """ Generates 'exists' notification for a volume for usage auditing + purposes. + + Generates usage for last completed period, unless 'current_period' + is True.""" + begin, end = utils.last_completed_audit_period() + if current_period: + audit_start = end + audit_end = timeutils.utcnow() + else: + audit_start = begin + audit_end = end + + extra_usage_info = dict(audit_period_beginning=str(audit_start), + audit_period_ending=str(audit_end)) + + notify_about_volume_usage(context, volume_ref, + 'exists', extra_usage_info=extra_usage_info) + + +def null_safe_str(s): + return str(s) if s else '' + + +def _usage_from_volume(context, volume_ref, **kw): + usage_info = dict(tenant_id=volume_ref['project_id'], + user_id=volume_ref['user_id'], + availability_zone=volume_ref['availability_zone'], + volume_id=volume_ref['id'], + volume_type=volume_ref['volume_type_id'], + display_name=volume_ref['display_name'], + launched_at=null_safe_str(volume_ref['launched_at']), + created_at=null_safe_str(volume_ref['created_at']), + status=volume_ref['status'], + snapshot_id=volume_ref['snapshot_id'], + size=volume_ref['size']) + + usage_info.update(kw) + return usage_info + + +def notify_about_volume_usage(context, volume, event_suffix, + extra_usage_info=None, host=None): + if not host: + host = FLAGS.host + + if not extra_usage_info: + extra_usage_info = {} + + usage_info = _usage_from_volume(context, volume, **extra_usage_info) + + notifier_api.notify(context, 'volume.%s' % host, + 'volume.%s' % event_suffix, + notifier_api.INFO, usage_info) + + +def _usage_from_snapshot(context, snapshot_ref, **extra_usage_info): + usage_info = { + 'tenant_id': snapshot_ref['project_id'], + 'user_id': snapshot_ref['user_id'], + 'availability_zone': snapshot_ref.volume['availability_zone'], + 'volume_id': snapshot_ref['volume_id'], + 'volume_size': snapshot_ref['volume_size'], + 'snapshot_id': snapshot_ref['id'], + 'display_name': snapshot_ref['display_name'], + 'created_at': str(snapshot_ref['created_at']), + 'status': snapshot_ref['status'], + 'deleted': null_safe_str(snapshot_ref['deleted']) + } + + usage_info.update(extra_usage_info) + return usage_info + + +def notify_about_snapshot_usage(context, snapshot, event_suffix, + extra_usage_info=None, host=None): + if not host: + host = FLAGS.host + + if not extra_usage_info: + extra_usage_info = {} + + usage_info = _usage_from_snapshot(context, snapshot, **extra_usage_info) + + notifier_api.notify(context, 'snapshot.%s' % host, + 'snapshot.%s' % event_suffix, + notifier_api.INFO, usage_info) + + +def is_block(path): + mode = os.stat(path).st_mode + return stat.S_ISBLK(mode) diff --git a/cinder/volume/volume_types.py b/cinder/volume/volume_types.py new file mode 100644 index 0000000000..dab9626857 --- /dev/null +++ b/cinder/volume/volume_types.py @@ -0,0 +1,158 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 Zadara Storage 
+# Copyright (c) 2011 OpenStack LLC.
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# Copyright (c) 2010 Citrix Systems, Inc.
+# Copyright 2011 Ken Pepple
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Built-in volume type properties."""
+
+from cinder import context
+from cinder import db
+from cinder import exception
+from cinder import flags
+from cinder.openstack.common import log as logging
+
+FLAGS = flags.FLAGS
+LOG = logging.getLogger(__name__)
+
+
+def create(context, name, extra_specs=None):
+    """Creates volume types."""
+    # NOTE: default to None rather than a mutable {} so a single dict is
+    # not shared across calls.
+    extra_specs = extra_specs or {}
+    try:
+        type_ref = db.volume_type_create(context,
+                                         dict(name=name,
+                                              extra_specs=extra_specs))
+    except exception.DBError, e:
+        LOG.exception(_('DB error: %s') % e)
+        raise exception.VolumeTypeCreateFailed(name=name,
+                                               extra_specs=extra_specs)
+    return type_ref
+
+
+def destroy(context, id):
+    """Marks volume types as deleted."""
+    if id is None:
+        msg = _("id cannot be None")
+        raise exception.InvalidVolumeType(reason=msg)
+    else:
+        db.volume_type_destroy(context, id)
+
+
+def get_all_types(context, inactive=0, search_opts=None):
+    """Get all non-deleted volume_types.
+
+    Pass a true value for 'inactive' to include deleted volume types in
+    the result.
+
+    """
+    search_opts = search_opts or {}
+    vol_types = db.volume_type_get_all(context, inactive)
+
+    if search_opts:
+        LOG.debug(_("Searching by: %s") % str(search_opts))
+
+        def _check_extra_specs_match(vol_type, searchdict):
+            for k, v in searchdict.iteritems():
+                if (k not in vol_type['extra_specs'].keys()
+                        or vol_type['extra_specs'][k] != v):
+                    return False
+            return True
+
+        # search_option to filter_name mapping.
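+        # For illustration (hypothetical key/values): passing
+        #     search_opts={'extra_specs': {'volume_backend_name': 'lvm1'}}
+        # keeps only the types whose extra_specs contain exactly that
+        # pair; search_opts keys with no entry in filter_mapping are
+        # skipped by the loop below.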
+        filter_mapping = {'extra_specs': _check_extra_specs_match}
+
+        result = {}
+        for type_name, type_args in vol_types.iteritems():
+            # go over all filters in the list
+            for opt, values in search_opts.iteritems():
+                try:
+                    filter_func = filter_mapping[opt]
+                except KeyError:
+                    # no such filter - ignore it, go to next filter
+                    continue
+                else:
+                    if filter_func(type_args, values):
+                        result[type_name] = type_args
+                        break
+        vol_types = result
+    return vol_types
+
+
+def get_volume_type(ctxt, id):
+    """Retrieves single volume type by id."""
+    if id is None:
+        msg = _("id cannot be None")
+        raise exception.InvalidVolumeType(reason=msg)
+
+    if ctxt is None:
+        ctxt = context.get_admin_context()
+
+    return db.volume_type_get(ctxt, id)
+
+
+def get_volume_type_by_name(context, name):
+    """Retrieves single volume type by name."""
+    if name is None:
+        msg = _("name cannot be None")
+        raise exception.InvalidVolumeType(reason=msg)
+
+    return db.volume_type_get_by_name(context, name)
+
+
+def get_default_volume_type():
+    """Get the default volume type."""
+    name = FLAGS.default_volume_type
+    vol_type = {}
+
+    if name is not None:
+        ctxt = context.get_admin_context()
+        try:
+            vol_type = get_volume_type_by_name(ctxt, name)
+        except exception.VolumeTypeNotFoundByName, e:
+            # Couldn't find a volume type with the name set in the
+            # default_volume_type flag; record this issue and move on.
+            # TODO(zhiteng): consider adding a notification to warn admins.
+            LOG.exception(_('Default volume type not found; please check '
+                            'the default_volume_type config option: %s'), e)
+
+    return vol_type
+
+
+def is_key_value_present(volume_type_id, key, value, volume_type=None):
+    if volume_type_id is None:
+        return False
+
+    if volume_type is None:
+        volume_type = get_volume_type(context.get_admin_context(),
+                                      volume_type_id)
+    if (volume_type.get('extra_specs') is None or
+            volume_type['extra_specs'].get(key) != value):
+        return False
+    else:
+        return True
+
+
+def get_volume_type_extra_specs(volume_type_id, key=False):
+    volume_type = get_volume_type(context.get_admin_context(),
+                                  volume_type_id)
+    extra_specs = volume_type['extra_specs']
+    if key:
+        if extra_specs.get(key):
+            return extra_specs.get(key)
+        else:
+            return False
+    else:
+        return extra_specs
diff --git a/cinder/wsgi.py b/cinder/wsgi.py
new file mode 100644
index 0000000000..bcd63006af
--- /dev/null
+++ b/cinder/wsgi.py
@@ -0,0 +1,493 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# Copyright 2010 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
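+
+# A minimal usage sketch of the Server class defined below (the app name,
+# host, and port are illustrative, not prescribed by this module):
+#
+#     server = Server('osapi_volume', app, host='0.0.0.0', port=8776)
+#     server.start()
+#     server.wait()
+#
+# where 'app' is any WSGI callable, for instance one returned by
+# Loader().load_app() further down in this file.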
+ +"""Utility methods for working with WSGI servers.""" + +import errno +import os +import socket +import ssl +import sys +import time + +import eventlet +import eventlet.wsgi +import greenlet +from oslo.config import cfg +from paste import deploy +import routes.middleware +import webob.dec +import webob.exc + +from cinder import exception +from cinder import flags +from cinder.openstack.common import log as logging +from cinder import utils + +socket_opts = [ + cfg.IntOpt('backlog', + default=4096, + help="Number of backlog requests to configure the socket with"), + cfg.IntOpt('tcp_keepidle', + default=600, + help="Sets the value of TCP_KEEPIDLE in seconds for each " + "server socket. Not supported on OS X."), + cfg.StrOpt('ssl_ca_file', + default=None, + help="CA certificate file to use to verify " + "connecting clients"), + cfg.StrOpt('ssl_cert_file', + default=None, + help="Certificate file to use when starting " + "the server securely"), + cfg.StrOpt('ssl_key_file', + default=None, + help="Private key file to use when starting " + "the server securely"), +] + +CONF = cfg.CONF +CONF.register_opts(socket_opts) + +FLAGS = flags.FLAGS +LOG = logging.getLogger(__name__) + + +class Server(object): + """Server class to manage a WSGI server, serving a WSGI application.""" + + default_pool_size = 1000 + + def __init__(self, name, app, host=None, port=None, pool_size=None, + protocol=eventlet.wsgi.HttpProtocol): + """Initialize, but do not start, a WSGI server. + + :param name: Pretty name for logging. + :param app: The WSGI application to serve. + :param host: IP address to serve the application. + :param port: Port number to server the application. + :param pool_size: Maximum number of eventlets to spawn concurrently. + :returns: None + + """ + self.name = name + self.app = app + self._host = host or "0.0.0.0" + self._port = port or 0 + self._server = None + self._socket = None + self._protocol = protocol + self._pool = eventlet.GreenPool(pool_size or self.default_pool_size) + self._logger = logging.getLogger("eventlet.wsgi.server") + self._wsgi_logger = logging.WritableLogger(self._logger) + + def _get_socket(self, host, port, backlog): + bind_addr = (host, port) + # TODO(dims): eventlet's green dns/socket module does not actually + # support IPv6 in getaddrinfo(). 
We need to get around this in the + # future or monitor upstream for a fix + try: + info = socket.getaddrinfo(bind_addr[0], + bind_addr[1], + socket.AF_UNSPEC, + socket.SOCK_STREAM)[0] + family = info[0] + bind_addr = info[-1] + except Exception: + family = socket.AF_INET + + cert_file = CONF.ssl_cert_file + key_file = CONF.ssl_key_file + ca_file = CONF.ssl_ca_file + use_ssl = cert_file or key_file + + if cert_file and not os.path.exists(cert_file): + raise RuntimeError(_("Unable to find cert_file : %s") % cert_file) + + if ca_file and not os.path.exists(ca_file): + raise RuntimeError(_("Unable to find ca_file : %s") % ca_file) + + if key_file and not os.path.exists(key_file): + raise RuntimeError(_("Unable to find key_file : %s") % key_file) + + if use_ssl and (not cert_file or not key_file): + raise RuntimeError(_("When running server in SSL mode, you must " + "specify both a cert_file and key_file " + "option value in your configuration file")) + + def wrap_ssl(sock): + ssl_kwargs = { + 'server_side': True, + 'certfile': cert_file, + 'keyfile': key_file, + 'cert_reqs': ssl.CERT_NONE, + } + + if CONF.ssl_ca_file: + ssl_kwargs['ca_certs'] = ca_file + ssl_kwargs['cert_reqs'] = ssl.CERT_REQUIRED + + return ssl.wrap_socket(sock, **ssl_kwargs) + + sock = None + retry_until = time.time() + 30 + while not sock and time.time() < retry_until: + try: + sock = eventlet.listen(bind_addr, + backlog=backlog, + family=family) + if use_ssl: + sock = wrap_ssl(sock) + + except socket.error, err: + if err.args[0] != errno.EADDRINUSE: + raise + eventlet.sleep(0.1) + if not sock: + raise RuntimeError(_("Could not bind to %(host)s:%(port)s " + "after trying for 30 seconds") % + {'host': host, 'port': port}) + sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) + # sockets can hang around forever without keepalive + sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1) + + # This option isn't available in the OS X version of eventlet + if hasattr(socket, 'TCP_KEEPIDLE'): + sock.setsockopt(socket.IPPROTO_TCP, + socket.TCP_KEEPIDLE, + CONF.tcp_keepidle) + + return sock + + def _start(self): + """Run the blocking eventlet WSGI server. + + :returns: None + + """ + eventlet.wsgi.server(self._socket, + self.app, + protocol=self._protocol, + custom_pool=self._pool, + log=self._wsgi_logger) + + def start(self, backlog=128): + """Start serving a WSGI application. + + :param backlog: Maximum number of queued connections. + :returns: None + :raises: cinder.exception.InvalidInput + + """ + if backlog < 1: + raise exception.InvalidInput( + reason='The backlog must be more than 1') + + self._socket = self._get_socket(self._host, + self._port, + backlog=backlog) + self._server = eventlet.spawn(self._start) + (self._host, self._port) = self._socket.getsockname()[0:2] + LOG.info(_("Started %(name)s on %(_host)s:%(_port)s") % self.__dict__) + + @property + def host(self): + return self._host + + @property + def port(self): + return self._port + + def stop(self): + """Stop this server. + + This is not a very nice action, as currently the method by which a + server is stopped is by killing its eventlet. + + :returns: None + + """ + LOG.info(_("Stopping WSGI server.")) + self._server.kill() + + def wait(self): + """Block, until the server has stopped. + + Waits on the server's eventlet to finish, then returns. 
+ + :returns: None + + """ + try: + self._server.wait() + except greenlet.GreenletExit: + LOG.info(_("WSGI server has stopped.")) + + +class Request(webob.Request): + pass + + +class Application(object): + """Base WSGI application wrapper. Subclasses need to implement __call__.""" + + @classmethod + def factory(cls, global_config, **local_config): + """Used for paste app factories in paste.deploy config files. + + Any local configuration (that is, values under the [app:APPNAME] + section of the paste config) will be passed into the `__init__` method + as kwargs. + + A hypothetical configuration would look like: + + [app:wadl] + latest_version = 1.3 + paste.app_factory = cinder.api.fancy_api:Wadl.factory + + which would result in a call to the `Wadl` class as + + import cinder.api.fancy_api + fancy_api.Wadl(latest_version='1.3') + + You could of course re-implement the `factory` method in subclasses, + but using the kwarg passing it shouldn't be necessary. + + """ + return cls(**local_config) + + def __call__(self, environ, start_response): + r"""Subclasses will probably want to implement __call__ like this: + + @webob.dec.wsgify(RequestClass=Request) + def __call__(self, req): + # Any of the following objects work as responses: + + # Option 1: simple string + res = 'message\n' + + # Option 2: a nicely formatted HTTP exception page + res = exc.HTTPForbidden(detail='Nice try') + + # Option 3: a webob Response object (in case you need to play with + # headers, or you want to be treated like an iterable, or or or) + res = Response(); + res.app_iter = open('somefile') + + # Option 4: any wsgi app to be run next + res = self.application + + # Option 5: you can get a Response object for a wsgi app, too, to + # play with headers etc + res = req.get_response(self.application) + + # You can then just return your response... + return res + # ... or set req.response and return None. + req.response = res + + See the end of http://pythonpaste.org/webob/modules/dec.html + for more info. + + """ + raise NotImplementedError(_('You must implement __call__')) + + +class Middleware(Application): + """Base WSGI middleware. + + These classes require an application to be + initialized that will be called next. By default the middleware will + simply call its wrapped app, or you can override __call__ to customize its + behavior. + + """ + + @classmethod + def factory(cls, global_config, **local_config): + """Used for paste app factories in paste.deploy config files. + + Any local configuration (that is, values under the [filter:APPNAME] + section of the paste config) will be passed into the `__init__` method + as kwargs. + + A hypothetical configuration would look like: + + [filter:analytics] + redis_host = 127.0.0.1 + paste.filter_factory = cinder.api.analytics:Analytics.factory + + which would result in a call to the `Analytics` class as + + import cinder.api.analytics + analytics.Analytics(app_from_paste, redis_host='127.0.0.1') + + You could of course re-implement the `factory` method in subclasses, + but using the kwarg passing it shouldn't be necessary. + + """ + def _factory(app): + return cls(app, **local_config) + return _factory + + def __init__(self, application): + self.application = application + + def process_request(self, req): + """Called on each request. + + If this returns None, the next application down the stack will be + executed. If it returns a response then that response will be returned + and execution will stop here. 
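+
+        For example, a hypothetical middleware that rejects requests
+        missing an auth token could short-circuit here:
+
+            def process_request(self, req):
+                if 'X-Auth-Token' not in req.headers:
+                    return webob.exc.HTTPUnauthorized()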
+ + """ + return None + + def process_response(self, response): + """Do whatever you'd like to the response.""" + return response + + @webob.dec.wsgify(RequestClass=Request) + def __call__(self, req): + response = self.process_request(req) + if response: + return response + response = req.get_response(self.application) + return self.process_response(response) + + +class Debug(Middleware): + """Helper class for debugging a WSGI application. + + Can be inserted into any WSGI application chain to get information + about the request and response. + + """ + + @webob.dec.wsgify(RequestClass=Request) + def __call__(self, req): + print ('*' * 40) + ' REQUEST ENVIRON' + for key, value in req.environ.items(): + print key, '=', value + print + resp = req.get_response(self.application) + + print ('*' * 40) + ' RESPONSE HEADERS' + for (key, value) in resp.headers.iteritems(): + print key, '=', value + print + + resp.app_iter = self.print_generator(resp.app_iter) + + return resp + + @staticmethod + def print_generator(app_iter): + """Iterator that prints the contents of a wrapper string.""" + print ('*' * 40) + ' BODY' + for part in app_iter: + sys.stdout.write(part) + sys.stdout.flush() + yield part + print + + +class Router(object): + """WSGI middleware that maps incoming requests to WSGI apps.""" + + def __init__(self, mapper): + """Create a router for the given routes.Mapper. + + Each route in `mapper` must specify a 'controller', which is a + WSGI app to call. You'll probably want to specify an 'action' as + well and have your controller be an object that can route + the request to the action-specific method. + + Examples: + mapper = routes.Mapper() + sc = ServerController() + + # Explicit mapping of one route to a controller+action + mapper.connect(None, '/svrlist', controller=sc, action='list') + + # Actions are all implicitly defined + mapper.resource('server', 'servers', controller=sc) + + # Pointing to an arbitrary WSGI app. You can specify the + # {path_info:.*} parameter so the target app can be handed just that + # section of the URL. + mapper.connect(None, '/v1.0/{path_info:.*}', controller=BlogApp()) + + """ + self.map = mapper + self._router = routes.middleware.RoutesMiddleware(self._dispatch, + self.map) + + @webob.dec.wsgify(RequestClass=Request) + def __call__(self, req): + """Route the incoming request to a controller based on self.map. + + If no match, return a 404. + + """ + return self._router + + @staticmethod + @webob.dec.wsgify(RequestClass=Request) + def _dispatch(req): + """Dispatch the request to the appropriate controller. + + Called by self._router after matching the incoming request to a route + and putting the information into req.environ. Either returns 404 + or the routed WSGI app's response. + + """ + match = req.environ['wsgiorg.routing_args'][1] + if not match: + return webob.exc.HTTPNotFound() + app = match['controller'] + return app + + +class Loader(object): + """Used to load WSGI applications from paste configurations.""" + + def __init__(self, config_path=None): + """Initialize the loader, and attempt to find the config. + + :param config_path: Full or relative path to the paste config. + :returns: None + + """ + config_path = config_path or FLAGS.api_paste_config + self.config_path = utils.find_config(config_path) + + def load_app(self, name): + """Return the paste URLMap wrapped WSGI application. + + :param name: Name of the application to load. + :returns: Paste URLMap object wrapping the requested application. 
+ :raises: `cinder.exception.PasteAppNotFound` + + """ + try: + return deploy.loadapp("config:%s" % self.config_path, name=name) + except LookupError as err: + LOG.error(err) + raise exception.PasteAppNotFound(name=name, path=self.config_path) diff --git a/contrib/redhat-eventlet.patch b/contrib/redhat-eventlet.patch new file mode 100644 index 0000000000..cf2ff53d51 --- /dev/null +++ b/contrib/redhat-eventlet.patch @@ -0,0 +1,16 @@ +--- .nova-venv/lib/python2.6/site-packages/eventlet/green/subprocess.py.orig +2011-05-25 +23:31:34.597271402 +0000 ++++ .nova-venv/lib/python2.6/site-packages/eventlet/green/subprocess.py +2011-05-25 +23:33:24.055602468 +0000 +@@ -32,7 +32,7 @@ + setattr(self, attr, wrapped_pipe) + __init__.__doc__ = subprocess_orig.Popen.__init__.__doc__ + +- def wait(self, check_interval=0.01): ++ def wait(self, check_interval=0.01, timeout=None): + # Instead of a blocking OS call, this version of wait() uses logic + # borrowed from the eventlet 0.2 processes.Process.wait() method. + try: + diff --git a/doc/.gitignore b/doc/.gitignore new file mode 100644 index 0000000000..291b04e45d --- /dev/null +++ b/doc/.gitignore @@ -0,0 +1,3 @@ +_build/* +source/api/* +.autogenerated diff --git a/doc/Makefile b/doc/Makefile new file mode 100644 index 0000000000..ba789b5df8 --- /dev/null +++ b/doc/Makefile @@ -0,0 +1,97 @@ +# Makefile for Sphinx documentation +# + +# You can set these variables from the command line. +SPHINXOPTS = +SPHINXBUILD = sphinx-build +SPHINXSOURCE = source +PAPER = +BUILDDIR = build + +# Internal variables. +PAPEROPT_a4 = -D latex_paper_size=a4 +PAPEROPT_letter = -D latex_paper_size=letter +ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) $(SPHINXSOURCE) + +.PHONY: help clean html dirhtml pickle json htmlhelp qthelp latex changes linkcheck doctest + +.DEFAULT_GOAL = html + +help: + @echo "Please use \`make ' where is one of" + @echo " html to make standalone HTML files" + @echo " dirhtml to make HTML files named index.html in directories" + @echo " pickle to make pickle files" + @echo " json to make JSON files" + @echo " htmlhelp to make HTML files and a HTML help project" + @echo " qthelp to make HTML files and a qthelp project" + @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" + @echo " changes to make an overview of all changed/added/deprecated items" + @echo " linkcheck to check all external links for integrity" + @echo " doctest to run all doctests embedded in the documentation (if enabled)" + +clean: + -rm -rf $(BUILDDIR)/* + -rm -rf cinder.sqlite + if [ -f .autogenerated ] ; then \ + cat .autogenerated | xargs rm ; \ + rm .autogenerated ; \ + fi + +html: + $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html + @echo + @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." + +dirhtml: + $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml + @echo + @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." + +pickle: + $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle + @echo + @echo "Build finished; now you can process the pickle files." + +json: + $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json + @echo + @echo "Build finished; now you can process the JSON files." + +htmlhelp: + $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp + @echo + @echo "Build finished; now you can run HTML Help Workshop with the" \ + ".hhp project file in $(BUILDDIR)/htmlhelp." 
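+
+# The latex target below honors the PAPER variable defined above, e.g.:
+#     make latex PAPER=a4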
+ +qthelp: + $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp + @echo + @echo "Build finished; now you can run "qcollectiongenerator" with the" \ + ".qhcp project file in $(BUILDDIR)/qthelp, like this:" + @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/cinder.qhcp" + @echo "To view the help file:" + @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/cinder.qhc" + +latex: + $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex + @echo + @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." + @echo "Run \`make all-pdf' or \`make all-ps' in that directory to" \ + "run these through (pdf)latex." + +changes: + $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes + @echo + @echo "The overview file is in $(BUILDDIR)/changes." + +linkcheck: + $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck + @echo + @echo "Link check complete; look for any errors in the above output " \ + "or in $(BUILDDIR)/linkcheck/output.txt." + +doctest: + $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest + @echo "Testing of doctests in the sources finished, look at the " \ + "results in $(BUILDDIR)/doctest/output.txt." diff --git a/doc/README.rst b/doc/README.rst new file mode 100644 index 0000000000..835dbc5d9f --- /dev/null +++ b/doc/README.rst @@ -0,0 +1,55 @@ +================= +Building the docs +================= + +Dependencies +============ + +Sphinx_ + You'll need sphinx (the python one) and if you are + using the virtualenv you'll need to install it in the virtualenv + specifically so that it can load the cinder modules. + + :: + + pip install Sphinx + +Graphviz_ + Some of the diagrams are generated using the ``dot`` language + from Graphviz. + + :: + + sudo apt-get install graphviz + +.. _Sphinx: http://sphinx.pocoo.org + +.. _Graphviz: http://www.graphviz.org/ + + +Use `make` +========== + +Just type make:: + + % make + +Look in the Makefile for more targets. + + +Manually +======== + + 1. Generate the code.rst file so that Sphinx will pull in our docstrings:: + + % ./generate_autodoc_index.sh > source/code.rst + + 2. Run `sphinx_build`:: + + % sphinx-build -b html source build/html + + +The docs have been built +======================== + +Check out the `build` directory to find them. Yay! diff --git a/doc/ext/__init__.py b/doc/ext/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/doc/ext/cinder_autodoc.py b/doc/ext/cinder_autodoc.py new file mode 100644 index 0000000000..a778f4a522 --- /dev/null +++ b/doc/ext/cinder_autodoc.py @@ -0,0 +1,12 @@ +import gettext +import os + +gettext.install('cinder') + +from cinder import utils + + +def setup(app): + print "**Autodocumenting from %s" % os.path.abspath(os.curdir) + rv = utils.execute('./doc/generate_autodoc_index.sh') + print rv[0] diff --git a/doc/ext/cinder_todo.py b/doc/ext/cinder_todo.py new file mode 100644 index 0000000000..280ea926af --- /dev/null +++ b/doc/ext/cinder_todo.py @@ -0,0 +1,104 @@ +# -*- coding: utf-8 -*- +# This is a hack of the builtin todo extension, to make the todo_list +# more user friendly + +from sphinx.ext.todo import * +import re + + +def _(s): + return s + + +def process_todo_nodes(app, doctree, fromdocname): + if not app.config['todo_include_todos']: + for node in doctree.traverse(todo_node): + node.parent.remove(node) + + # Replace all todolist nodes with a list of the collected todos. + # Augment each todo with a backlink to the original location. 
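+    # Todo comments that start with "P<n>" (for example "P1 fix before
+    # release") are bucketed into priority list n below; todos without
+    # such a prefix fall into the lowest-priority bucket, 5.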
+ env = app.builder.env + + if not hasattr(env, 'todo_all_todos'): + env.todo_all_todos = [] + + # remove the item that was added in the constructor, since I'm tired of + # reading through docutils for the proper way to construct an empty list + lists = [] + for i in xrange(5): + lists.append(nodes.bullet_list("", nodes.Text('', ''))) + lists[i].remove(lists[i][0]) + lists[i]['classes'].append('todo_list') + + for node in doctree.traverse(todolist): + if not app.config['todo_include_todos']: + node.replace_self([]) + continue + + for todo_info in env.todo_all_todos: + para = nodes.paragraph() + filename = env.doc2path(todo_info['docname'], base=None) + + # Create a reference + newnode = nodes.reference('', '') + + line_info = todo_info['lineno'] + link = _('%(filename)s, line %(line_info)d') % locals() + innernode = nodes.emphasis(link, link) + newnode['refdocname'] = todo_info['docname'] + + try: + newnode['refuri'] = app.builder.get_relative_uri( + fromdocname, todo_info['docname']) + newnode['refuri'] += '#' + todo_info['target']['refid'] + except NoUri: + # ignore if no URI can be determined, e.g. for LaTeX output + pass + + newnode.append(innernode) + para += newnode + para['classes'].append('todo_link') + + todo_entry = todo_info['todo'] + + env.resolve_references(todo_entry, todo_info['docname'], + app.builder) + + item = nodes.list_item('', para) + todo_entry[1]['classes'].append('details') + + comment = todo_entry[1] + + m = re.match(r"^P(\d)", comment.astext()) + priority = 5 + if m: + priority = int(m.group(1)) + if priority < 0: + priority = 1 + if priority > 5: + priority = 5 + + item['classes'].append('todo_p' + str(priority)) + todo_entry['classes'].append('todo_p' + str(priority)) + + item.append(comment) + + lists[priority - 1].insert(0, item) + + node.replace_self(lists) + + +def setup(app): + app.add_config_value('todo_include_todos', False, False) + + app.add_node(todolist) + app.add_node(todo_node, + html=(visit_todo_node, depart_todo_node), + latex=(visit_todo_node, depart_todo_node), + text=(visit_todo_node, depart_todo_node)) + + app.add_directive('todo', Todo) + app.add_directive('todolist', TodoList) + app.connect('doctree-read', process_todos) + app.connect('doctree-resolved', process_todo_nodes) + app.connect('env-purge-doc', purge_todos) diff --git a/doc/find_autodoc_modules.sh b/doc/find_autodoc_modules.sh new file mode 100755 index 0000000000..fb7e451a08 --- /dev/null +++ b/doc/find_autodoc_modules.sh @@ -0,0 +1,20 @@ +#!/bin/bash + +CINDER_DIR='cinder/' # include trailing slash +DOCS_DIR='source' + +modules='' +for x in `find ${CINDER_DIR} -name '*.py' | grep -v cinder/tests`; do + if [ `basename ${x} .py` == "__init__" ] ; then + continue + fi + relative=cinder.`echo ${x} | sed -e 's$^'${CINDER_DIR}'$$' -e 's/.py$//' -e 's$/$.$g'` + modules="${modules} ${relative}" +done + +for mod in ${modules} ; do + if [ ! -f "${DOCS_DIR}/${mod}.rst" ]; + then + echo ${mod} + fi +done diff --git a/doc/generate_autodoc_index.sh b/doc/generate_autodoc_index.sh new file mode 100755 index 0000000000..bdfa73a496 --- /dev/null +++ b/doc/generate_autodoc_index.sh @@ -0,0 +1,46 @@ +#!/bin/sh + +SOURCEDIR=doc/source/api + +if [ ! 
-d ${SOURCEDIR} ] ; then + mkdir -p ${SOURCEDIR} +fi + +for x in `./doc/find_autodoc_modules.sh`; +do + echo "Generating ${SOURCEDIR}/${x}.rst" + echo "${SOURCEDIR}/${x}.rst" >> .autogenerated + heading="The :mod:\`${x}\` Module" + # Figure out how long the heading is + # and make sure to emit that many '=' under + # it to avoid heading format errors + # in Sphinx. + heading_len=$(echo "$heading" | wc -c) + underline=$(head -c $heading_len < /dev/zero | tr '\0' '=') + ( cat < ${SOURCEDIR}/${x}.rst + +done + +if [ ! -f ${SOURCEDIR}/autoindex.rst ] ; then + + cat > ${SOURCEDIR}/autoindex.rst <> ${SOURCEDIR}/autoindex.rst + done + + echo ${SOURCEDIR}/autoindex.rst >> .autogenerated +fi diff --git a/doc/source/_ga/layout.html b/doc/source/_ga/layout.html new file mode 100644 index 0000000000..f29e909685 --- /dev/null +++ b/doc/source/_ga/layout.html @@ -0,0 +1,17 @@ +{% extends "!layout.html" %} + +{% block footer %} +{{ super() }} + + +{% endblock %} + diff --git a/doc/source/_static/.gitignore b/doc/source/_static/.gitignore new file mode 100644 index 0000000000..e69de29bb2 diff --git a/doc/source/_static/.placeholder b/doc/source/_static/.placeholder new file mode 100644 index 0000000000..e69de29bb2 diff --git a/doc/source/_static/basic.css b/doc/source/_static/basic.css new file mode 100644 index 0000000000..d909ce37c7 --- /dev/null +++ b/doc/source/_static/basic.css @@ -0,0 +1,416 @@ +/** + * Sphinx stylesheet -- basic theme + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + */ + +/* -- main layout ----------------------------------------------------------- */ + +div.clearer { + clear: both; +} + +/* -- relbar ---------------------------------------------------------------- */ + +div.related { + width: 100%; + font-size: 90%; +} + +div.related h3 { + display: none; +} + +div.related ul { + margin: 0; + padding: 0 0 0 10px; + list-style: none; +} + +div.related li { + display: inline; +} + +div.related li.right { + float: right; + margin-right: 5px; +} + +/* -- sidebar --------------------------------------------------------------- */ + +div.sphinxsidebarwrapper { + padding: 10px 5px 0 10px; +} + +div.sphinxsidebar { + float: left; + width: 230px; + margin-left: -100%; + font-size: 90%; +} + +div.sphinxsidebar ul { + list-style: none; +} + +div.sphinxsidebar ul ul, +div.sphinxsidebar ul.want-points { + margin-left: 20px; + list-style: square; +} + +div.sphinxsidebar ul ul { + margin-top: 0; + margin-bottom: 0; +} + +div.sphinxsidebar form { + margin-top: 10px; +} + +div.sphinxsidebar input { + border: 1px solid #98dbcc; + font-family: sans-serif; + font-size: 1em; +} + +img { + border: 0; +} + +/* -- search page ----------------------------------------------------------- */ + +ul.search { + margin: 10px 0 0 20px; + padding: 0; +} + +ul.search li { + padding: 5px 0 5px 20px; + background-image: url(file.png); + background-repeat: no-repeat; + background-position: 0 7px; +} + +ul.search li a { + font-weight: bold; +} + +ul.search li div.context { + color: #888; + margin: 2px 0 0 30px; + text-align: left; +} + +ul.keywordmatches li.goodmatch a { + font-weight: bold; +} + +/* -- index page ------------------------------------------------------------ */ + +table.contentstable { + width: 90%; +} + +table.contentstable p.biglink { + line-height: 150%; +} + +a.biglink { + font-size: 1.3em; +} + +span.linkdescr { + font-style: italic; + padding-top: 5px; + font-size: 90%; +} + +/* -- general index --------------------------------------------------------- */ + +table.indextable td { + text-align: left; + 
vertical-align: top; +} + +table.indextable dl, table.indextable dd { + margin-top: 0; + margin-bottom: 0; +} + +table.indextable tr.pcap { + height: 10px; +} + +table.indextable tr.cap { + margin-top: 10px; + background-color: #f2f2f2; +} + +img.toggler { + margin-right: 3px; + margin-top: 3px; + cursor: pointer; +} + +/* -- general body styles --------------------------------------------------- */ + +a.headerlink { + visibility: hidden; +} + +h1:hover > a.headerlink, +h2:hover > a.headerlink, +h3:hover > a.headerlink, +h4:hover > a.headerlink, +h5:hover > a.headerlink, +h6:hover > a.headerlink, +dt:hover > a.headerlink { + visibility: visible; +} + +div.body p.caption { + text-align: inherit; +} + +div.body td { + text-align: left; +} + +.field-list ul { + padding-left: 1em; +} + +.first { +} + +p.rubric { + margin-top: 30px; + font-weight: bold; +} + +/* -- sidebars -------------------------------------------------------------- */ + +div.sidebar { + margin: 0 0 0.5em 1em; + border: 1px solid #ddb; + padding: 7px 7px 0 7px; + background-color: #ffe; + width: 40%; + float: right; +} + +p.sidebar-title { + font-weight: bold; +} + +/* -- topics ---------------------------------------------------------------- */ + +div.topic { + border: 1px solid #ccc; + padding: 7px 7px 0 7px; + margin: 10px 0 10px 0; +} + +p.topic-title { + font-size: 1.1em; + font-weight: bold; + margin-top: 10px; +} + +/* -- admonitions ----------------------------------------------------------- */ + +div.admonition { + margin-top: 10px; + margin-bottom: 10px; + padding: 7px; +} + +div.admonition dt { + font-weight: bold; +} + +div.admonition dl { + margin-bottom: 0; +} + +p.admonition-title { + margin: 0px 10px 5px 0px; + font-weight: bold; +} + +div.body p.centered { + text-align: center; + margin-top: 25px; +} + +/* -- tables ---------------------------------------------------------------- */ + +table.docutils { + border: 0; + border-collapse: collapse; +} + +table.docutils td, table.docutils th { + padding: 1px 8px 1px 0; + border-top: 0; + border-left: 0; + border-right: 0; + border-bottom: 1px solid #aaa; +} + +table.field-list td, table.field-list th { + border: 0 !important; +} + +table.footnote td, table.footnote th { + border: 0 !important; +} + +th { + text-align: left; + padding-right: 5px; +} + +/* -- other body styles ----------------------------------------------------- */ + +dl { + margin-bottom: 15px; +} + +dd p { + margin-top: 0px; +} + +dd ul, dd table { + margin-bottom: 10px; +} + +dd { + margin-top: 3px; + margin-bottom: 10px; + margin-left: 30px; +} + +dt:target, .highlight { + background-color: #fbe54e; +} + +dl.glossary dt { + font-weight: bold; + font-size: 1.1em; +} + +.field-list ul { + margin: 0; + padding-left: 1em; +} + +.field-list p { + margin: 0; +} + +.refcount { + color: #060; +} + +.optional { + font-size: 1.3em; +} + +.versionmodified { + font-style: italic; +} + +.system-message { + background-color: #fda; + padding: 5px; + border: 3px solid red; +} + +.footnote:target { + background-color: #ffa +} + +.line-block { + display: block; + margin-top: 1em; + margin-bottom: 1em; +} + +.line-block .line-block { + margin-top: 0; + margin-bottom: 0; + margin-left: 1.5em; +} + +/* -- code displays --------------------------------------------------------- */ + +pre { + overflow: auto; +} + +td.linenos pre { + padding: 5px 0px; + border: 0; + background-color: transparent; + color: #aaa; +} + +table.highlighttable { + margin-left: 0.5em; +} + +table.highlighttable td { + padding: 0 0.5em 0 
0.5em; +} + +tt.descname { + background-color: transparent; + font-weight: bold; + font-size: 1.2em; +} + +tt.descclassname { + background-color: transparent; +} + +tt.xref, a tt { + background-color: transparent; + font-weight: bold; +} + +h1 tt, h2 tt, h3 tt, h4 tt, h5 tt, h6 tt { + background-color: transparent; +} + +/* -- math display ---------------------------------------------------------- */ + +img.math { + vertical-align: middle; +} + +div.body div.math p { + text-align: center; +} + +span.eqno { + float: right; +} + +/* -- printout stylesheet --------------------------------------------------- */ + +@media print { + div.document, + div.documentwrapper, + div.bodywrapper { + margin: 0 !important; + width: 100%; + } + + div.sphinxsidebar, + div.related, + div.footer, + #top-link { + display: none; + } +} diff --git a/doc/source/_static/default.css b/doc/source/_static/default.css new file mode 100644 index 0000000000..c8091ecb4d --- /dev/null +++ b/doc/source/_static/default.css @@ -0,0 +1,230 @@ +/** + * Sphinx stylesheet -- default theme + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + */ + +@import url("basic.css"); + +/* -- page layout ----------------------------------------------------------- */ + +body { + font-family: sans-serif; + font-size: 100%; + background-color: #11303d; + color: #000; + margin: 0; + padding: 0; +} + +div.document { + background-color: #1c4e63; +} + +div.documentwrapper { + float: left; + width: 100%; +} + +div.bodywrapper { + margin: 0 0 0 230px; +} + +div.body { + background-color: #ffffff; + color: #000000; + padding: 0 20px 30px 20px; +} + +div.footer { + color: #ffffff; + width: 100%; + padding: 9px 0 9px 0; + text-align: center; + font-size: 75%; +} + +div.footer a { + color: #ffffff; + text-decoration: underline; +} + +div.related { + background-color: #133f52; + line-height: 30px; + color: #ffffff; +} + +div.related a { + color: #ffffff; +} + +div.sphinxsidebar { +} + +div.sphinxsidebar h3 { + font-family: 'Trebuchet MS', sans-serif; + color: #ffffff; + font-size: 1.4em; + font-weight: normal; + margin: 0; + padding: 0; +} + +div.sphinxsidebar h3 a { + color: #ffffff; +} + +div.sphinxsidebar h4 { + font-family: 'Trebuchet MS', sans-serif; + color: #ffffff; + font-size: 1.3em; + font-weight: normal; + margin: 5px 0 0 0; + padding: 0; +} + +div.sphinxsidebar p { + color: #ffffff; +} + +div.sphinxsidebar p.topless { + margin: 5px 10px 10px 10px; +} + +div.sphinxsidebar ul { + margin: 10px; + padding: 0; + color: #ffffff; +} + +div.sphinxsidebar a { + color: #98dbcc; +} + +div.sphinxsidebar input { + border: 1px solid #98dbcc; + font-family: sans-serif; + font-size: 1em; +} + +/* -- body styles ----------------------------------------------------------- */ + +a { + color: #355f7c; + text-decoration: none; +} + +a:hover { + text-decoration: underline; +} + +div.body p, div.body dd, div.body li { + text-align: left; + line-height: 130%; +} + +div.body h1, +div.body h2, +div.body h3, +div.body h4, +div.body h5, +div.body h6 { + font-family: 'Trebuchet MS', sans-serif; + background-color: #f2f2f2; + font-weight: normal; + color: #20435c; + border-bottom: 1px solid #ccc; + margin: 20px -20px 10px -20px; + padding: 3px 0 3px 10px; +} + +div.body h1 { margin-top: 0; font-size: 200%; } +div.body h2 { font-size: 160%; } +div.body h3 { font-size: 140%; } +div.body h4 { font-size: 120%; } +div.body h5 { font-size: 110%; } +div.body h6 { font-size: 100%; } + +a.headerlink { + color: #c60f0f; + font-size: 0.8em; + padding: 0 4px 0 4px; + text-decoration: none; +} + 
+a.headerlink:hover { + background-color: #c60f0f; + color: white; +} + +div.body p, div.body dd, div.body li { + text-align: left; + line-height: 130%; +} + +div.admonition p.admonition-title + p { + display: inline; +} + +div.admonition p { + margin-bottom: 5px; +} + +div.admonition pre { + margin-bottom: 5px; +} + +div.admonition ul, div.admonition ol { + margin-bottom: 5px; +} + +div.note { + background-color: #eee; + border: 1px solid #ccc; +} + +div.seealso { + background-color: #ffc; + border: 1px solid #ff6; +} + +div.topic { + background-color: #eee; +} + +div.warning { + background-color: #ffe4e4; + border: 1px solid #f66; +} + +p.admonition-title { + display: inline; +} + +p.admonition-title:after { + content: ":"; +} + +pre { + padding: 5px; + background-color: #eeffcc; + color: #333333; + line-height: 120%; + border: 1px solid #ac9; + border-left: none; + border-right: none; +} + +tt { + background-color: #ecf0f3; + padding: 0 1px 0 1px; + font-size: 0.95em; +} + +.warning tt { + background: #efc2c2; +} + +.note tt { + background: #d6d6d6; +} diff --git a/doc/source/_static/jquery.tweet.js b/doc/source/_static/jquery.tweet.js new file mode 100644 index 0000000000..79bf0bdb4c --- /dev/null +++ b/doc/source/_static/jquery.tweet.js @@ -0,0 +1,154 @@ +(function($) { + + $.fn.tweet = function(o){ + var s = { + username: ["seaofclouds"], // [string] required, unless you want to display our tweets. :) it can be an array, just do ["username1","username2","etc"] + list: null, //[string] optional name of list belonging to username + avatar_size: null, // [integer] height and width of avatar if displayed (48px max) + count: 3, // [integer] how many tweets to display? + intro_text: null, // [string] do you want text BEFORE your your tweets? + outro_text: null, // [string] do you want text AFTER your tweets? + join_text: null, // [string] optional text in between date and tweet, try setting to "auto" + auto_join_text_default: "i said,", // [string] auto text for non verb: "i said" bullocks + auto_join_text_ed: "i", // [string] auto text for past tense: "i" surfed + auto_join_text_ing: "i am", // [string] auto tense for present tense: "i was" surfing + auto_join_text_reply: "i replied to", // [string] auto tense for replies: "i replied to" @someone "with" + auto_join_text_url: "i was looking at", // [string] auto tense for urls: "i was looking at" http:... 
+ loading_text: null, // [string] optional loading text, displayed while tweets load + query: null // [string] optional search query + }; + + if(o) $.extend(s, o); + + $.fn.extend({ + linkUrl: function() { + var returning = []; + var regexp = /((ftp|http|https):\/\/(\w+:{0,1}\w*@)?(\S+)(:[0-9]+)?(\/|\/([\w#!:.?+=&%@!\-\/]))?)/gi; + this.each(function() { + returning.push(this.replace(regexp,"$1")); + }); + return $(returning); + }, + linkUser: function() { + var returning = []; + var regexp = /[\@]+([A-Za-z0-9-_]+)/gi; + this.each(function() { + returning.push(this.replace(regexp,"@$1")); + }); + return $(returning); + }, + linkHash: function() { + var returning = []; + var regexp = / [\#]+([A-Za-z0-9-_]+)/gi; + this.each(function() { + returning.push(this.replace(regexp, ' #$1')); + }); + return $(returning); + }, + capAwesome: function() { + var returning = []; + this.each(function() { + returning.push(this.replace(/\b(awesome)\b/gi, '$1')); + }); + return $(returning); + }, + capEpic: function() { + var returning = []; + this.each(function() { + returning.push(this.replace(/\b(epic)\b/gi, '$1')); + }); + return $(returning); + }, + makeHeart: function() { + var returning = []; + this.each(function() { + returning.push(this.replace(/(<)+[3]/gi, "")); + }); + return $(returning); + } + }); + + function relative_time(time_value) { + var parsed_date = Date.parse(time_value); + var relative_to = (arguments.length > 1) ? arguments[1] : new Date(); + var delta = parseInt((relative_to.getTime() - parsed_date) / 1000); + var pluralize = function (singular, n) { + return '' + n + ' ' + singular + (n == 1 ? '' : 's'); + }; + if(delta < 60) { + return 'less than a minute ago'; + } else if(delta < (45*60)) { + return 'about ' + pluralize("minute", parseInt(delta / 60)) + ' ago'; + } else if(delta < (24*60*60)) { + return 'about ' + pluralize("hour", parseInt(delta / 3600)) + ' ago'; + } else { + return 'about ' + pluralize("day", parseInt(delta / 86400)) + ' ago'; + } + } + + function build_url() { + var proto = ('https:' == document.location.protocol ? 'https:' : 'http:'); + if (s.list) { + return proto+"//api.twitter.com/1/"+s.username[0]+"/lists/"+s.list+"/statuses.json?per_page="+s.count+"&callback=?"; + } else if (s.query == null && s.username.length == 1) { + return proto+'//twitter.com/status/user_timeline/'+s.username[0]+'.json?count='+s.count+'&callback=?'; + } else { + var query = (s.query || 'from:'+s.username.join('%20OR%20from:')); + return proto+'//search.twitter.com/search.json?&q='+query+'&rpp='+s.count+'&callback=?'; + } + } + + return this.each(function(){ + var list = $('